id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8084341 | """
Copyright (c) 2018-present. <NAME>
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import subprocess
import numpy as np
# BenA:
# the constant values here reflect the values in original FastText implementation
BOW = "<"
EOW = ">"
M32 = 0xffffffff
def m32(n):
    """Truncate an integer to its low 32 bits (unsigned uint32 semantics)."""
    return M32 & n
def mmul(a, b):
    """Multiply two integers with C-style uint32 overflow wrap-around."""
    return m32(b * a)
def hash(str):
    """32-bit FNV-1a hash of *str*, matching the original FastText code.

    Note: the function name intentionally shadows the builtin ``hash`` and
    the parameter shadows ``str`` -- both kept for interface compatibility.
    """
    # FNV-1a: xor in each code point, then multiply by the 32-bit FNV prime,
    # wrapping to uint32.  The original's redundant int(ord(c)) and the
    # per-step double masking are folded into one masked expression; ord(c)
    # is always < 2**32 so masking it separately was a no-op.
    h = 2166136261
    for c in str:
        h = ((h ^ ord(c)) * 16777619) & 0xffffffff
    return h
6480804 | # -*- coding: utf-8 -*-
"""
Created on Wed May 30 13:27:02 2012
@author: hlampesberger
"""
from data.dataset import Dataset, AnnotatedDataset
from learning.regular.ktestable import build_ktestable_DFA
from base import Result
from regular.ops import write_graphviz
import cProfile
def run():
    """Experiment driver: build a 2-testable DFA from a toy sample and
    sanity-check that every training string is accepted.

    Alternative data sources are kept commented out for quick switching
    between experiments.  (Python 2 source.)
    """
    # labelspath = "../testdata/cms.01.labels"
    # datapath = "../testdata/cms.01.data"
    # datapath = "../testdata/fun.data"
    # labelspath = "../testdata/fun.labels"
    # datapath = "../testdata/fun.data"
    # ds = AnnotatedDataset.parse_sentence(labelspath, datapath)
    # ds = AnnotatedDataset.parse_abbadingoformat("../testdata/train170.txt")
    # ds = AnnotatedDataset.parse_ewsformat_bin(labelspath, datapath)
    ds = Dataset.parse_linewise("testdata/KtestableDataSample.txt")
    # labelspath = "../testdata/lol.labels"
    # datapath = "../testdata/lol.data"
    # ds = Dataset.parse_sentence(datapath)
    # NOTE(review): the next line discards the dataset loaded above --
    # presumably intentional while experimenting; confirm before cleanup.
    ds = Dataset.from_list(["abracadabra"])
    dfa = build_ktestable_DFA(2, ds)
    # Sanity check: the learned automaton must accept all of its training data.
    for sample in ds:
        if dfa.membership(sample[0]) != Result.accept:
            print "problem", sample[0]
    # dfa.write_graphviz('test.txt')
    #dfa = dfa.minimize().rename()
    #dfa.del_dead_states()
    dfa.write_png()
    print dfa
    # for i in dfa.delta:
    #     print i
if __name__ == '__main__':
    # Run under the profiler by default.
    cProfile.run('run()')
    # run()
| StarcoderdataPython |
1781993 | import numpy as np
import matplotlib.pyplot as plt
import os
def main():
    """Plot the log-likelihood ratio ln[p(x|L=1)/p(x|L=2)] of two
    Laplace-style class-conditional densities and save it to results/q2.png.
    """
    # Location/scale parameters of class 1.
    a_1 = 0
    b_1 = 1
    # Location/scale parameters of class 2.
    a_2 = 1
    b_2 = 2
    # Evaluation grid for x.
    x = np.arange(-10, 10, 0.5)
    # Log-likelihood ratio of two Laplace densities (constant terms cancel
    # only partially; scale-dependent constants are omitted here as in the
    # original -- TODO confirm that is intended).
    y = -np.divide(np.abs(x - a_1), b_1) + np.divide(np.abs(x - a_2), b_2)
    # plot figure
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111)
    ax.grid(which='both', color='grey', linestyle='--')
    ax.set_xticks(np.arange(-10, 11, 1))
    ax.plot(x, y, linestyle='-', color='b', marker='o')
    ax.set_title("Loglikelihood Ratio")
    ax.set_xlabel("x")
    # Fixed: the mathtext label was missing the closing ']', which makes
    # matplotlib's mathtext parser raise on render.
    ax.set_ylabel(r'$ln[\frac{p(x|L=1)}{p(x|L=2)}]$')
    plt.savefig('results/q2.png')
    plt.show()
if __name__=='__main__':
    # Ensure the output directory exists before main() calls
    # plt.savefig('results/q2.png').
    dirName = "results"
    if not os.path.exists(dirName):
        try:
            os.mkdir(dirName)
        except OSError:
            print("Creation of the directory %s failed" % dirName)
        else:
            print("Successfully created the directory %s " % dirName)
    else:
        print("Directory already exists.")
    main()
| StarcoderdataPython |
8003354 | import math
# WGS84 ellipsoid semi-major axis
WGS84_ELLIPSOID_SEMI_MAJOR_AXIS = 6378137.
EARTH_EQUATORIAL_PERIMETER = 2. * math.pi * WGS84_ELLIPSOID_SEMI_MAJOR_AXIS
SEP = ";"
T_MIN = 180
L_MAX = 15
def degrees_to_meters(res):
    """Convert an angular resolution in degrees to metres along the equator."""
    return EARTH_EQUATORIAL_PERIMETER * (res / 360.0)
def res_table():
    """Build a table of candidate grid resolutions.

    For each base period t (excluding exact multiples of T_MIN beyond the
    first) find the smallest level n such that t * 2**n is an integer
    multiple of T_MIN, then tabulate the metre resolution halved at each of
    the L_MAX + 1 levels.  Returns a tuple of row tuples, header first.
    """
    table = []
    header = ["T", "N", "1/res0 (1/deg)", "res0 (deg)"]
    for i in range(0, L_MAX + 1):
        header.append(f"res{i}")
    table.append(tuple(header))
    for t in range(T_MIN, 25 * T_MIN + 1):
        # Skip exact multiples of T_MIN (other than T_MIN itself) -- they
        # reproduce rows already generated by smaller t values.
        if t > T_MIN and t % T_MIN == 0:
            continue
        for n in range(0, L_MAX + 1):
            u = t * 2 ** n
            m = u // T_MIN
            # Integer-exact divisibility test: keep the first n for which
            # t * 2**n is a whole multiple of T_MIN.
            if T_MIN * m == u:
                row = [t, n, m, 1 / m]
                res0 = degrees_to_meters(1 / m)
                for i in range(0, L_MAX + 1):
                    row.append(res0 * math.pow(2, -i))
                table.append(tuple(row))
                break
    return tuple(table)
if __name__ == "__main__":
    table = res_table()
    # Emit the table as a semicolon-separated CSV, one row per line.
    with open("grid-resolutions.csv", "w") as fp:
        fp.writelines([SEP.join(map(str, row)) + "\n" for row in table])
| StarcoderdataPython |
# List all of the images available for recovering purposes
from oneandone.client import OneAndOneService
# SECURITY NOTE(review): the API token below is hard-coded in source.  Move
# it to an environment variable / secrets store and rotate the exposed key.
client = OneAndOneService('675fbe491b27896b57e76867604f8255')
recovery_appliances = client.list_recovery_images()
# Retrieve a specific appliance
from oneandone.client import OneAndOneService
client = OneAndOneService('675fbe491b27896b57e76867604f8255')
# NOTE(review): image_id is left empty -- presumably a placeholder for a
# real appliance id; confirm before use.
recovery_appliance = client.get_recovery_image(image_id='')
| StarcoderdataPython |
1753466 | from rest_framework import serializers
from talentmap_api.common.serializers import PrefetchedSerializer, StaticRepresentationField
from talentmap_api.position.models import Position, Grade, Skill, SkillCone, CapsuleDescription, Classification, Assignment, PositionBidStatistics
from talentmap_api.language.serializers import LanguageQualificationSerializer
from talentmap_api.organization.serializers import PostSerializer
class CapsuleDescriptionSerializer(PrefetchedSerializer):
    """Serializer for a position's capsule description, including an
    editability flag computed for the requesting user."""
    last_editing_user = StaticRepresentationField(read_only=True)
    # This is a dynamic flag used by the front end to simplify checking if the current user has permissions
    is_editable_by_user = serializers.SerializerMethodField()
    date_created = serializers.DateTimeField(read_only=True)
    date_updated = serializers.DateTimeField(read_only=True)
    def get_is_editable_by_user(self, obj):
        # True iff the requesting user holds the post-specific edit permission.
        try:
            return self.context.get("request").user.has_perm(f"position.{obj.position.post.permission_edit_post_capsule_description_codename}")
        except AttributeError:
            # The position doesn't have a post, or otherwise
            return False
    class Meta:
        model = CapsuleDescription
        fields = "__all__"
        writable_fields = ("content", "point_of_contact", "website",)
class CurrentAssignmentSerializer(PrefetchedSerializer):
    """Assignment serializer used for a position's current assignment;
    exposes only the assignee's last name."""
    user = serializers.SerializerMethodField()
    tour_of_duty = StaticRepresentationField(read_only=True)
    def get_user(self, obj):
        # obj.user is a profile wrapper; obj.user.user is the auth user.
        if obj.user and obj.user.user:
            return obj.user.user.last_name
        else:
            return ""
    class Meta:
        model = Assignment
        fields = "__all__"
        nested = {
            "position": {
                "class": "talentmap_api.position.serializers.AssignmentPositionSerializer",
                "field": "position",
                "kwargs": {
                    "override_fields": [
                        "id",
                        "post__location",
                        "current_assignment"
                    ],
                    "read_only": True
                }
            }
        }
class AssignmentSerializer(CurrentAssignmentSerializer):
    """Full assignment serializer; extends the current-assignment variant
    with the employee id and a richer nested position."""
    emp_id = StaticRepresentationField(read_only=True)
    class Meta:
        model = Assignment
        fields = "__all__"
        nested = {
            "position": {
                "class": "talentmap_api.position.serializers.PositionSerializer",
                "field": "position",
                "kwargs": {
                    "override_fields": [
                        "id",
                        "position_number",
                        "bureau",
                        "skill",
                        "title",
                        "post__location",
                        "languages",
                    ],
                    "read_only": True
                }
            }
        }
class ClassificationSerializer(PrefetchedSerializer):
    """Plain serializer exposing all Classification fields."""
    class Meta:
        model = Classification
        fields = "__all__"
class PositionWritableSerializer(PrefetchedSerializer):
    """Write-side Position serializer: only classifications are mutable."""
    class Meta:
        model = Position
        fields = ("classifications",)
        writable_fields = ("classifications",)
class PositionBidStatisticsSerializer(PrefetchedSerializer):
    """Bid statistics for a position, keyed by bid cycle."""
    bidcycle = StaticRepresentationField(read_only=True)
    class Meta:
        model = PositionBidStatistics
        exclude = ("position",)
class PositionListSerializer(PrefetchedSerializer):
    """Compact Position serializer for list endpoints: a field subset plus
    prefetched nested summaries."""
    grade = StaticRepresentationField(read_only=True)
    skill = StaticRepresentationField(read_only=True)
    bureau = serializers.SerializerMethodField()
    tour_of_duty = StaticRepresentationField(read_only=True)
    organization = serializers.SerializerMethodField()
    availability = serializers.SerializerMethodField()
    # This method returns the string representation of the bureau, or the code
    # if it doesn't currently exist in the database
    def get_bureau(self, obj):
        if obj.bureau:
            return obj.bureau._string_representation
        elif obj.organization:
            return obj.organization._string_representation
        else:
            return obj._bureau_code
    # This method returns org info for domestic positions
    def get_organization(self, obj):
        # Returns None (field omitted) for positions outside the USA.
        location = obj.post.location if obj.post is not None else None
        if location and location.country and location.country.code == 'USA':
            return obj.organization.short_description
    def get_availability(self, obj):
        return obj.availability
    class Meta:
        model = Position
        fields = ["id", "grade", "skill", "bureau", "organization", "tour_of_duty", "languages", "post",
                  "current_assignment", "position_number", "posted_date", "title", "availability"]
        nested = {
            "description": {
                "class": CapsuleDescriptionSerializer,
                "field": "description",
                "kwargs": {
                    "read_only": True
                }
            },
            "bid_statistics": {
                "class": PositionBidStatisticsSerializer,
                "kwargs": {
                    "many": True,
                    "read_only": True
                }
            },
            "languages": {
                "class": LanguageQualificationSerializer,
                "kwargs": {
                    "many": True,
                    "read_only": True
                }
            },
            "post": {
                "class": PostSerializer,
                "field": "post",
                "kwargs": {
                    "override_fields": [
                        "differential_rate",
                        "danger_pay",
                        "location",
                        "tour_of_duty",
                        "obc_id",
                    ],
                    "many": False,
                    "read_only": True
                }
            },
            "latest_bidcycle": {
                "class": "talentmap_api.bidding.serializers.serializers.BidCycleSerializer",
                "field": "latest_bidcycle",
                "kwargs": {
                    "read_only": True
                }
            },
            "current_assignment": {
                "class": CurrentAssignmentSerializer,
                "field": "current_assignment",
                "kwargs": {
                    "override_fields": [
                        "user",
                        "estimated_end_date"
                    ],
                    "read_only": True
                }
            }
        }
class PositionSerializer(PrefetchedSerializer):
    """Full Position representation: all model fields plus nested related
    objects (post, languages, bid statistics, current assignment, ...)."""
    grade = StaticRepresentationField(read_only=True)
    skill = StaticRepresentationField(read_only=True)
    bureau = serializers.SerializerMethodField()
    organization = serializers.SerializerMethodField()
    tour_of_duty = StaticRepresentationField(read_only=True)
    classifications = StaticRepresentationField(read_only=True, many=True)
    # NOTE(review): no get_representation() is defined here or in any parent
    # visible in this file -- confirm PrefetchedSerializer provides it.
    representation = serializers.SerializerMethodField()
    availability = serializers.SerializerMethodField()
    # This method returns the string representation of the bureau, or the code
    # if it doesn't currently exist in the database
    def get_bureau(self, obj):
        if obj.bureau:
            return obj.bureau._string_representation
        elif obj.organization:
            return obj.organization._string_representation
        else:
            return obj._bureau_code
    # This method returns the string representation of the parent org, or the code
    # if it doesn't currently exist in the database
    def get_organization(self, obj):
        if obj.organization:
            return obj.organization._string_representation
        else:
            return obj._org_code
    def get_availability(self, obj):
        return obj.availability
    class Meta:
        model = Position
        fields = "__all__"
        nested = {
            "bid_statistics": {
                "class": PositionBidStatisticsSerializer,
                "kwargs": {
                    "many": True,
                    "read_only": True
                }
            },
            "languages": {
                "class": LanguageQualificationSerializer,
                "kwargs": {
                    "many": True,
                    "read_only": True
                }
            },
            "post": {
                "class": PostSerializer,
                "field": "post",
                "kwargs": {
                    "many": False,
                    "read_only": True
                }
            },
            "description": {
                "class": CapsuleDescriptionSerializer,
                "field": "description",
                "kwargs": {
                    "read_only": True
                }
            },
            "latest_bidcycle": {
                "class": "talentmap_api.bidding.serializers.serializers.BidCycleSerializer",
                "field": "latest_bidcycle",
                "kwargs": {
                    "read_only": True
                }
            },
            "current_assignment": {
                "class": CurrentAssignmentSerializer,
                "field": "current_assignment",
                "kwargs": {
                    "override_fields": [
                        "user",
                        "status",
                        "start_date",
                        "tour_of_duty",
                        "estimated_end_date"
                    ],
                    "read_only": True
                }
            }
        }
class AssignmentPositionSerializer(PrefetchedSerializer):
    """Position serializer used when nested inside an assignment; like
    PositionSerializer but without the current_assignment nesting (avoids a
    circular nest through CurrentAssignmentSerializer)."""
    grade = StaticRepresentationField(read_only=True)
    skill = StaticRepresentationField(read_only=True)
    bureau = serializers.SerializerMethodField()
    organization = serializers.SerializerMethodField()
    tour_of_duty = StaticRepresentationField(read_only=True)
    classifications = StaticRepresentationField(read_only=True, many=True)
    # NOTE(review): no get_representation() is visible in this file -- confirm
    # PrefetchedSerializer provides it.
    representation = serializers.SerializerMethodField()
    availability = serializers.SerializerMethodField()
    # This method returns the string representation of the bureau, or the code
    # if it doesn't currently exist in the database
    def get_bureau(self, obj):
        # NOTE(review): unlike PositionSerializer.get_bureau, this variant has
        # no organization fallback -- confirm whether that is intentional.
        if obj.bureau:
            return obj.bureau._string_representation
        else:
            return obj._bureau_code
    # This method returns the string representation of the parent org, or the code
    # if it doesn't currently exist in the database
    def get_organization(self, obj):
        if obj.organization:
            return obj.organization._string_representation
        else:
            return obj._org_code
    def get_availability(self, obj):
        return obj.availability
    class Meta:
        model = Position
        fields = "__all__"
        nested = {
            "bid_statistics": {
                "class": PositionBidStatisticsSerializer,
                "kwargs": {
                    "many": True,
                    "read_only": True
                }
            },
            "languages": {
                "class": LanguageQualificationSerializer,
                "kwargs": {
                    "many": True,
                    "read_only": True
                }
            },
            "post": {
                "class": PostSerializer,
                "field": "post",
                "kwargs": {
                    "many": False,
                    "read_only": True
                }
            },
            "description": {
                "class": CapsuleDescriptionSerializer,
                "field": "description",
                "kwargs": {
                    "read_only": True
                }
            },
            "latest_bidcycle": {
                "class": "talentmap_api.bidding.serializers.serializers.BidCycleSerializer",
                "field": "latest_bidcycle",
                "kwargs": {
                    "read_only": True
                }
            }
        }
class GradeSerializer(PrefetchedSerializer):
    """Minimal grade representation (id + code only)."""
    class Meta:
        model = Grade
        fields = ("id", "code")
class SkillSerializer(PrefetchedSerializer):
    """Skill with its cone rendered as a static string."""
    cone = StaticRepresentationField(read_only=True)
    class Meta:
        model = Skill
        fields = "__all__"
class SkillConeSerializer(PrefetchedSerializer):
    """Skill cone with its member skills rendered as static strings."""
    skills = StaticRepresentationField(read_only=True, many=True)
    class Meta:
        model = SkillCone
        fields = "__all__"
1990311 | '''
编写一个程序,找出第 n 个丑数。
丑数就是质因数只包含 2, 3, 5 的正整数。
示例:
输入: n = 10
输出: 12
解释: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 是前 10 个丑数。
说明:
1 是丑数。
n 不超过1690。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ugly-number-ii
'''
class Ugly:
    """Precomputes the first 1690 ugly numbers (positive integers whose only
    prime factors are 2, 3 and 5) using the classic three-pointer merge."""
    def __init__(self):
        seq = [1]
        # Merge cursors into the x2 / x3 / x5 candidate streams.
        i2 = i3 = i5 = 0
        while len(seq) < 1690:
            nxt = min(seq[i2] * 2, seq[i3] * 3, seq[i5] * 5)
            seq.append(nxt)
            # Advance every cursor that produced the minimum so duplicates
            # (e.g. 6 = 2*3 = 3*2) are emitted only once.
            if nxt == seq[i2] * 2:
                i2 += 1
            if nxt == seq[i3] * 3:
                i3 += 1
            if nxt == seq[i5] * 5:
                i5 += 1
        self.nums = seq
class Solution:
    # Class-level instance: the ugly-number table is computed once and shared
    # by every Solution object.
    u = Ugly()
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (1-indexed, n <= 1690)."""
        return self.u.nums[n-1]
1826969 | <filename>api_comm.py
from requests import Request, Session
class ApiComm:
    """Thin HTTP client built on a persistent requests Session.

    Optional token credentials are installed on the session headers at
    construction time; ``connect`` dispatches to the matching verb method.
    """
    def __init__(self, base_url, token=None, token_type=None, headers=None):
        """Prepare the session, base URL, auth header and a prepared POST."""
        self.session = Session()
        if headers is not None:
            self.session.headers.update(headers)
        self.base_url = base_url.strip('/')
        # A token and its type must be supplied together.
        if token is not None and token_type is None:
            raise ValueError('Token type is required.')
        if token_type is not None and token is None:
            raise ValueError('Token is required.')
        if token is not None:
            self.session.headers.update({"Authorization": f'{token_type} {token}'})
        post_req = Request('POST', base_url)
        self.prep_post = self.session.prepare_request(post_req)
    def connect(self, method, path=None, headers=None, params=None, data=None):
        """Issue an HTTP request; ``path`` (if any) is joined onto base_url."""
        url = self.base_url if path is None else '/'.join((self.base_url, path.strip('/')))
        verb = method.lower()
        if verb == 'get':
            return self.session.get(url, headers=headers, params=params)
        if verb in ('post', 'put', 'patch'):
            # The body-carrying verbs share an identical call signature.
            return getattr(self.session, verb)(url, data=data, headers=headers, params=params)
        if verb == 'delete':
            return self.session.delete(url, headers=headers, params=params)
        raise ValueError(f'Unsupported HTTP method provided: {method}.')
| StarcoderdataPython |
1675533 | from calendar_layout import *
#from update import *
from Tkinter import *
import sqlite3
import matrices
import numpy as np
import auto
import export
import random
import datetime as datetime
import urllib2
from bs4 import BeautifulSoup
import csv
import sys
import globalvars
# Menu and Frame Functions
def doNothing():
    # Placeholder callback for menu entries that are not wired up yet.
    print "Ok!"
def doQuit():
    # Destroy the Tk root window, ending the mainloop.
    root.destroy()
def doDownloadClasses():
    """Menu handler: prompt for a 4-digit term code and a save location
    (relative to ~/Documents), then download that term's class listings into
    the database via the update_classes module.  Status goes to the status
    bar ``d``.  (Python 2 source.)"""
    print 'Classes Downloaded'
    import update_classes
    #import sys
    class MyDialog:
        # Modal two-field prompt: term code + destination folder.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Enter 4 digit Code of Term:')
            self.myLabel.pack()
            self.myEntryBox = Entry(top)
            self.myEntryBox.pack()
            self.myLabel2 = Label(top, text='Enter Location to Save Classes: Documents/')
            self.myLabel2.pack()
            self.myEntryBox2 = Entry(top)
            self.myEntryBox2.pack()
            self.mySubmitButton = Button(top, text='Update', command=lambda: self.send())
            self.mySubmitButton.pack()
        def send(self):
            self.value = [self.myEntryBox.get(), self.myEntryBox2.get()]
            self.top.destroy()
    def onClick():
        # Block until the dialog is dismissed, then return its values.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    a = onClick()
    import os
    docu_path = os.path.join(os.path.expanduser("~"), "Documents")
    docu_path = docu_path + "/" + a[1]
    update_classes.update_classes(a[0], docu_path,d)
    update_classes.deleteExtraRecords(docu_path, d)
    message = "Classes for Term " + a[0] + " Downloaded!"
    d.set(message)
def doUpdateClasses():
    """Menu handler: prompt for a file under ~/Documents and load it into the
    Classes table, then rebuild the section matrix.  (Python 2 source.)"""
    print 'Classes updated'
    import update_classes_table as uct
    class MyDialog:
        # Modal single-field prompt for the file location.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel2 = Label(top, text='Enter Location of File: Documents/')
            self.myLabel2.pack()
            self.myEntryBox2 = Entry(top)
            self.myEntryBox2.pack()
            self.mySubmitButton = Button(top, text='Update', command=lambda: self.send())
            self.mySubmitButton.pack()
        def send(self):
            self.value = [self.myEntryBox2.get()]
            self.top.destroy()
    def onClick():
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    filename = onClick()
    docu_path = os.path.join(os.path.expanduser("~"), "Documents")
    docu_path = docu_path + "/" + filename[0]
    uct.update_classes_table(docu_path)
    message = "Analyzing Sections... Please be patient"
    d.set(message)
    # Rebuild the global section matrix from the freshly loaded classes.
    global matrix_sections
    matrix_sections = matrices.matrix_sections()
    message = "Classes Updated From File"
    d.set(message)
def doUpdateClassWorth():
    """Menu handler: show an editable grid of (class, worth) pairs and write
    the edited worths back to the Classes table.  (Python 2 source.)"""
    print 'class worths updated'
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    cur.execute('SELECT ShortName, Worth From Classes')
    classes = cur.fetchall()
    class MyDialog:
        # Modal grid: one row per class, label in column 1, Entry pre-filled
        # with the current worth in column 2.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            height = len(classes)
            width = 2
            for i in range(height): #Rows
                a = Label(top, text = classes[i][0])
                a.grid(row = i+1, column = 1)
                b = Entry(top)
                b.insert(END, classes[i][1])
                b.grid(row=i+1, column=2)
            myLabel = Label(top, text = "Update Class Worth")
            myLabel.grid(row = 0, column = 1, columnspan = 2)
            mySubmitButton = Button(top, text='Update', command=lambda: self.send())
            mySubmitButton.grid(row = 50, column = 1, columnspan = 2)
        def send(self):
            def find_in_grid(frame, row, column):
                # Locate the widget gridded at (row, column), if any.
                for children in frame.children.values():
                    info = children.grid_info()
                    #note that rows and column numbers are stored as string
                    if info['row'] == str(row) and info['column'] == str(column):
                        return children
                return None
            info = []
            # Collect the edited worth from each row's Entry (column 2).
            for i in range(len(classes)):
                info.append(find_in_grid(self.top,i+1, 2).get())
            self.value = info
            self.top.destroy()
    def onClick():
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    worths = onClick()
    # update in database
    count = 0
    for cl in classes:
        cur.execute('UPDATE Classes SET Worth = ? WHERE ShortName = ?', (float(worths[count]), cl[0]))
        count = count + 1
    conn.commit()
    message = "Updated Class Worths!"
    d.set(message)
def doUpdateStudents():
    """Menu handler: load a student survey file (TSV) into the Students table
    and rebuild the preference matrix.  (Python 2 source.)"""
    print 'students updated'
    import update_students_table as ust
    import matrices
    import os
    import errno
    def make_sure_path_exists(path):
        # Create *path* if missing; ignore the race where it already exists.
        # NOTE(review): defined but not called in this handler.
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
    class MyDialog:
        # Modal prompt for the survey-response file name.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Remember to rename survey response to \'students.tsv\'!')
            self.myLabel.pack()
            self.myLabel2 = Label(top, text='Enter File To Open:')
            self.myLabel2.pack()
            self.myEntryBox2 = Entry(top)
            self.myEntryBox2.pack()
            self.mySubmitButton = Button(top, text='Update', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.value = self.myEntryBox2.get()
            self.top.destroy()
    def onClick():
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    filename = onClick()
    docu_path = os.path.join(os.path.expanduser("~"), "Documents")
    docu_path = docu_path + "/" + filename
    ust.update_students_table(docu_path, d)
    # Recompute and reload the student-section preference matrix.
    matrices.matrix_pref(d)
    globalvars.mat_prefs = np.load(globalvars.mat_prefs_path)
    message = "Student Responses Updated!"
    d.set(message)
# List Classes in popup box for google forms survey
def doListClasses():
    """Show a read-only popup listing every class as 'ShortName - Name',
    for copy/paste into a Google Forms survey.  (Python 2 source.)"""
    print 'doListClasses'
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    classes = cur.execute('SELECT ShortName, Name FROM Classes')
    classes = cur.fetchall()
    class MyDialog:
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='List of Classes:')
            self.myLabel.pack()
            self.myframe = Frame(top)
            self.myframe.pack(fill = BOTH)
            T = Text(self.myframe)
            T.pack()
            def addtolist(item):
                T.insert(END, item[0] + " - " + item[1] + "\n")
            for item in classes:
                addtolist(item = item)
            self.mySubmitButton = Button(top, text='Finished', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.top.destroy()
    def onClick():
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return()
    onClick()
def doListProfessors():
    """Show a read-only popup listing every professor name.  (Python 2.)"""
    print 'doListProfessors'
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    profs = cur.execute('SELECT Name FROM Professors')
    profs = cur.fetchall()
    class MyDialog:
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='List of Professors:')
            self.myLabel.pack()
            self.myframe = Frame(top)
            self.myframe.pack(fill = BOTH)
            T = Text(self.myframe)
            T.pack()
            def addtolist(item):
                # Each row is a 1-tuple; str()[1:-1] strips the parentheses.
                T.insert(END, str(item[0])[1:-1] + "\n")
            for item in profs:
                addtolist(item = item)
            self.mySubmitButton = Button(top, text='Finished', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.top.destroy()
    def onClick():
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return()
    onClick()
class StatusBar(Frame):
    """Single-line status bar; the app updates it via d.set(...)."""
    def __init__(self, master):
        Frame.__init__(self, master)
        self.label = Label(self, text = "Welcome!", bd=1, relief=SUNKEN, anchor=W)
        self.label.pack(fill=X)
    def set(self, format, *args):
        # printf-style API: set("x %s", value).  Parameter name shadows the
        # builtin ``format`` (kept for interface compatibility).
        self.label.config(text=format % args)
        self.label.update_idletasks()
    def clear(self):
        self.label.config(text="")
        self.label.update_idletasks()
def doByClass():
    """Switch the UI into class-oriented scheduling mode and fill the left
    listbox with (ShortName, Name) class rows.  (Python 2 source.)"""
    print 'doByClass'
    global scheduling
    scheduling = 'class'
    leftListbox.delete(0,END)
    chosenListbox.delete(0,END)
    openListbox.delete(0,END)
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    classes = cur.execute('SELECT ShortName, Name FROM Classes')
    classes = cur.fetchall()
    for item in classes:
        leftListbox.insert(END, item)
def doByStudent():
    """Switch the UI into student-oriented scheduling mode and fill the left
    listbox with 'StudentID: Name (Scheduled)' rows.  (Python 2 source.)"""
    print 'doByStudent'
    global scheduling
    scheduling = 'student'
    leftListbox.delete(0,END)
    chosenListbox.delete(0,END)
    openListbox.delete(0,END)
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    classes = cur.execute('SELECT StudentID, Name, Scheduled FROM Students')
    classes = cur.fetchall()
    for item in classes:
        item = str(item[0]) + ": " + item[1] + " (" + str(item[2]) + ")"
        leftListbox.insert(END, item)
def doAutomateFast():
print 'AutomateFast'
doSave(output2 = output)
message = "Generating TA Lines"
d.set(message)
auto.gen_sec_matrix(pop = 100, keep = 10, output = output)
auto.break_up(output)
global mat_sch
message = "Matching TAs with Lines"
d.set(message)
mat_sch = auto.gen_sec_stu_matrix(pop = 1000, keep = 1, mats = 10, output = output)[0]
global output
auto.updateDatabase(mat_sch, output)
message = "Schedule Created!"
d.set(message)
return()
def doAutomateBest():
print 'doAutomateBest'
doSave(output2 = output)
message = "Generating TA Lines"
d.set(message)
auto.gen_sec_matrix(pop = 1000, keep = 100, output = output)
auto.break_up(output)
global mat_sch
global mat_prefs
message = "Matching TAs with Lines"
d.set(message)
mat_sch = auto.gen_sec_stu_matrix(pop = 10000, keep = 1, mats = 100, output = output)[0]
auto.updateDatabase(mat_sch, output)
message = "Schedule Created!"
d.set(message)
return()
def doViewClass():
    """Render the currently selected class on the calendar: one block per
    section meeting time, labelled with the assigned TA's name (or "0" when
    unassigned).  Blocks that fail to render are listed in the info area.
    (Python 2 source.)"""
    print 'doViewClass'
    current = leftListbox.get(ANCHOR)[0]
    global current_class
    current_class = current
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    # All (section, meeting-time) rows for the selected class.
    classes = cur.execute('''
        SELECT B.SectionID, B.Scheduled, B.Name, D.Day, D.Start, D.End
        FROM Classes A INNER JOIN Sections B
        ON A.ClassID = B.ClassID
        INNER JOIN Sections_Times C
        ON B.SectionID = C.SectionID
        INNER JOIN Times D
        ON C.TimeID = D.TimeID
        WHERE A.ShortName = ?''', (current,))
    classes = cur.fetchall()
    # TA assignments for the class, keyed below by SectionID.
    cur.execute('''
        SELECT B.StudentID, B.Name, A.SectionID
        FROM Sections A INNER JOIN Students B
        ON A.StudentID = B.StudentID
        INNER JOIN Classes C
        ON A.ClassID = C.ClassID
        WHERE C.ShortName = ?''', (current,))
    tas = cur.fetchall()
    lines = dict()
    for ta in tas:
        lines[ta[2]] = ta[1]
    doCalendar(calendarFrame)
    info = []
    multiclass = dict()
    for cl in classes:
        name = "0"
        if cl[1] == 1:
            name = lines[cl[0]]
        # Day strings like "MWF" hold one character per weekday; draw a
        # block for each day character.
        if len(cl[3]) > 1:
            for c in cl[3]:
                try:
                    block_in_Calendar(text = cl[2] + " (" + name + ")", open = cl[1], day = c, start = cl[4], end = cl[5], calendarFrame = calendarFrame)
                except:
                    info.append(cl[2] + ": " + name + " " + cl[4] + "-" + cl[5])
        else:
            try:
                block_in_Calendar(text = cl[2] + " (" + name + ")", open = cl[1], day = cl[3], start = cl[4], end = cl[5], calendarFrame = calendarFrame)
            except:
                continue
                #info.append(cl[2] + ": " + name + " " + cl[4] + "-" + cl[5])
    add_info_Calendar(text = info, calendarFrame = calendarFrame)
    return()
def doViewStudent(student):
    """Render one student's calendar: unavailable times (open=3), preferred
    times (open=2), and any scheduled sections (open=1), plus a summary line
    of year/division/skill.  (Python 2 source.)"""
    print 'doViewStudent'
    # NOTE(review): bare attribute access below is a no-op statement.
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    # immediate time conflicts
    unavail = cur.execute('''SELECT D.Day, D.Start, D.End
        FROM Con_Student_Time B INNER JOIN Times D
        ON B.TimeID = D.TimeID
        WHERE B.StudentID = ?''', (student, ))
    unavail = cur.fetchall()
    for ut in unavail:
        block_in_Calendar(text = '', open = 3, day = ut[0], start = ut[1], end = ut[2], calendarFrame = calendarFrame)
    prefer = cur.execute('''SELECT D.Day, D.Start, D.End
        FROM Pref_Student_Time B INNER JOIN Times D
        ON B.TimeID = D.TimeID
        WHERE B.StudentID = ?''', (student, ))
    prefer = cur.fetchall()
    for pt in prefer:
        block_in_Calendar(text = '', open = 2, day = pt[0], start = pt[1], end = pt[2], calendarFrame = calendarFrame)
    cur.execute('SELECT Year, Division, Skill FROM Students WHERE StudentID = ?', (student,))
    info = cur.fetchone()
    add_info_Calendar(text = "Year: " + str(info[0]) + ", Div: " + str(info[1]) + ', Skill: ' + str(info[2]), calendarFrame = calendarFrame)
    sch = cur.execute('SELECT Scheduled From Students WHERE StudentID = ?', (student,))
    sch = cur.fetchone()
    # Only draw section blocks if the student has any scheduled load.
    if sch[0] > 0:
        sch_classes = cur.execute('''SELECT A.Name, C.ShortName, B.Name, D.Day, D.Start, D.End
            FROM Students A INNER JOIN Sections B
            ON A.StudentID = B.StudentID
            INNER JOIN Classes C
            ON B.ClassID = C.ClassID
            INNER JOIN Sections_Times E
            ON B.SectionID = E.SectionID
            INNER JOIN Times D
            ON E.TimeID = D.TimeID
            WHERE A.StudentID = ?''', (student,))
        sch_classes = cur.fetchall()
        for cl in sch_classes:
            # Multi-character day strings ("MWF") get one block per day.
            if len(cl[3]) > 1:
                for c in cl[3]:
                    try:
                        block_in_Calendar(text = cl[1] + " " + cl[2], open = 1, day = c, start = cl[4], end = cl[5], calendarFrame = calendarFrame)
                    except:
                        continue
            else:
                try:
                    block_in_Calendar(text = cl[1] + " " + cl[2], open = 1, day = cl[3], start = cl[4], end = cl[5], calendarFrame = calendarFrame)
                except:
                    continue
def leftselect(): #Select
    """Handler for selecting an item in the left listbox.  In 'class' mode:
    show the class calendar and list its sections in the middle box.  In
    'student' mode: show the student's calendar, list their assigned
    sections in the middle box, and all classes in the right box.
    (Python 2 source.)"""
    if scheduling == 'class':
        print 'leftselect-class'
        doViewClass()
        current = leftListbox.get(ANCHOR)[0]
        global current_class
        current_class = current
        chosenListbox.delete(0, END)
        # NOTE(review): bare attribute access below is a no-op statement.
        globalvars.database_path
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        classes = cur.execute('''
            SELECT B.Scheduled, B.Name, D.Day, D.Time
            FROM Classes A INNER JOIN Sections B
            ON A.ClassID = B.ClassID
            INNER JOIN Sections_Times C
            ON B.SectionID = C.SectionID
            INNER JOIN Times D
            ON C.TimeID = D.TimeID
            WHERE A.ShortName = ?''', (current_class,))
        classes = cur.fetchall()
        # Insert sections in middle box
        chosenListbox.insert(END, "any")
        for item in classes:
            #if item[0] != 0:
            #    item[0] = 1
            chosenListbox.insert(END, item)
    if scheduling == 'student':
        print 'leftselect-student'
        doCalendar(calendarFrame)
        current = leftListbox.get(ANCHOR)
        global current_student
        current_student = current
        # Row format is "StudentID: Name (Scheduled)" -- recover the id.
        stu = current.split(":")[0]
        chosenListbox.delete(0, END)
        # NOTE(review): bare attribute access below is a no-op statement.
        globalvars.database_path
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        cur.execute('''SELECT B.SectionID, C.ShortName, B.Name
            FROM Students A INNER JOIN Sections B
            ON A.StudentID = B.StudentID
            INNER JOIN Classes C
            ON B.ClassID = C.ClassID
            WHERE A.StudentID = ?''', (stu,))
        classes = cur.fetchall()
        doViewStudent(stu)
        # Insert assigned sections in middle box
        #chosenListbox.insert(END, "any")
        for item in classes:
            item = item[1] + " " + item[2]
            chosenListbox.insert(END, item)
        # Insert classes in right box
        # centerselect will select class to view sections
        openListbox.delete(0,END)
        globalvars.database_path
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        classes = cur.execute('SELECT ShortName, Name FROM Classes')
        classes = cur.fetchall()
        for item in classes:
            openListbox.insert(END, item)
# get students available for current section
# list in order of mat_prefs value
def centerselect(): #View
    """Handler for selecting an item in the middle listbox.  In 'class' mode:
    list candidate students in the right box -- first the not-fully-scheduled
    students, then an 'undergrad' divider, then fully scheduled ones -- each
    ranked by the preference-matrix score for the chosen section.  In
    'student' mode: list the chosen class's sections in the right box.
    (Python 2 source.)"""
    if scheduling == 'class':
        print 'centerselect-class'
        current = chosenListbox.get(ANCHOR)
        global current_section
        # NOTE(review): bare attribute access below is a no-op statement.
        globalvars.database_path
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        if current == "any":
            current_section = "any"
        else:
            current_section = current[1]
            cla = cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?',(current_class,))
            cla = cur.fetchone()[0]
            sec1 = cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ? and Name = ?', (cla, current_section))
            global sec
            sec = cur.fetchone()[0]
        students = cur.execute('SELECT DISTINCT Name, StudentID, Scheduled FROM Students WHERE Scheduled < 0.9') # ie less than fully scheduled
        students = cur.fetchall()
        global mat_prefs
        student_tuples = []
        # Score each student: (preference value, (id, name, scheduled), index).
        for i in range(len(students)):
            if current_section != "any":
                colnum = section_index[sec]
                stu = students[i][1]
                stuID = student_index[stu]
                student_tuples.append((int(globalvars.mat_prefs[stuID,colnum]),(students[i][1],students[i][0], students[i][2]),i)) # update from i to stuindex
            else:
                student_tuples.append((int(0),(students[i][1], students[i][0], students[i][2]),i))
        # sort p in order of highest value first
        openListbox.delete(0,END)
        if current_section != "any":
            student_tuples = sorted(student_tuples, key = lambda student:student[0], reverse = True)
        for item in student_tuples:
            item = str(item[0]) + "; " + str(item[1][1]) + " (" + str(item[1][2]) + ")"
            openListbox.insert(END, item)
        # Divider between partially and fully scheduled students.
        openListbox.insert(END, "undergrad")
        students = cur.execute('SELECT DISTINCT Name, StudentID, Scheduled FROM Students WHERE Scheduled > 0.9') # ie fully scheduled
        students = cur.fetchall()
        global mat_prefs
        student_tuples = []
        for i in range(len(students)):
            if current_section != "any":
                colnum = section_index[sec]
                stu = students[i][1]
                stuID = student_index[stu]
                student_tuples.append((int(globalvars.mat_prefs[stuID,colnum]),(students[i][1],students[i][0], students[i][2]),i)) # update from i to stuindex
            else:
                student_tuples.append((int(0),(students[i][1], students[i][0], students[i][2]),i))
        # sort p in order of highest value first
        if current_section != "any":
            student_tuples = sorted(student_tuples, key = lambda student:student[0], reverse = True)
        for item in student_tuples:
            item = str(item[0]) + "; " + str(item[1][1]) + " (" + str(item[1][2]) + ")"
            openListbox.insert(END, item)
    if scheduling == 'student':
        print 'centerselect-student'
        current = openListbox.get(ANCHOR)[0]
        print current
        global current_class
        current_class = current
        openListbox.delete(0, END)
        # NOTE(review): bare attribute access below is a no-op statement.
        globalvars.database_path
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        classes = cur.execute('''
            SELECT B.Scheduled, B.Name, D.Day, D.Time
            FROM Classes A INNER JOIN Sections B
            ON A.ClassID = B.ClassID
            INNER JOIN Sections_Times C
            ON B.SectionID = C.SectionID
            INNER JOIN Times D
            ON C.TimeID = D.TimeID
            WHERE A.ShortName = ?''', (current_class,))
        classes = cur.fetchall()
        # Insert sections in middle box
        openListbox.insert(END, "any")
        for item in classes:
            #if item[0] != 0:
            #    item[0] = 1
            openListbox.insert(END, item)
def get_class_value(SectionID):
    """Return the scheduling 'Worth' of the section with the given SectionID.

    Raises a TypeError (None[0]) if the section does not exist, matching the
    behaviour of the other single-row lookups in this file.
    """
    conn = sqlite3.connect(globalvars.database_path)
    try:
        cur = conn.cursor()
        cur.execute('SELECT Worth FROM Sections WHERE SectionID = ?', (SectionID,))
        worth = cur.fetchone()[0]
    finally:
        # read-only query: nothing to commit, but always release the handle
        # (the original leaked one connection per call)
        conn.close()
    return worth
def openaddselect(): #Schedule
    """Schedule the student/section pair currently highlighted in the UI.

    Behaviour depends on the global `scheduling` mode:
    - 'class':   a class is already selected; the listbox row names a student.
    - 'student': a student is already selected; the listbox row names a section.
    In both modes the pseudo-section "any" records the student against every
    section of the class via mat_add, instead of pinning one section via mat_yes.
    Side effects: updates the mat_yes/mat_add/mat_no matrices, the Students and
    Sections tables, the preference matrix (via addPrefForClass) and the listboxes.
    """
    # schedule student to class
    if scheduling == 'class':
        print 'add-class'
        # listbox row looks like "pref; Name (scheduled)" -> extract Name
        current = openListbox.get(ANCHOR)
        current = current.split("; ")[1]
        current = current.split(" (")[0]
        sqlite3.connect(globalvars.database_path)  # NOTE(review): stray extra connect, result discarded
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current,))
        stu = cur.fetchone()[0]
        global student_index
        stuID = student_index[stu]  # row index of this student in the matrices
        # byClass add student to specific section of class
        if current_section != "any":
            cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
                        Classes B ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ? and A.Name = ?
                        ''', (current_class,current_section))
            sec = cur.fetchone()[0]
            global mat_yes
            global mat_no
            global section_index
            secID = section_index[sec] # what's sec from
            # pin this exact section and clear any block on it
            mat_yes[stuID, secID] = 1
            mat_no[stuID, secID] = 0
            addPrefForClass(stu)  # boost prefs so the auto-scheduler keeps this choice
            value = get_class_value(sec)
            oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
            oldvalue = cur.fetchone()[0]
            # add the section's worth to the student's running Scheduled total
            c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(value) + float(oldvalue), stu) )
            c = cur.execute('UPDATE Sections SET Scheduled = ? WHERE SectionID = ?' , (1, sec))
            c = cur.execute('UPDATE Sections SET StudentID = ? WHERE SectionID = ?', (stu, sec)) # wonder why this doesn't work with an AND statement
            conn.commit()
        # byClass add student to any section of class
        if current_section == "any":
            print 'any'
            addPrefForClass(stu)
            global mat_yes
            global mat_add
            global mat_no
            cur.execute('SELECT A.SectionID FROM Sections A INNER JOIN Classes B ON A.ClassID = B.ClassID WHERE B.ShortName = ?',(current_class,))
            secs = cur.fetchall()
            global section_index
            # mark every section of the class as acceptable (mat_add),
            # without pinning any one of them (mat_yes stays 0)
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_add[stuID,secID] = 1
                mat_no[stuID,secID] = 0
            print secs[0][0]
            # all sections of one class share a worth; use the first section's
            value = get_class_value(secs[0][0])
            oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(oldvalue) + float(value), stu) )
            conn.commit()
        doViewClass()
        message = "Student Added to Class!"
        centerselect()
        d.set(message)
    if scheduling == 'student':
        print 'add-student'
        current = openListbox.get(ANCHOR)
        global current_section
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        if current == "any":
            current_section = "any"
        else:
            # listbox row is a (Scheduled, Name, Day, Time) tuple; [1] is the section name
            current_section = current[1]
        # current_student looks like "StudentID: Name" -> extract the numeric id
        stu = current_student.split(":")[0]
        stu = int(stu)
        global student_index
        stuID = student_index[stu]
        # byStudent add student to single section of class
        if current_section != "any":
            cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
                        Classes B ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ? and A.Name = ?
                        ''', (current_class,current_section))
            sec = cur.fetchone()[0]
            # mats
            global mat_yes
            global mat_no
            secID = section_index[sec] # what's sec from
            mat_yes[stuID, secID] = 1
            mat_no[stuID, secID] = 0
            addPrefForClass(stu)
            # database
            value = get_class_value(sec)
            oldvalue = cur.execute('''SELECT Scheduled FROM Students
                        WHERE StudentID = ?''',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('''UPDATE Students SET Scheduled = ?
                        WHERE StudentID = ?''',(float(value) + float(oldvalue), stu) )
            c = cur.execute('''UPDATE Sections SET Scheduled = ?
                        WHERE SectionID = ?''' , (1, sec))
            c = cur.execute('''UPDATE Sections SET StudentID = ?
                        WHERE SectionID = ?''', (stu, sec))
            # wonder why this doesn't work with an AND statement
            # Insert assigned sections in middle box
            #chosenListbox.insert(END, "any")
            cur.execute('''SELECT B.SectionID, C.ShortName, B.Name
                        FROM Students A INNER JOIN Sections B
                        ON A.StudentID = B.StudentID
                        INNER JOIN Classes C
                        ON B.ClassID = C.ClassID
                        WHERE A.StudentID = ?''', (stu,))
            classes = cur.fetchall()
            # rebuild the middle listbox from the student's current assignments
            chosenListbox.delete(0, END)
            for item in classes:
                item = item[1] + " " + item[2]
                chosenListbox.insert(END, item)
            conn.commit()
        #byStudent add student to any section of class
        if current_section == "any":
            print 'any'
            # add in mats
            addPrefForClass(stu)
            global mat_yes
            global mat_add
            global mat_no
            cur.execute('''SELECT A.SectionID FROM Sections A
                        INNER JOIN Classes B
                        ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ?''',(current_class,))
            secs = cur.fetchall()
            global section_index
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_add[stuID,secID] = 1
                mat_no[stuID,secID] = 0
            # add in database
            value = get_class_value(secs[0][0])
            oldvalue = cur.execute('''SELECT Scheduled FROM Students
                        WHERE StudentID = ?''',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('''UPDATE Students SET Scheduled = ?
                        WHERE StudentID = ?''',(float(oldvalue) + float(value), stu) )
            conn.commit()
            chosenListbox.insert(END, 'any ' + current_class)
        doViewStudent(stu)
        message = "Student Added to Class!"
        d.set(message)
def openremoveselect(): #Remove
# remove student from class
if scheduling == 'class':
print 'remove-class'
current = openListbox.get(ANCHOR)
current = current.split("; ")[1]
current = current.split(" (")
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current[0],))
stu = cur.fetchone()[0]
global student_index
stuID = student_index[stu]
# byClass remove student from single section
if current_section != "any":
cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
Classes B ON A.ClassID = B.ClassID
WHERE B.ShortName = ? and A.Name = ?
''', (current_class,current_section))
sec = cur.fetchone()[0]
print sec, stu
# mats
global mat_yes
global section_index
secID = section_index[sec]
mat_yes[stuID, secID] = 0
# remove from database
value = get_class_value(sec)
oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
oldvalue = cur.fetchone()[0]
print value, oldvalue
cur.execute('''UPDATE Students SET Scheduled = ?
WHERE StudentID = ?''',(oldvalue - value, stu) )
cur.execute('''Update Sections SET Scheduled = ?
WHERE SectionID = ?''' , (0, sec))
cur.execute('''Update Sections SET StudentID = ?
WHERE SectionID = ?''' , (stu, sec))
conn.commit()
# byClass remove student from any section
if current_section == "any":
print 'any'
# remove scheduled from mats
global mat_yes
global mat_add
cur.execute('''SELECT A.SectionID FROM Sections A
INNER JOIN Classes B
ON A.ClassID = B.ClassID
WHERE B.ShortName = ?''',(current_class,))
secs = cur.fetchall()
global section_index
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_add[stuID,secID] = 0
# remove scheduled from database
value = get_class_value(secs[0][0])
oldvalue = cur.execute('''SELECT Scheduled FROM Students
WHERE StudentID = ?''',(stu, ) )
oldvalue = cur.fetchone()[0]
c = cur.execute('''Update Students SET Scheduled = ?
WHERE StudentID = ?''',(float(oldvalue) - float(value), stu) )
conn.commit()
doViewClass()
message = "Student Removed from Class!"
d.set(message)
if scheduling == 'student':
print 'remove-student'
current = openListbox.get(ANCHOR)
global current_section
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
if current == "any":
current_section = "any"
else:
current_section = current[1]
stu = current_student.split(":")[0]
stu = int(stu)
global student_index
stuID = student_index[stu]
# byStudent remove student from single section of class
if current_section != "any":
cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
Classes B ON A.ClassID = B.ClassID
WHERE B.ShortName = ? and A.Name = ?
''', (current_class,current_section))
sec = cur.fetchone()[0]
# mats
global mat_yes
global mat_no
global section_index
secID = section_index[sec] # what's sec from
mat_yes[stuID, secID] = 1
mat_no[stuID, secID] = 0
addPrefForClass(stu)
# remove from database
value = get_class_value(sec)
oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
oldvalue = cur.fetchone()[0]
c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(value) - float(oldvalue), stu) )
c = cur.execute('UPDATE Sections SET Scheduled = ? WHERE SectionID = ?' , (0, sec))
c = cur.execute('UPDATE Sections SET StudentID = ? WHERE SectionID = ?', (0, sec)) # wonder why this doesn't work with an AND statement
# Insert assigned sections in middle box
#chosenListbox.insert(END, "any")
cur.execute('''SELECT B.SectionID, C.ShortName, B.Name
FROM Students A INNER JOIN Sections B
ON A.StudentID = B.StudentID
INNER JOIN Classes C
ON B.ClassID = C.ClassID
WHERE A.StudentID = ?''', (stu,))
classes = cur.fetchall()
chosenListbox.delete(0, END)
for item in classes:
item = item[1] + " " + item[2]
chosenListbox.insert(END, item)
conn.commit()
# byStudent remove from any section
if current_section == "any":
print 'any'
global current_class
removePrefForClass(stu)
# mats
global mat_yes
global mat_add
global mat_no
cur.execute('SELECT A.SectionID FROM Sections A INNER JOIN Classes B ON A.ClassID = B.ClassID WHERE B.ShortName = ?',(current_class,))
secs = cur.fetchall()
global section_index
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_add[stuID,secID] = 1
mat_no[stuID,secID] = 0
# remove from database
value = get_class_value(secs[0][0])
oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
oldvalue = cur.fetchone()[0]
c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(oldvalue) - float(value), stu) )
conn.commit()
items = chosenListbox.get(0,END)
a = items.index('any ' + current_class)
chosenListbox.delete(a)
doViewStudent(stu)
message = "Student Removed from Class!"
d.set(message)
def openaddblockselect(): #Add Block
    """Block the highlighted student from a section (or a whole class).

    Sets mat_no = 1 (and clears mat_yes) for the targeted section(s) so the
    automatic scheduler will never place the student there.  Blocking a whole
    class ("any") also removes the class preference boost via
    removePrefForClass.  Only the matrices are updated; the Students/Sections
    tables are left untouched.
    """
    # block student from class
    if scheduling == 'class':
        print 'block-class'
        # listbox row looks like "pref; Name (scheduled)" -> extract Name
        current = openListbox.get(ANCHOR)
        current = current.split("; ")[1]
        current = current.split(" (")
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current[0],))
        stu = cur.fetchone()[0]
        global student_index
        stuID = student_index[stu]
        # byClass block student from single section of class
        if current_section != "any":
            cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
                        Classes B ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ? and A.Name = ?
                        ''', (current_class,current_section))
            sec = cur.fetchone()[0]
            global mat_yes
            global mat_no
            global section_index
            secID = section_index[sec]
            mat_yes[stuID, secID] = 0
            mat_no[stuID, secID] = 1
        # byClass block of student from any section in class
        if current_section == 'any':
            print 'any'
            # undo the preference boost so the blocked class is not favoured
            removePrefForClass(stu)
            cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
            ClassID = cur.fetchone()[0]
            cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?'
                        , (ClassID,))
            secs = cur.fetchall()
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_no[stuID,secID] = 1
        conn.commit()  # NOTE(review): nothing was written to the DB in this branch
    # byStudent block from class
    if scheduling == 'student':
        print 'block-student'
        current = openListbox.get(ANCHOR)
        global current_section
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        if current == "any":
            current_section = "any"
        else:
            current_section = current[1]
        # current_student looks like "StudentID: Name"
        stu = current_student.split(":")[0]
        stu = int(stu)
        global student_index
        stuID = student_index[stu] #where stu from
        # byStudent block from single section of class
        if current_section != 'any':
            global mat_yes
            global mat_no
            global section_index
            cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?',(current_class,))
            cla = cur.fetchone()[0]
            cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ? and Name = ?', (cla, current_section))
            sec = cur.fetchone()[0]
            secID = section_index[sec]
            mat_yes[stuID,secID] = 0
            mat_no[stuID,secID] = 1
        #by Student block of any section of class
        if current_section == 'any':
            print 'any'
            global mat_yes
            global mat_no
            global section_index
            cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
            cla = cur.fetchone()[0]
            cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?'
                        , (cla,))
            secs = cur.fetchall()
            # block every section of the class
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_no[stuID,secID] = 1
            removePrefForClass(stu)
    message = "Student Blocked From Class!"
    d.set(message)
def openremoveblockselect(): #Remove Block
# remove block from student from class
if scheduling == 'class':
print 'rmblock-class'
current = openListbox.get(ANCHOR)
current = current.split("; ")[1]
current = current.split(" (")
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current[0],))
stu = cur.fetchone()[0]
global student_index
stuID = student_index[stu]
# byClass remove block from single section of class
if current_section != 'any':
cur.execute('''SELECT A.SectionID FROM Sections A
INNER JOIN Classes B ON A.ClassID = B.ClassID
WHERE B.ShortName = ? and A.Name = ?
''', (current_class,current_section))
sec = cur.fetchone()
global mat_no
global section_index
secID = section_index[sec]
mat_no[stuID, secID] = 0
# byClass remove block from any section of class
if current_section == 'any':
print 'any'
addPrefForClass(stu)
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
ClassID = cur.fetchone()[0]
cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?'
, (ClassID,))
secs = cur.fetchall()
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_no[stuID,secID] = 0
conn.commit()
if scheduling == 'student':
print 'rmblock-student'
current = openListbox.get(ANCHOR)
global current_section
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
if current == "any":
current_section = "any"
print 'any-not yet supported'
else:
current_section = current[1]
stu = current_student.split(":")[0]
stu = int(stu)
global student_index
stuID = student_index[stu]
# byStudent remove block from single section of class
if current_section != 'any':
global mat_yes
global mat_no
global section_index
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?',(current_class,))
cla = cur.fetchone()[0]
cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ? and Name = ?', (cla, current_section))
sec = cur.fetchone()[0]
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_no[stuID,secID] = 0
# byStudent remove block from any section of class
if current_section == 'any':
global mat_add
global section_index
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
cla = cur.fetchone()[0]
# get sections of class
cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?', (cla, ))
secs = cur.fetchall()
global mat_add
global section_index
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID, secID] = 0
mat_no[stuID, secID] = 0
mat_add[stuID,secID] = 0
addPrefForClass(stu)
message = "Student Block Removed!"
d.set(message)
def addPrefForClass(student):
    """Boost this student's preference for every section of the currently
    selected class (`current_class`) by a large constant so the automatic
    scheduler strongly favours the manual assignment.

    Inverse of removePrefForClass().
    """
    conn = sqlite3.connect(globalvars.database_path)
    try:
        cur = conn.cursor()
        cur.execute('''SELECT A.SectionID FROM Classes B
                    INNER JOIN Sections A
                    ON A.ClassID = B.ClassID
                    WHERE B.ShortName = ?''', (current_class,))
        secs = cur.fetchall()
    finally:
        # read-only query; release the handle (the original leaked it)
        conn.close()
    stuID = student_index[student]
    for s in secs:
        secID = section_index[s[0]]
        cpref = globalvars.mat_prefs[stuID, secID]
        # +10000 dwarfs any survey preference value, effectively pinning
        # this class during automated scheduling
        globalvars.mat_prefs[stuID, secID] = int(cpref) + 10000
def removePrefForClass(student):
    """Undo the +10000 preference boost that addPrefForClass() applied to
    every section of the currently selected class (`current_class`).
    """
    conn = sqlite3.connect(globalvars.database_path)
    try:
        cur = conn.cursor()
        cur.execute('''SELECT A.SectionID FROM Classes B
                    INNER JOIN Sections A
                    ON A.ClassID = B.ClassID
                    WHERE B.ShortName = ?''', (current_class,))
        secs = cur.fetchall()
    finally:
        # read-only query; release the handle (the original leaked it)
        conn.close()
    stuID = student_index[student]
    for s in secs:
        secID = section_index[s[0]]
        cpref = globalvars.mat_prefs[stuID, secID]
        # symmetric with addPrefForClass: subtract the same boost constant
        globalvars.mat_prefs[stuID, secID] = int(cpref) - 10000
# File functions
def doNewSchedule():
    """Reset all scheduling state and start a fresh schedule.

    Loads (or regenerates) the preference matrix, rebuilds the index maps
    and manual-assignment matrices, and zeroes the Scheduled/StudentID
    columns in the database.
    """
    print 'doNewSchedule'
    import numpy as np
    import globalvars
    message = "Starting a New Schedule"
    d.set(message)
    # Data
    try:
        # open from file
        globalvars.mat_prefs = np.load(globalvars.mat_prefs_path)
    except:
        # generate if unable to open
        # NOTE(review): bare except also hides unrelated errors — confirm intended
        message = "Generating Missing Files"
        d.set(message)
        globalvars.mat_prefs = matrices.matrix_pref(d)
    # rebuild ID -> matrix-index maps and zeroed manual-assignment matrices
    global section_index
    section_index = matrices.section_index()
    global student_index
    student_index = matrices.student_index()
    global mat_yes
    mat_yes = matrices.matrix_schedule_manual()
    global mat_add
    mat_add = matrices.matrix_schedule_manual()
    global mat_no
    mat_no = matrices.matrix_schedule_manual()
    try:
        globalvars.matrix_sections = np.load(globalvars.sec_sec_matrix_path)
    except:
        try:
            message = "Generating Missing Files"
            d.set(message)
            globalvars.matrix_sections = matrices.matrix_sections()
        except:
            # last resort: empty placeholder so the UI can still run
            globalvars.matrix_sections = np.zeros((100,100))
    globalvars.matrix_sections.flags.writeable = True
    global output
    output = "output"  # default save directory
    # declare the remaining shared state used across the UI callbacks
    global scheduling
    global mat_sch
    global current_class
    global current_student
    global current_section
    global sec
    # update database: clear every scheduled assignment
    globalvars.database_path
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    cur.execute('UPDATE Sections SET Scheduled = 0')
    cur.execute('UPDATE Sections SET StudentID = 0')
    cur.execute('UPDATE Students SET Scheduled = 0')
    conn.commit()
    message = "New Schedule"
    d.set(message)
def doOpenSchedule(output2 = None):
    """Load a previously saved schedule from a directory of .npy files.

    If output2 is None the user is prompted for the directory name via a
    small Toplevel dialog; otherwise output2 is used directly.  Restores the
    manual-assignment matrices, section matrix and preference matrix, and
    rebuilds the index maps.
    """
    print 'doOpenSchedule'
    import os
    import errno
    def make_sure_path_exists(path):
        # mkdir -p semantics: ignore "already exists", re-raise anything else
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
    class MyDialog:
        # minimal modal text-entry dialog; the entered value ends up in self.value
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Enter Name To Open:')
            self.myLabel.pack()
            self.myEntryBox = Entry(top)
            self.myEntryBox.pack()
            self.mySubmitButton = Button(top, text='Open', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.value = self.myEntryBox.get()
            self.top.destroy()
    def onClick():
        # block until the dialog is closed, then return what was typed
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    if output2 is None:
        global output
        output = onClick()
    else:
        global output
        output = output2
    make_sure_path_exists(output)
    # reload every persisted matrix from the chosen directory
    global mat_yes
    mat_yes = np.load(output + "/mat_yes.npy")
    global mat_add
    mat_add = np.load(output + "/mat_add.npy")
    global mat_no
    mat_no = np.load(output + "/mat_no.npy")
    globalvars.matrix_sections = np.load(output + "/matrix_sections.npy")
    globalvars.mat_prefs = np.load(output + "/mat_prefs.npy") # matrices.matrix_pref()
    # index maps are rebuilt from the database, not loaded from disk
    global section_index
    section_index = matrices.section_index()
    global student_index
    student_index = matrices.student_index()
    # NOTE(review): "Openned" is a typo, but it is a user-facing runtime string
    message = "Openned Schedule from " + output
    d.set(message)
def doSave(output2):
print 'doSave'
np.save(output2 + "/mat_yes.npy", mat_yes)
np.save(output2 + "/mat_add.npy", mat_add)
np.save(output2 + "/mat_no.npy", mat_no)
np.save(output2 + "/matrix_sections.npy", globalvars.matrix_sections)
np.save(output2 + "/mat_prefs.npy", globalvars.mat_prefs)
message = "Schedule Saved"
d.set(message)
def doSaveAs():
    """Prompt for a directory name, create it if needed, and save all
    schedule matrices there.  Also updates the global `output` so later
    plain doSave() calls reuse the same directory."""
    print 'doSaveAs'
    import os
    import errno
    def make_sure_path_exists(path):
        # mkdir -p semantics: ignore "already exists", re-raise anything else
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
    class MyDialog:
        # minimal modal text-entry dialog; the entered value ends up in self.value
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Enter Name To Save Output:')
            self.myLabel.pack()
            self.myEntryBox = Entry(top)
            self.myEntryBox.pack()
            self.mySubmitButton = Button(top, text='Save', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.value = self.myEntryBox.get()
            self.top.destroy()
    def onClick():
        # block until the dialog is closed, then return what was typed
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    global output
    output = onClick()
    make_sure_path_exists(output)
    #np.save(output + "/automats", automats)
    np.save(output + "/mat_yes.npy",mat_yes)
    np.save(output + "/mat_add.npy", mat_add)
    np.save(output + "/mat_no.npy", mat_no)
    np.save(output + "/mat_prefs.npy", globalvars.mat_prefs)
    np.save(output + "/matrix_sections.npy", globalvars.matrix_sections)
    message = "Schedule Saved"
    d.set(message)
# Main loop
root = Tk()
# Layout Frames: nav bar on top, status bar at the bottom, calendar between
navFrame = Frame(root)
statusFrame = Frame(root)
calendarFrame = Frame(root)
navFrame.pack(side = TOP)
statusFrame.pack(side = BOTTOM, fill = X)
calendarFrame.pack(side = BOTTOM, fill = BOTH)
doCalendar(calendarFrame)
d = StatusBar(statusFrame)
d.pack(side = LEFT)
# left listbox: classes or students depending on the scheduling mode
leftListbox = Listbox(navFrame)
leftListbox.pack(side = LEFT)
buttonSelectFrame = Frame(navFrame)
buttonSelectFrame.pack(side = LEFT)
bselect = Button(buttonSelectFrame, text="Select", command=lambda : leftselect()) # lambda necessary to prevent call upon opening
bselect.pack(side = TOP)
# middle listbox: the current selection's assignments
chosenListbox = Listbox(navFrame)
chosenListbox.pack(side = LEFT)
buttonFrame = Frame(navFrame)
buttonFrame.pack(side = LEFT)
bAdd = Button(buttonFrame, text="View", command=lambda : centerselect())
bAdd.pack(side = TOP)
# right listbox: candidates to schedule/remove/block
openListbox = Listbox(navFrame)
openListbox.pack(side = LEFT)
buttonFrame2 = Frame(navFrame)
buttonFrame2.pack(side = LEFT)
bRemove = Button(buttonFrame2, text="Schedule", command=lambda : openaddselect())
bRemove.pack(side = TOP)
b2Remove = Button(buttonFrame2, text="Remove", command= lambda : openremoveselect())
b2Remove.pack(side = TOP)
b3Remove = Button(buttonFrame2, text="Add Block", command= lambda : openaddblockselect())
b3Remove.pack(side = TOP)
b4Remove = Button(buttonFrame2, text="Remove Block", command= lambda : openremoveblockselect())
b4Remove.pack(side = TOP)
# Menu Bar
menu = Menu(root)
root.config(menu = menu)
filemenu = Menu(menu)
updatemenu = Menu(menu)
schmenu = Menu(menu)
googlemenu = Menu(menu)
menu.add_cascade(label = "File", menu = filemenu)
menu.add_cascade(label = "Schedule", menu = schmenu)
menu.add_cascade(label = "Update", menu = updatemenu)
menu.add_cascade(label = "Google Survey", menu = googlemenu)
##File Menu
filemenu.add_command(label = "New Schedule", command=lambda : doNewSchedule())
filemenu.add_command(label = "Open Schedule", command = lambda: doOpenSchedule())
filemenu.add_separator()
filemenu.add_command(label = "Save Schedule", command = lambda : doSave(output))
filemenu.add_command(label = "Save Schedule As", command = lambda : doSaveAs())
filemenu.add_separator()
filemenu.add_command(label = "Export Email", command = lambda: export.doExportMail(output2 = output, d = d))
filemenu.add_command(label = "Export Susan", command = lambda: export.doExportSusan(output2 = output, d = d))
filemenu.add_command(label = "Export Linda", command = lambda: export.doExportLinda(output2 = output, d = d))
filemenu.add_command(label = "Export All", command = lambda: export.doExportAll(output2 = output, d = d))
filemenu.add_separator()
filemenu.add_command(label = "Exit", command = doQuit)
## Schedule Menu
schmenu.add_command(label = "By Class", command = lambda : doByClass())
schmenu.add_command(label = "By Student", command = lambda : doByStudent())
schmenu.add_separator()
schmenu.add_command(label = "Automate (Fast)", command = lambda : doAutomateFast())
# NOTE(review): "Automate (Best)" is wired to doAutomateFast() as well — confirm intended
schmenu.add_command(label = "Automate (Best)", command = lambda : doAutomateFast())
## Update Menu
updatemenu.add_command(label = "Download Classes", command = lambda : doDownloadClasses())
updatemenu.add_command(label = "Update Class Worths", command = lambda : doUpdateClassWorth())
updatemenu.add_command(label = "Update Classes", command = lambda : doUpdateClasses())
updatemenu.add_command(label = "Update Students", command = lambda: doUpdateStudents())
## Google Survey Menu
googlemenu.add_command(label = "List Classes", command = lambda : doListClasses())
googlemenu.add_command(label = "List Professors", command = lambda : doListProfessors())
# Run at startup
#doNewSchedule()
import os
# per-user application data directory: %APPDATA% on Windows, ~/.TAScheduling elsewhere
try:
    dir_path = os.path.join(os.environ['APPDATA'], 'TAScheduling')
except KeyError:
    dir_path = os.path.join(os.environ['HOME'], '.TAScheduling')
if not os.path.exists(dir_path):
    os.makedirs(dir_path)
globalvars.database_path = os.path.join(dir_path, 'tascheduling.db')
globalvars.mat_prefs_path = os.path.join(dir_path, 'student_preferences.npy')
globalvars.sec_sec_matrix_path = os.path.join(dir_path, 'section_section_matrix.npy')
globalvars.para_path = os.path.join(dir_path, 'parameters.txt')
# touch the database so the file exists before any query runs
sqlite3.connect(globalvars.database_path)
import errno
def make_sure_path_exists(path):
    # mkdir -p semantics: ignore "already exists", re-raise anything else
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
make_sure_path_exists('data/')
root.mainloop() | StarcoderdataPython |
223741 | #!/usr/bin/env python
#
# Adapted from an example by <NAME> at:
#
# http://www.michael-noll.com/wiki/Writing_An_Hadoop_MapReduce_Program_In_Python
#
'''
import sys, urllib, re
import numpy as np
# Read pairs as lines of input from STDIN
#for line in sys.stdin:
# We assume that we are fed a series of URLs, one per line
# url = line.strip()
# Fetch the content and output the title (pairs are tab-delimited)
dataOriginal= np.loadtxt('wine.data')
print "Shah","\t", "Anant"
'''
import hashlib
import Image
import sys
data=list()
imagelist=list()
imgname=list()
# Hadoop-streaming mapper input: each stdin line is "key<TAB>image_path";
# collect the image paths (field [1]) for the feature loop below
for line in sys.stdin:
    dataline = line.strip()
    imgname=dataline.split("\t")
    #imagelist.append(dataline)
    imagelist.append(imgname[1])
#print "Keyr1","\tThis is length",len(imagelist)
#print "Keyr1","\t",name[1]
def md5Checksum(filePath):
    """Return the hex MD5 digest of the file at filePath.

    Reads in fixed-size chunks so arbitrarily large files can be hashed
    without loading them fully into memory.
    """
    m = hashlib.md5()
    # 'with' guarantees the handle is closed (the original never closed it)
    with open(filePath, 'rb') as fh:
        while True:
            data = fh.read(8192)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
# For each collected image, compute simple colour features (per-channel row
# and column means) and emit them as mapper output lines.
for z in range(len(imagelist)):
    #print 'The MD5 checksum of ',imagelist[i],'is ', md5Checksum(imagelist[i])
    im = Image.open(imagelist[z])
    pix= im.load()
    size= im.size  # (width, height)
    #print 'Column: ',size[0],'Rows :',size[1]
    #print pix[100,100][1]
    #calculate row mean
    # per-row mean of each RGB channel, then the mean of those row means
    # (Python 2 integer division throughout — means are truncated)
    rowmeanR=list()
    rowmeanG=list()
    rowmeanB=list()
    csumr=0
    csumg=0
    csumb=0
    for r in range(size[1]):
        csumr=0
        csumg=0
        csumb=0
        for c in range(size[0]):
            csumr=csumr+pix[c,r][0]
            csumg=csumg+pix[c,r][1]
            csumb=csumb+pix[c,r][2]
        rowmeanR.append(csumr/size[0])
        rowmeanG.append(csumg/size[0])
        rowmeanB.append(csumb/size[0])
    csumr=0
    csumg=0
    csumb=0
    for i in range(len(rowmeanR)):
        csumr=csumr+rowmeanR[i]
    for i in range(len(rowmeanG)):
        csumg=csumg+rowmeanG[i]
    for i in range(len(rowmeanB)):
        csumb=csumb+rowmeanB[i]
    ROWMEAN_R=csumr/len(rowmeanR)
    ROWMEAN_G=csumg/len(rowmeanG)
    ROWMEAN_B=csumb/len(rowmeanB)
    #calculate column mean
    # same as above but sweeping columns first
    colmeanR=list()
    colmeanG=list()
    colmeanB=list()
    rsumr=0
    rsumg=0
    rsumb=0
    for c in range(size[0]):
        rsumr=0
        rsumg=0
        rsumb=0
        for r in range(size[1]):
            rsumr=rsumr+pix[c,r][0]
            rsumg=rsumg+pix[c,r][1]
            rsumb=rsumb+pix[c,r][2]
        colmeanR.append(rsumr/size[1])
        colmeanG.append(rsumg/size[1])
        colmeanB.append(rsumb/size[1])
    rsumr=0
    rsumg=0
    rsumb=0
    for i in range(len(colmeanR)):
        rsumr=rsumr+colmeanR[i]
    for i in range(len(colmeanG)):
        rsumg=rsumg+colmeanG[i]
    for i in range(len(colmeanB)):
        rsumb=rsumb+colmeanB[i]
    COLMEAN_R=rsumr/len(colmeanR)
    COLMEAN_G=rsumg/len(colmeanG)
    COLMEAN_B=rsumb/len(colmeanB)
    #calculate DCT Row mean
    # NOTE(review): despite the name, this recomputes the plain row mean —
    # no DCT is taken, and the result is never emitted; confirm intent
    dctrowmeanR=list()
    dctrowmeanG=list()
    dctrowmeanB=list()
    csumr=0
    csumg=0
    csumb=0
    for r in range(size[1]):
        csumr=0
        csumg=0
        csumb=0
        for c in range(size[0]):
            csumr=csumr+pix[c,r][0]
            csumg=csumg+pix[c,r][1]
            csumb=csumb+pix[c,r][2]
        dctrowmeanR.append(csumr/size[0])
        dctrowmeanG.append(csumg/size[0])
        dctrowmeanB.append(csumb/size[0])
    csumr=0
    csumg=0
    csumb=0
    for i in range(len(dctrowmeanR)):
        csumr=csumr+dctrowmeanR[i]
    for i in range(len(dctrowmeanG)):
        csumg=csumg+dctrowmeanG[i]
    for i in range(len(dctrowmeanB)):
        csumb=csumb+dctrowmeanB[i]
    DCT_ROWMEAN_R=csumr/len(dctrowmeanR)
    DCT_ROWMEAN_G=csumg/len(dctrowmeanG)
    DCT_ROWMEAN_B=csumb/len(dctrowmeanB)
    #select category
    # class label is derived from the numeric filename prefix:
    # 1-5 -> 1, 6-10 -> 2, 11-15 -> 3 (default 1)
    category=1
    tokens = imagelist[z].split('.')
    if int(tokens[0]) >= 1:
        if int(tokens[0]) < 6:
            category=1
    if (int(tokens[0]) >= 6) and (int(tokens[0]) < 11):
        category=2
    if (int(tokens[0]) >= 11) and (int(tokens[0]) < 16):
        category=3
    #print features of this image
    #print category,"\t",category,",",ROWMEAN_R,",",ROWMEAN_G,",",ROWMEAN_B,",",COLMEAN_R,",",COLMEAN_G,",",COLMEAN_B
    outstr=str(category)+","+str(ROWMEAN_R)+","+str(ROWMEAN_G)+","+str(ROWMEAN_B)+","+str(COLMEAN_R)+","+str(COLMEAN_G)+","+str(COLMEAN_B)
    # emit the same record under three reducer keys
    print "keyr1","\t",outstr
    print "keyr2","\t",outstr
    print "keyr3","\t",outstr
| StarcoderdataPython |
203115 | <gh_stars>100-1000
# Dash packages
import dash_bootstrap_components as dbc
import dash_html_components as html

# shared Dash app instance; NOTE(review): unused in this fragment —
# presumably needed so page callbacks can register against it
from app import app


###############################################################################
########### PAGE 2 LAYOUT ###########
###############################################################################

# Layout rendered when the user navigates to page 2 of the multi-page app
layout = dbc.Container([

    html.H2('Page 2 Layout'),
    html.Hr(),

], className="mt-4")
| StarcoderdataPython |
113766 | <reponame>Tongjilibo/bert4torch
import math
from typing import Callable, Iterable, Optional, Tuple, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule with linear warmup followed by linear decay.

    The learning-rate multiplier ramps linearly from 0 to 1 over
    ``num_warmup_steps`` steps, then decays linearly from 1 back to 0 over
    the remaining ``num_training_steps - num_warmup_steps`` steps (clamped
    at 0 afterwards).

    Args:
        optimizer: wrapped optimizer whose learning rate is scheduled.
        num_warmup_steps: number of warmup steps, typically
            ``num_training_steps * warmup_proportion`` (0.05-0.15 suggested).
        num_training_steps: total number of training steps, typically
            ``train_batches * num_epoch``.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        torch.optim.lr_scheduler.LambdaLR applying the multiplier above.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # warmup phase: 0 -> 1
            return current_step / float(max(1, num_warmup_steps))
        # decay phase: 1 -> 0, never below 0 once past num_training_steps
        remaining = num_training_steps - current_step
        decay_span = max(1, num_training_steps - num_warmup_steps)
        return max(0.0, remaining / float(decay_span))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(Optimizer):
    """
    Adam with decoupled weight decay (AdamW).
    <https://arxiv.org/abs/1711.05101>`__.

    Args:
        params (:obj:`Iterable[torch.nn.parameter.Parameter]`):
            parameters (or parameter groups) to optimize.
        lr (:obj:`float`, `optional`, defaults to 1e-3):
            learning rate.
        betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
            Adam's beta coefficients (b1, b2).
        eps (:obj:`float`, `optional`, defaults to 1e-6):
            Adam's epsilon, for numerical stability.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            decoupled weight-decay coefficient.
        correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            apply Adam's bias correction (the original TF BERT did not, i.e.
            used False; True may converge more stably).

    Example:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer
                        if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer
                        if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5, correct_bias=False)
    """

    def __init__(
        self,
        params: Iterable[torch.nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
    ):
        # validate hyperparameters before handing them to the base Optimizer
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0[")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0[")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure: Callable = None):
        """
        Perform a single optimization step.

        Args:
            closure (:obj:`Callable`, `optional`):
                closure that re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]

                # lazy state initialization on the first step for this param
                if len(state) == 0:
                    state["step"] = 0
                    # exponential moving average of gradients (first moment)
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # exponential moving average of squared gradients (second moment)
                    state["exp_avg_sq"] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                # decay the first and second moment running averages in place
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])

                step_size = group["lr"]
                # bias correction; skipped (correct_bias=False) to mimic the
                # original TF BERT implementation
                if group["correct_bias"]:
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # decoupled weight decay: applied directly to the weights so
                # every parameter shrinks by the same proportion (equivalent
                # to L2 regularization under plain SGD), avoiding the poor
                # interaction between L2 and Adam's m/v statistics
                if group["weight_decay"] > 0.0:
                    p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])

        return loss
class ExponentialMovingAverage():
    '''
    Exponential moving average (EMA) of the model weights.

    Not to be confused with the first/second moment moving averages kept by
    adaptive optimizers such as Adam -- this class smooths the *parameters*.

    Example:
        # initialization
        ema = ExponentialMovingAverage(model, 0.999)

        # during training, call update() right after the optimizer step
        def train():
            optimizer.step()
            ema.update()

        # before evaluation apply the EMA weights; restore them afterwards
        def evaluate():
            ema.apply_ema_weights()
            # ... evaluate ...
            # call torch.save() before reset_old_weights() to persist the EMA model
            ema.reset_old_weights()
    '''

    def __init__(self, model, decay):
        self.model = model
        self.decay = decay
        # Shadow copy (the running EMA) of every trainable parameter,
        # initialized to the current model weights.
        self.ema_weights = {
            name: param.data.clone()
            for name, param in model.named_parameters()
            if param.requires_grad
        }
        # Backup of the live weights while the EMA weights are applied for eval.
        self.model_weights = {}

    def update(self):
        """Fold the current parameters into the running averages."""
        decay = self.decay
        for name, param in self.model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.ema_weights
            blended = (1.0 - decay) * param.data + decay * self.ema_weights[name]
            self.ema_weights[name] = blended.clone()

    def apply_ema_weights(self):
        """Swap the live parameters for their EMA counterparts, backing up the originals."""
        for name, param in self.model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.ema_weights
            self.model_weights[name] = param.data
            param.data = self.ema_weights[name]

    def reset_old_weights(self):
        """Restore the parameters saved by apply_ema_weights() and drop the backup."""
        for name, param in self.model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.model_weights
            param.data = self.model_weights[name]
        self.model_weights = {}
# def extend_with_exponential_moving_average(BaseOptimizer, model):
# class EmaOptimizer(BaseOptimizer):
# # @insert_arguments(ema_momentum=0.999)
# def __init__(self, model, *args, **kwargs):
# super(EmaOptimizer, self).__init__(*args, **kwargs)
# self.model = model
# # 保存ema权重(当前step的每一层的滑动平均权重)
# self.ema_weights = {}
# # 在进行evaluate的时候,保存原始的模型权重,当执行完evaluate后,从ema权重恢复到原始权重
# self.model_weights = {}
# # 初始化ema_weights为model_weights
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# self.ema_weights[name] = param.data.clone()
# def step(sel, closure: Callable = None):
# """
# 执行单步优化
# 参数:
# closure (:obj:`Callable`, `optional`):
# 评估模型并返回loss,是一个闭包
# """
# loss = None
# if closure is not None:
# loss = closure()
# loss = super(NewOptimizer, self).step()
# self.update()
# return loss
# def update(self):
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# assert name in self.ema_weights
# new_average = (1.0 - self.decay) * param.data + self.decay * self.ema_weights[name]
# self.ema_weights[name] = new_average.clone()
# def apply_ema_weights(self):
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# assert name in self.ema_weights
# self.model_weights[name] = param.data
# param.data = self.ema_weights[name]
# def reset_old_weights(self):
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# assert name in self.model_weights
# param.data = self.model_weights[name]
# self.model_weights = {}
# return EmaOptimizer | StarcoderdataPython |
3509048 | <reponame>CityPulse/dynamic-bus-scheduling<gh_stars>10-100
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
- LICENCE
The MIT License (MIT)
Copyright (c) 2016 <NAME> Ericsson AB (EU FP7 CityPulse Project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
- DESCRIPTION OF DOCUMENTS
-- MongoDB Database Documents:
address_document: {
'_id', 'name', 'node_id', 'point': {'longitude', 'latitude'}
}
bus_line_document: {
'_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}]
}
bus_stop_document: {
'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}
}
bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_object_id]]
}
bus_vehicle_document: {
'_id', 'bus_vehicle_id', 'maximum_capacity',
'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
}
detailed_bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_document]]
}
edge_document: {
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}
node_document: {
'_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'}
}
point_document: {
'_id', 'osm_id', 'point': {'longitude', 'latitude'}
}
timetable_document: {
'_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id',
'timetable_entries': [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers',
'number_of_deboarding_passengers', 'number_of_current_passengers',
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}],
'travel_requests': [{
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}]
}
traffic_event_document: {
'_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 'latitude'}, 'datetime'
}
travel_request_document: {
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}
way_document: {
'_id', 'osm_id', 'tags', 'references'
}
-- Route Generator Responses:
get_route_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}
get_route_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}]
get_waypoints_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}
get_waypoints_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}]
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__credits__ = [
'<NAME> (Senior Researcher at Ericsson AB) - email: <EMAIL>'
'<NAME> (Senior Researcher at Ericsson AB) - email: <EMAIL>'
]
class MultiplePathsNode(object):
    """A graph node that accumulates every path followed to reach it."""

    def __init__(self, osm_id):
        self.osm_id = osm_id
        self.followed_paths = []

    def __str__(self):
        return str(self.osm_id)

    def add_followed_path(self, followed_path):
        # Keep the collection of paths duplicate-free.
        if followed_path not in self.followed_paths:
            self.followed_paths.append(followed_path)

    def get_followed_paths(self):
        return self.followed_paths

    def update_followed_paths(self, followed_paths_of_previous_node):
        """Extend each of the predecessor's paths with this node's osm_id."""
        if not followed_paths_of_previous_node:
            # No history: this node starts its own single-element path.
            self.followed_paths.append([self.osm_id])
        else:
            for prior_path in followed_paths_of_previous_node:
                self.add_followed_path(prior_path + [self.osm_id])
class MultiplePathsSet(object):
    """
    FIFO container of nodes whose neighbors still await exploration.

    Following the principles of breadth-first search, the neighbors of each
    node are explored before moving on to the next level, so pending nodes
    are kept (and served) in insertion order.
    """

    def __init__(self):
        self.node_osm_ids = []
        self.nodes = []

    def __len__(self):
        return len(self.node_osm_ids)

    def __contains__(self, node_osm_id):
        """
        Check whether a node with this osm_id is currently queued.

        :type node_osm_id: integer
        :return: boolean
        """
        return node_osm_id in self.node_osm_ids

    def __str__(self):
        return str(self.node_osm_ids)

    def push(self, new_node):
        """
        Queue a node for later exploration.

        :param new_node: MultiplePathsNode
        """
        self.node_osm_ids.append(new_node.osm_id)
        self.nodes.append(new_node)

    def pop(self):
        """
        Dequeue and return the oldest queued node.

        :return: node: MultiplePathsNode
        """
        oldest = self.nodes.pop(0)
        self.node_osm_ids.remove(oldest.osm_id)
        return oldest
def get_edge(starting_node, ending_node, edges_dictionary):
    """
    Get the edge_document which connects starting_node with ending_node.

    :param starting_node: osm_id
    :param ending_node: osm_id
    :param edges_dictionary: {starting_node_osm_id -> [edge_document]}
    :return: edge: edge_document (the first match), or None when no edge connects the two nodes
    """
    return next(
        (candidate for candidate in edges_dictionary[starting_node]
         if candidate.get('ending_node').get('osm_id') == ending_node),
        None,
    )
def identify_all_paths(starting_node_osm_id, ending_node_osm_id, edges_dictionary):
    """
    This function is capable of identifying all the possible paths connecting the
    starting with the ending node, implementing a variation of the Breadth-first
    search algorithm. Each path is represented by a list of edge_documents (waypoints),
    including details about intermediate nodes, maximum allowed speed, road type, and
    current levels of traffic density. The returned value of the function is a
    double list of edge_documents.

    :param starting_node_osm_id: integer
    :param ending_node_osm_id: integer
    :param edges_dictionary: {starting_node_osm_id -> [edge_document]}
    :return: waypoints: [[edge_document]]
    """
    # Returned value
    waypoints = []
    # A data storing structure (FIFO) used in order to keep the nodes
    # whose neighbors should be considered.
    open_set = MultiplePathsSet()
    # A dictionary ({node_osm_id -> node}) containing nodes
    # whose neighbors have already been considered.
    closed_set = {}
    # starting_node is initialized (its only path is itself) and pushed into the open_set.
    starting_node = MultiplePathsNode(osm_id=starting_node_osm_id)
    starting_node.followed_paths = [[starting_node.osm_id]]
    open_set.push(new_node=starting_node)
    # The node in the first position of the open_set is retrieved,
    # as long as the number of stored nodes is above zero.
    while len(open_set) > 0:
        current_node = open_set.pop()
        # Continuation condition: ending_node has been discovered.
        if current_node.osm_id == ending_node_osm_id:
            # Each one of the followed paths is processed, in order to retrieve the
            # corresponding edge_documents, and added to the returned double list.
            for followed_path in current_node.get_followed_paths():
                waypoints.append(process_followed_path(
                    followed_path=followed_path,
                    edges_dictionary=edges_dictionary)
                )
            current_node.followed_paths = []
            continue
        # Continuation condition: current_node is ignored in case it has no neighbors,
        # or its neighbors have already been considered.
        if current_node.osm_id not in edges_dictionary or current_node.osm_id in closed_set:
            continue
        # Following the edges of current_node, each one of its neighbors is considered.
        for edge in edges_dictionary.get(current_node.osm_id):
            next_node_osm_id = edge.get('ending_node').get('osm_id')
            # Continuation condition: next_node has already been considered.
            if next_node_osm_id in closed_set:
                continue
            else:
                # Followed paths of next_node are updated and the node is pushed into the open_set,
                # so as to allow its neighbors to be considered.
                # NOTE(review): a fresh MultiplePathsNode is pushed for every incoming edge,
                # so the same osm_id can appear several times in the open_set; deduplication
                # only happens through closed_set once a node's neighbors have been processed.
                next_node = MultiplePathsNode(osm_id=next_node_osm_id)
                next_node.update_followed_paths(followed_paths_of_previous_node=current_node.get_followed_paths())
                open_set.push(new_node=next_node)
        # Since all its neighbors have been considered, current_node is added to the closed_set.
        closed_set[current_node.osm_id] = current_node
    return waypoints
def process_followed_path(followed_path, edges_dictionary):
    """
    Resolve each consecutive pair of nodes in followed_path into the
    edge_document which connects them.

    :param followed_path: [osm_id]
    :param edges_dictionary: {starting_node_osm_id -> [edge_document]}
    :return: detailed_followed_path: [edge_document]
    """
    return [
        get_edge(starting_node=node_a, ending_node=node_b, edges_dictionary=edges_dictionary)
        for node_a, node_b in zip(followed_path, followed_path[1:])
    ]
| StarcoderdataPython |
1902411 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build pattern out of signatures.
List of tools designed to create signatures for allocations.
That should allow to do reverse guesswork of patterns (pointers)
and therefore identify similar record types allocations.
"""
import logging
import argparse
import pickle
import sys
import struct
import itertools
import collections
import numbers
import os
from haystack import dump_loader
from haystack.reverse import config
from haystack.reverse import utils
from haystack.reverse import matchers
from haystack.reverse import searchers
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2012 <NAME>"
__license__ = "GPL"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
log = logging.getLogger('pattern')
class Dummy(object):
    """Empty placeholder class (no usage is visible in this part of the module)."""
    pass
def findPatternText(sequence, elSize=1, minNbGroup=2):
    """
    returns a regexp grouping repetitive patterns.

    @param sequence: a sequence (str/bstr) with rfind() method.
    @param elSize: the size of each element ( 1 to xxx ) in the sequence.
    @param minNbGroup: the minimum number of repetition before trying to group the pattern.

    Examples:
    >>> from haystack.reverse import pattern
    >>> s = 'aaaaa1111bbbccda2a2a2a2a2b1cb1cb1cb1cabcdabcdabcdabcdpooiiiuuuuyyyyy'
    >>> pattern.findPatternText(s,1)
    ' (a){5} (1){4} (b){3} (c){2} d (a2){5} (b1c){4} (abcd){4} p (o){2} (i){3} (u){4} (y){5} '

    >>> pattern.findPatternText(s,1,5)
    ' (a){5} 1111bbbccd (a2){5} b1cb1cb1cb1cabcdabcdabcdabcdpooiiiuuuu (y){5} '
    """
    # Render each (count, text) group from findPattern: single occurrences are
    # emitted verbatim, repetitions in regexp-like ' (text){count} ' form.
    chunks = []
    for nb, txt in findPattern(sequence, elSize, minNbGroup):
        if nb == 1:
            chunks.append(txt)
        else:
            chunks.append(' (%s){%d} ' % (txt, nb))
    return ''.join(chunks)
def findPattern(sequence, elSize=1, minNbGroup=2):
    """
    returns a regexp grouping repetitive patterns.

    @param sequence: a sequence (str/bstr) with rfind() method.
    @param elsize: the size of each element ( 1 to xxx ) in the sequence.
    @param minNbGroup: the minimum number of repetition before trying to group the pattern.

    Examples:
    >>> from haystack.reverse import pattern
    >>> s = 'aaaaa1111bbbccda2a2a2a2a2b1cb1cb1cb1cabcdabcdabcdabcdpooiiiuuuuyyyyy'
    >>> pattern.findPattern(s,1)
    [(5, 'a'), (4, '1'), (3, 'b'), (2, 'c'), (1, 'd'), (5, 'a2'), (4, 'b1c'), (4, 'abcd'), (1, 'p'), (2, 'o'), (3, 'i'), (4, 'u'), (5, 'y')]

    >>> s = 'aaaaa1111bbbccda2a2a2a2a2b1cb1cb1cb1cabcdabcdabcdabcdpooiiiuuuuyyyyy'
    >>> pattern.findPattern(s,1,5)
    [(5, 'a'), (1, '1111bbbccd'), (5, 'a2'), (1, 'b1cb1cb1cb1cabcdabcdabcdabcdpooiiiuuuu'), (5, 'y')]
    """
    # NOTE(review): Python 2 only (xrange, and integer '/' in the range bound below).
    if (len(sequence) % elSize) != 0:
        pass  # DEBUG TODO DELETE bypass needed for textprintout
        #raise ValueError('your sequence length:%d has to be a multiple of element size:%d'%(len(sequence),elSize))
    elif sequence == '':
        return []
    patterns = []
    # Enumerate every candidate sub-sequence length, from one element up to
    # half of the sequence (a repetition needs at least two occurrences).
    for seqlen in range(elSize, 1 + (len(sequence) / 2)):
        # All slices of length seqlen starting on an element boundary.
        seqs = [sequence[i:i + seqlen]
                for i in xrange(0, len(sequence) - seqlen + 1, elSize)]  # i % elSize, aligned on the elSize
        for value, nb in collections.Counter(seqs).most_common():
            # try repetition as long as it is > to minNbGroup
            while nb >= minNbGroup:
                ind = sequence.rfind(value * nb)  # find the fulltext pattern
                while ind != -1:  # not found
                    # Score tuple: total length first, then offset, count, value --
                    # sorting later makes the longest (then rightmost) repetition win.
                    patterns.append(
                        (nb *
                         len(value),
                         ind,
                         nb,
                         value))  # biggest is best, ind++ is better, large nb best
                    ind = sequence.rfind(
                        value *
                        nb,
                        0,
                        ind)  # find it at another offset
                nb -= 1  # try with a smaller number of repetition
    #
    if len(patterns) == 0:
        # No repetition at all: the whole sequence is one literal group.
        return [(1, sequence)]
    patterns = sorted(set(patterns))
    best = patterns[-1]  # higher wins
    # print 'BEST:', best, best[0], best[3][:elSize], best[3][elSize:]
    # print 'found new patterns :'
    # for p in patterns:
    #    sequence2 = sequence.replace( p[3]*p[2], ' (%s){%d} '%(p[3],p[2]) )
    #    print p, sequence2
    # Split the sequence around the best repetition and recurse on both sides.
    i = sequence.find(best[3] * best[2])
    left = sequence[:i]
    right = sequence[i + best[0]:]
    log.debug('left %d:%s' % (len(left), left))
    log.debug('right %d:%s' % (len(right), right))
    ret = findPattern(left, elSize, minNbGroup)
    ret2 = findPattern(right, elSize, minNbGroup)
    return ret + [(best[2], best[3])] + ret2
class PatternEncoder:
    """
    Encodes a sequence of arbitrary hashable elements into a compact text form
    (one fixed-width byte code per distinct element) so that the string-based
    pattern search of findPattern() can be applied, then decodes the grouped
    result back into the original elements (see makePattern()).
    """

    def __init__(self, sequence, minGroupSize):
        self.basicElements = set(sequence)
        self.sequence = sequence
        self.nb = len(self.basicElements)
        self.minGroupSize = minGroupSize
        # Choose the smallest per-element code width (in bytes) able to
        # represent every distinct element of the sequence.
        if self.nb == 0:
            raise ValueError('empty sequence')
        elif self.nb < 0xff:
            self.elSize = 1
        elif self.nb < 0xffff:
            self.elSize = 2
        elif self.nb < 0xffffff:
            self.elSize = 3
        elif self.nb < 0xffffffff:
            self.elSize = 4
        else:
            raise ValueError(
                'I deny you the right to find patterns for more than 2^32 differents basic elements.')
        self._makeDictionnary()
        return

    def _makeDictionnary(self):
        """Build the element<->code translation tables and the encoded text."""
        log.debug('making pattern dictionnary')
        self.dict = {}
        self.dict_reverse = {}
        for i, el in enumerate(self.basicElements):
            # Keep only the last elSize bytes of the big-endian 4-byte encoding.
            cod = struct.pack('>L', i)[-self.elSize:]  # code 0 to 0xff
            self.dict[el] = cod
            self.dict_reverse[cod] = el
        # dict done
        self.sequence_norm = [self.dict[el] for el in self.sequence]
        self.sequence_text = ''.join(self.sequence_norm)
        log.debug('done making pattern dictionnary %d' % (self.elSize))
        return

    def makePattern(self):
        '''[(5, 'a'), (4, '1'), (3, 'b'), (2, 'c'), (1, 'd'), (5, 'a2'), (4, 'b1c'), .. '''
        # as of today, i do not have any other sequence class support rfind than string, so i have to decapsulate
        # a string of findPattern to basic elements
        ret = []
        patterns = findPattern(
            self.sequence_text,
            self.elSize,
            self.minGroupSize)
        for nb, p in patterns:
            plen = len(p)
            if plen % self.elSize != 0:
                raise ValueError('serious bug in findpattern')
            elif nb == 1:
                # Ungrouped run: decode element by element.
                for i in range(0, plen, self.elSize):
                    ret.append((nb, self.dict_reverse[p[i:i + self.elSize]]))
            else:
                # Grouped run: decode the repeated unit as a list of elements.
                seq = [self.dict_reverse[p[i:i + self.elSize]]
                       for i in range(0, plen, self.elSize)]
                ret.append((nb, seq))
        return ret
def make(opts):
    """
    Build a pointer-interval signature for every memory dump listed in
    opts.dumpfiles and feed them to a PinnedPointersMapper to find similar
    pinned-pointer sequences across dumps.

    :param opts: argparse-style namespace; only opts.dumpfiles (a list of
        dump file paths) is read here.
    """
    log.info('Make the signature.')
    # head + first word size
    memory_handler = dump_loader.load(opts.dumpfiles[0])
    word_size = memory_handler.get_target_platform().get_word_size()
    ppMapper = PinnedPointersMapper(word_size)
    heap_sig = PointerIntervalSignature(memory_handler, '[heap]')
    log.info('pinning offset list created for heap %s.' % (heap_sig))
    ppMapper.addSignature(heap_sig)
    # now do the others
    for dumpfile in opts.dumpfiles[1:]:
        memory_handler = dump_loader.load(dumpfile)
        # NOTE(review): a word-size mismatch is only logged, the dump is
        # still added to the mapper -- confirm whether it should be skipped.
        if memory_handler.get_target_platform().get_word_size() != word_size:
            log.error("Differing wordsize between samples")
        heap_sig = PointerIntervalSignature(memory_handler, '[heap]')
        log.info('pinning offset list created for heap %s.' % (heap_sig))
        ppMapper.addSignature(heap_sig)
    log.info('Find similar vectors between pointers on all signatures.')
    ppMapper.run()
    # we have :
    # resolved PinnedPointers on all sigs in ppMapper.resolved
    # unresolved PP in ppMapper.unresolved
    # next step
    log.info('Pin resolved PinnedPointers to their respective heap.')
class PointerIntervalSignature:
    '''
    Wrapper object the list of intervals between pointers identified in the dumpfile.
    When the memory is :
    P....P..P.PPP.PP.PPPP.PPP.P..P..................P
    with P being a Word of 4 bytes which value could be a pointer value.
    The signature is
    [20,12,8,4,4,8,4,8,4,4,4,8,4,4,8,12,80]
    It abstracts the memory contents to its signature.
    '''

    def __init__(self, memory_handler, pathname='[heap]'):
        self.mmap = None
        self.mmap_pathname = pathname
        self.memory_handler = memory_handler
        self.name = memory_handler.get_name()
        self.cacheFilenamePrefix = config.get_cache_folder_name(self.name)
        # {interval index -> virtual address of the pointer preceding it}
        self.addressCache = {}
        self.sig = None
        self._word_size = memory_handler.get_target_platform().get_word_size()
        self._feedback = searchers.NoFeedback()
        self._get_mapping()
        self._load()

    def _get_mapping(self):
        # XXX todo this is getHeap...
        self.mmap = self.memory_handler._get_mapping(self.mmap_pathname)[0]
        return

    def _load(self):
        """Load the interval signature from cache, or compute and cache it."""
        # DO NOT SORT LIST. These are sequences, not sets.
        myname = self.cacheFilenamePrefix + '.pinned'
        log.debug('Reading signature from %s', myname)
        sig = utils.int_array_cache(myname)
        if sig is None:
            log.info(
                "Signature has to be calculated for %s. It's gonna take a while." %
                (self.name))
            matcher = matchers.PointerSearcher(self.memory_handler)
            pointerSearcher = searchers.WordAlignedSearcher(self.mmap, matcher, self._feedback, self._word_size)
            #pointerSearcher = matchers.PointerSearcher(self.mmap)
            sig = []
            # save first offset
            last = self.mmap.start
            for i in pointerSearcher:  # returns the vaddr
                sig.append(i - last)  # save intervals between pointers
                # print hex(i), 'value:', hex(self.mmap.readWord(i) )
                last = i
            # save it
            sig = utils.int_array_save(myname, sig)
        else:
            log.debug("%d Signature intervals loaded from cache." % (len(sig)))
        self.sig = sig
        #
        # previous pointer of interval 0 is start of mmap
        self.addressCache[0] = self.mmap.start
        self._loadAddressCache()
        return

    def _loadAddressCache(self):
        """Load the interval->vaddr cache from disk, or pre-seed ~10 entries."""
        # DO NOT SORT LIST. These are sequences, not sets.
        myname = self.cacheFilenamePrefix + '.pinned.vaddr'
        if os.access(myname, os.F_OK):
            # NOTE(review): Python 2 only -- file() and text-mode pickle.
            addressCache = pickle.load(file(myname, 'r'))
            log.debug(
                "%d Signature addresses loaded from cache." %
                (len(addressCache)))
            self.addressCache.update(addressCache)
        else:  # get at least 10 values
            for i in xrange(0, len(self), len(self) / 10):
                self.getAddressForPreviousPointer(i)
            self._saveAddressCache()
        return

    def _saveAddressCache(self):
        """Persist the interval->vaddr cache to disk."""
        myname = self.cacheFilenamePrefix + '.pinned.vaddr'
        pickle.dump(self.addressCache, file(myname, 'w'))

    def getAddressForPreviousPointer(self, offset):
        '''
        sum all intervals upto the offset. that give us the relative offset.
        add to dump.start , and we have the vaddr
        We need to sum all up to offset not included.
        it we include the offset, we get the second pointer vaddr.
        '''
        # use cache my friends
        if offset in self.addressCache:
            return self.addressCache[offset]
        # get closest one: the largest cached index strictly below offset
        keys = sorted(self.addressCache)
        keys = list(itertools.takewhile(lambda x: x < offset, keys))
        last = keys[-1]  # take the closest
        startValue = self.addressCache[last]  # == addr(last-1)
        # we are not interested in adding offset interval. that would give us
        # the second pointer address
        subseq = self.sig[last:offset]
        #newsum = startValue + reduce(lambda x,y: x+y, subseq)
        #self.addressCache[offset] = newsum
        # be proactive +/- 40 Mo: cache every intermediate address on the way up
        newsum = startValue
        for i in range(last, offset):
            newsum += self.sig[i]
            self.addressCache[i + 1] = newsum
        # be proactive
        return newsum

    def __len__(self):
        return len(self.sig)

    def __str__(self):
        return "<PointerIntervalSignature '%s'>" % (self.name)
class SequencesMaker:
    '''
    Builds a list of sequences of interval for each interval in the signature.
    [2,3,3,4,5,1,2,3,4,5] gives
    [(2,3,3), (3,3,4), (3,4,5), (4,5,1), (5,1,2), (1,2,3), (2,3,4), (3,4,5)]
    '''

    def __init__(self, sequence, size, cacheAll=True):
        # size: length of every sliding-window tuple.
        self.size = size
        self.seq = sequence
        self.sets = {}  # key is sequence len
        # NOTE(review): cacheAll is stored but never read inside this class.
        self.cacheAll = cacheAll
        self.findUniqueSequences(self.seq)

    def findUniqueSequences(self, seq):
        """Populate self.sets[self.size] with the distinct sliding-window tuples."""
        log.debug('number of intervals: %d' % (len(seq)))
        sig_set = set(seq)
        log.debug('number of unique intervals value: %d' % (len(sig_set)))
        # create the tuple
        self.sets[self.size] = set(self.getSeqs())
        log.debug(
            'number of unique sequence len %d : %d' %
            (self.size, len(
                self.sets[
                    self.size])))
        return

    def getSeqs(self):
        """Return (and lazily cache in self.seqs) all windows of length self.size."""
        if not hasattr(self, 'seqs'):
            seqlen = self.size
            self.seqs = [tuple(self.seq[i:i + seqlen])
                         for i in xrange(0, len(self.seq) - seqlen + 1)]
        seqs = self.seqs
        return seqs

    def __len__(self):
        # NOTE(review): getSeqs() yields len(self.seq) - self.size + 1 windows,
        # so this undercounts by one -- confirm whether that is intentional.
        return len(self.seq) - self.size

    def __iter__(self):
        # Iterate the windows lazily, without touching the self.seqs cache.
        # (Python 2 only: xrange.)
        seqlen = self.size
        for i in xrange(0, len(self.seq) - seqlen + 1):
            yield tuple(self.seq[i:i + seqlen])
        return
class PinnedPointers:
    '''
    A variable length sequence of intervals between pointers.
    It already pinned at a specific offset of a signature,
    so you might find several instance p1 and p2 at different offset, but with the same sequence
    and therefore equal signature. p1 == p2.
    It is easily pin onto the initial dump/heap by getAddress()

    @param sequence: the sequence of intervals between pointers
    @param sig: the whole signature object linked back to the memoryMap
    @param offset: the offset of this interval within the signature
    '''

    def __init__(self, sequence, sig, offset, word_size):
        self.sequence = sequence
        # Total bytes covered: all intervals plus the final pointer word.
        self.nb_bytes = sum(sequence) + word_size
        self.offset = offset
        self.sig = sig
        # {signature -> [equal PinnedPointers seen elsewhere]}
        self.relations = {}
        self.vaddr = None

    def pinned(self, nb=None):
        """Return the first nb intervals (the whole sequence by default)."""
        # BUGFIX: the original wrote `nb == len(self.sequence)` -- a no-op
        # comparison instead of an assignment.  It only worked by accident,
        # because `self.sequence[:None]` already returns the full list.
        if nb is None:
            nb = len(self.sequence)
        return self.sequence[:nb]

    def __len__(self):
        return len(self.sequence)

    def structLen(self):
        """Size in bytes of the pinned structure."""
        return self.nb_bytes

    def __cmp__(self, o):
        # Python 2 ordering: by interval count, then byte size, then sequence.
        if len(self) != len(o):
            return cmp(len(self), len(o))
        # that means the sequence is different too
        if self.structLen() != o.structLen():
            return cmp(self.structLen(), o.structLen())
        if self.sequence != o.sequence:  # the structLen can be the same..
            return cmp(self.sequence, o.sequence)
        # else offset is totally useless, we have a match
        return 0

    def __contains__(self, other):
        # Unfinished: always raises.  The code below it is unreachable legacy
        # logic kept for reference.
        raise NotImplementedError
        if not isinstance(other, PinnedPointers):
            raise ValueError
        if other.sig == self.sig:  # well, not really
            if other.offset >= self.offset and other.offset <= self.offset + \
                    len(self):
                # if other.sequence in self.sequence: ## need subsearch
                return True
        return False

    def addRelated(self, other, sig=None):
        ''' add a similar PinnedPointer from another offset or another sig '''
        if self != other:
            raise ValueError('We are not related PinnedPointers.')
        if sig is None:
            sig = self.sig
        if sig not in self.relations:
            self.relations[sig] = list()
        self.relations[sig].append(other)
        return

    def getAddress(self, numOffset=0):
        '''
        return the vaddr of pointer <numOffset>.
        by default numOffset == 0 , returns the vaddr of the first interval
        ( that might be the first or second pointer in the struct )
        '''
        if self.vaddr is None:
            # NOTE(review): this bounds check only runs while self.vaddr is
            # still unset, i.e. on the first call -- confirm if intended.
            if numOffset >= len(self.sequence):
                raise IndexError
            self.vaddr = self.sig.getAddressForPreviousPointer(self.offset)
        if numOffset != 0:
            return self.sig.getAddressForPreviousPointer(
                self.offset + numOffset)
        return self.vaddr

    def __str__(self):
        return '<PinnedPointers %s[%d:%d] +%d bytes/%d pointers>' % (
            self.sig, self.offset, self.offset + len(self), self.nb_bytes, len(self.sequence) + 1)

    @classmethod
    def link(cls, lstOfPinned):
        """Cross-register every later element as related to every earlier one."""
        for i, p1 in enumerate(lstOfPinned):
            for p2 in lstOfPinned[i + 1:]:
                p1.addRelated(p2, p2.sig)
                p2.addRelated(p1, p1.sig)
        return
class AnonymousStructRange:
    '''
    Map a pinnedPointer sequence/signature onto a specific memory at a specific offset.
    We are now able to query the structure contents.

    Operators:
        __contains__ : if applied by a Number, it will be understood as a memory address.
                    if the memory address is in range of this structure, return True.
                    in all other cases, return False
        __cmp__ : if applied by a Number, it will be understood as a memory address.
                    if the memory address is in range of this structure, return 0.
                    in all other cases, return the __cmp__ of the address compared to the start of the struct
    '''

    def __init__(self, pinnedPointer, word_size):
        self.pinnedPointer = pinnedPointer
        # by default we start at the first pointer
        self.start = pinnedPointer.getAddress()
        self.stop = pinnedPointer.getAddress(
            len(pinnedPointer))  # by default we stop at the last pointer
        # add the length of the last pointer
        self.stop += word_size
        # Lazy caches for pointer addresses / values (filled on first access).
        self.pointers = None
        self.pointersTypes = {}
        self.pointersValues = None
        self.typename = self.makeTypeName()

    def getPointersAddr(self):
        """Return (and cache) the vaddr of every pointer inside the struct."""
        if self.pointers is None:
            self.pointers = [self.pinnedPointer.getAddress(
                i) for i in range(len(self.pinnedPointer) + 1)]
        return self.pointers

    def getPointersValues(self):
        """Return (and cache) the word value stored at every pointer address."""
        if self.pointersValues is None:
            mmap = self.pinnedPointer.sig.mmap
            self.pointersValues = [
                mmap.read_word(addr) for addr in self.getPointersAddr()]
        return self.pointersValues

    def setPointerType(self, number, anonStruct):
        ''' set a specific pointer to a specific anonStruct type '''
        # Both structs must belong to the same signature/heap.
        if anonStruct.sig() != self.sig():
            raise TypeError(
                'You cant type with a AnonStruct from another PointerIntervalSignature. %s vs %s' %
                (self, anonStruct))
        # A pointer slot can only be typed once.
        if number in self.pointersTypes:
            raise IndexError('%s Pointer number %d has already been identified as a type %s - new type : %s' % (
                self, number, self.getPointerType(number).type(), anonStruct.type()))
        self.pointersTypes[number] = anonStruct
        myself = ''
        if self == anonStruct:
            myself = ' (MYSELF) '
        log.debug(
            'Set %s pointer number %d to type %s %s' %
            (self.type(),
             number,
             self.getPointerType(number).type(),
             myself))
        return

    def getPointerOffset(self, number):
        """Byte offset of pointer <number> from the start of the struct."""
        return self.pinnedPointer.getAddress(number) - self.start

    def getPointerType(self, number):
        return self.pointersTypes[number]

    def sig(self):
        return self.pinnedPointer.sig

    def sequence(self):
        return self.pinnedPointer.sequence

    def type(self):
        return self.typename

    def __contains__(self, other):
        # Numbers are treated as memory addresses; anything else is "not in".
        if isinstance(other, numbers.Number):
            rel = other - self.start
            if rel > len(self) or (rel < 0):
                return False
            return True
        else:
            return False

    def __cmp__(self, other):
        # Python 2 only (cmp): 0 when the address falls inside the struct.
        if other in self:
            return 0
        else:
            return cmp(self.start, other)

    def __len__(self):
        return int(self.stop - self.start)

    def makeTypeName(self):
        return 'AnonStruct_%s_%s_%s_%s' % (len(self), len(
            self.pinnedPointer), self.pinnedPointer.sig.name, self.pinnedPointer.offset)

    def toCtypesString(self):
        # NOTE(review): unfinished stub -- builds nothing and returns None.
        s = ''
        return

    def __str__(self):
        return '<%s>' % (self.type())
class PinnedPointersMapper:
    '''
    Cross-signature structure mapper:
    a) Identify long interval sequences (fixed size of 20).
    b) Find the sequences common to all signatures.
    c) For each offset of each signature, determine a PinnedPointer that
       covers the largest run made of common sequences.
       *** Possible error: the sequence created in sig1 does not exist in sig2.
       This can happen when sig2 contains A4 and A5 in two distinct zones
       (A5 == A4[1:] + ...) while sig1 contains A4A5 in a single zone:
       we end up with sig1.A4A5 but sig2.A4 and sig2.A5. In that case sig1
       could be re-cut along sig2's smallest common denominator -> check routine.
    d) Link those PinnedPointers together (a central repository would be better).
    e) Meta info: find multiple instances (same struct, multiple allocations).
    '''

    def __init__(self, word_size, sequenceLength=20):
        # PinnedPointers indexed by struct length (filled by savePinned()).
        self.cacheValues2 = {}
        # PointerIntervalSignature objects registered via addSignature().
        self.signatures = []
        # sig -> SequencesMaker of its interval sub-sequences.
        self.signatures_sequences = {}
        # Guard: no signatures may be added once run() has started.
        self.started = False
        # Interval sequences common to every signature.
        self.common = []
        self.length = sequenceLength
        self.word_size = word_size
        return

    def addSignature(self, sig):
        """Register a signature; only allowed before run() starts."""
        if self.started:
            # NOTE(review): message typo - "stated" should read "started".
            raise ValueError("Mapping has stated you can't add new signatures")
        self.signatures.append(sig)
        return

    def _findCommonSequences(self):
        """Intersect the <length>-interval sub-sequences of all signatures."""
        log.info('Looking for common sequence of length %d' % (self.length))
        common = None
        # Make len(sig) sub-sequences of size <length> (stored in .sets).
        for sig in self.signatures:
            self.signatures_sequences[sig] = SequencesMaker(
                sig.sig,
                self.length,
                False)
            if common is None:
                common = set(self.signatures_sequences[sig].sets[self.length])
            else:
                common &= self.signatures_sequences[sig].sets[self.length]
        log.info(
            'Common sequence of length %d: %d seqs' %
            (self.length, len(common)))
        return common

    def _mapToSignature(self, sig):
        """Map the common sequence set back onto one signature.

        Walks the signature's sub-sequences, aggregating maximal runs of
        common sequences into PinnedPointers, and recording the uncommon
        gaps between them.
        """
        # Now map the common set onto the original array:
        # a) iterate sig until a non-common sequence is found;
        # b) reduce previous slices to one bigger sequence.
        # Offsets can be aggregated as long as start:start+<length> is in
        # common; we end up with a 'small' number of fairly large sequences
        # assumed to be common.
        sig_aggregated_seqs = []
        sig_uncommon_slice_offset = []
        start = 0
        stop = 0
        i = 0
        length = self.length
        seqs_sig1 = self.signatures_sequences[sig]
        common = self.common
        # All subsequences, offset by offset.
        enum_seqs_sig = enumerate(seqs_sig1)
        try:
            while i < len(seqs_sig1):  # we won't get a StopIteration here...
                # Skip forward to the first common sequence; the gap is uncommon.
                for i, subseq in enum_seqs_sig:
                    if subseq in common:
                        start = i
                        # log.debug('Saving a Uncommon slice %d-%d'%(stop,start))
                        sig_uncommon_slice_offset.append((stop, start))
                        break
                    del subseq
                # The enumerator now sits on the first valid <length>-interval sequence.
                for i, subseq in enum_seqs_sig:
                    if subseq in common:
                        del subseq
                        continue
                    # The last interval of the <length>-tuple is not common.
                    else:
                        # Aggregate [start:stop+length]. There CAN be another common
                        # slice starting between stop and stop+length:
                        # (1,2,3,4) is common, (1,2,3,4,6) is NOT common because of
                        # the 6, (2,3,4,6) is common - the next valid slice is at
                        # start+1. So yes, aggregated sequences can overlap.
                        stop = i  # end aggregation slice
                        seqStop = stop + length - 1
                        # We should also pin it in sig2, sig3, and relate to that...
                        pp = savePinned(
                            self.cacheValues2,
                            sig,
                            start,
                            seqStop - start,
                            self.word_size)
                        sig_aggregated_seqs.append(pp)  # save a big sequence
                        # log.debug('Saving an aggregated sequence %d-%d'%(start, stop))
                        del subseq
                        break  # go look for the next common run
                # Find the next valid interval.
            # Wait for the end of the enumeration.
        except StopIteration as e:
            pass
        # Done.
        # log.debug('%s'%sig1_uncommon_slice_offset)
        log.info(
            'There is %d uncommon slice zones in %s' %
            (len(sig_uncommon_slice_offset), sig))
        log.info(
            'There is %d common aggregated sequences == struct types in %s' %
            (len(sig_aggregated_seqs), sig))
        return sig_uncommon_slice_offset, sig_aggregated_seqs

    def _findMultipleInstances(self):
        """Group equal PinnedPointers and link those present in every signature.

        Populates self.resolved (linked across all signatures) and
        self.unresolved (partial matches).
        """
        # NOTE(review): uses the module-level `mapper` global instead of
        # `self`; run() sets mapper = self beforehand, confirm before reuse.
        allpp = sorted([v for l in self.cacheValues2.values()
                        for v in l], reverse=True)
        unresolved = []
        linkedPP = []
        linked = 0
        multiple = 0
        for k, g in itertools.groupby(allpp):
            l = list(g)
            # We can have multiple instances, but not fewer than one per signature.
            if len(l) < len(mapper.signatures):
                unresolved.extend(l)
                # print 'not same numbers'
                continue
            else:
                allSigs = True
                # We should have every signature represented.
                found = [pp.sig for pp in l]
                for s in mapper.signatures:
                    if s not in found:
                        unresolved.extend(l)
                        # print 'not same sigs', s
                        allSigs = False
                        break
                # If ok, link them all.
                if allSigs:
                    PinnedPointers.link(l)
                    linkedPP.extend(l)
                    multiple += 1
                    linked += len(l)
        unresolved = sorted(unresolved, reverse=True)
        linkedPP = sorted(linkedPP, reverse=True)
        self.unresolved = unresolved
        self.resolved = linkedPP
        log.info(
            'Linked %d PinnedPointers across all PointerIntervalSignatures, %d unique in all Signatures ' %
            (linked, multiple))
        log.info(
            'left with %d/%d partially unresolved pp' %
            (len(unresolved), len(allpp)))
        # Cache to disk:
        # cacheToDisk(self.resolved,'pinned-resolved')
        # cacheToDisk(self.unresolved,'pinned-unresolved')
        return

    def run(self):
        """Execute the full mapping pipeline over the registered signatures."""
        self.started = True
        all_common_pp = []
        CACHE = 'pinned-resolved'
        CACHE2 = 'pinned-unresolved'
        global mapper
        mapper = self
        # Drop 1: find common sequences.
        self.common = self._findCommonSequences()
        # Drop 2: map sequences to each signature and aggregate overlapping
        # sequences.
        for sig in self.signatures:
            # NOTE(review): unknown_slices is currently unused.
            unknown_slices, common_pp = self._mapToSignature(sig)
            all_common_pp.extend(common_pp)
        # Drop 3: error case, we have been too optimistic about unicity of
        # common sequences; try to reduce the errors. For each structLen, find
        # at least one pp for each sig. Chances are that only the last interval
        # is botched, so we only have to compare pp1.sequence[:-1] with
        # pp2.sequence[:-1] to find a perfect match. We need to find sole
        # pointers and pop all equals across the signatures.
        # Drop 3: analyze and find multiple instances of the same sequence.
        self._findMultipleInstances()
        # Drop 4: sequences have been linked cross-signature; try to extend
        # them. The sequences cannot be grown further - there is no common
        # pattern left. We could instead look for shorter sequences inside the
        # uncommon_slices intervals, and use stack pointers to find the real
        # start-of-structure addresses.
        caches = self._makeCaches()
        # NOTE(review): py2 `file()` with text mode 'w' for pickle output;
        # hard-coded absolute path.
        pickle.dump(
            caches,
            file(
                '/home/jal/Compil/python-haystack/outputs/caches',
                'w'))
        self._pinResolved(caches)
        return

    # Step 2: pin the structs on the wall/heap.
    def _makeCaches(self):
        """Build, per signature, the AnonymousStructRange lists and their start addresses."""
        caches = {}
        for sig in self.signatures[:]:
            a = Dummy()
            resolved_for_sig = [pp for pp in self.resolved if pp.sig == sig]
            unresolved_for_sig = [
                pp for pp in self.unresolved if pp.sig == sig]
            log.debug('Pin anonymous allocators on %s' % (sig))
            pinned = [AnonymousStructRange(pp, self.word_size) for pp in resolved_for_sig]
            log.debug('Create list of allocators addresses for %s' % (sig))
            pinned_start = [pp.getAddress() for pp in resolved_for_sig]
            # if sorted(pinned_start) != pinned_start:
            #     log.error('Damn !')
            #     raise ValueError('iscrewedupbadlyhere')
            log.debug('Pin probable anonymous allocators on %s' % (sig))
            pinned_lightly = [
                AnonymousStructRange(pp, self.word_size) for pp in unresolved_for_sig]
            log.debug(
                'Create list of probable allocators addresses for %s' %
                (sig))
            pinned_lightly_start = [pp.getAddress()
                                    for pp in unresolved_for_sig]
            # Save it on the per-signature record.
            a.pinned = pinned
            a.pinned_start = pinned_start
            a.pinned_lightly = pinned_lightly
            a.pinned_lightly_start = pinned_lightly_start
            caches[sig] = a
        return caches

    def _pinResolved(self, caches):
        """Classify every pointer of every pinned struct of the first signature.

        A pointer value landing on a struct start (mostly true) or inside a
        struct is cross-checked against the related structs of the other
        signatures: the parent in sig A (Pa) should point to a child type in
        sig A (Ca), the parent in sig B (Pb) to a child type in sig B (Cb);
        since Pa and Pb are related, Ca and Cb should be related too.
        """
        # log.debug('Overlapping sequences can happen. we will filter them later using a tree of allocators.')
        # for i, pp in enumerate(pinned):
        #     if pp.start in pinned[i+1:]:
        #         pass
        # TODO: stack pointer values could be compared to pinned_start /
        # pinned_lightly_start as well.
        sig = self.signatures[0]
        pinned = caches[sig].pinned
        pinned_start = caches[sig].pinned_start
        pinned_lightly = caches[sig].pinned_lightly
        pinned_lightly_start = caches[sig].pinned_lightly_start
        # For each AnonymousStructRange in pinned, get pointer values and build trees.
        log.debug('Going through pointers')
        # NOTE(review): these four counters are never incremented; the
        # corresponding lists below hold the actual results.
        startsWithPointer = 0
        startsMaybeWithPointer = 0
        pointsToStruct = 0
        pointsToStruct2 = 0
        self.startTree = []
        self.startTree2 = []
        self.tree = []
        self.tree2 = []
        startsWithPointerList = self.startTree
        startsMaybeWithPointerList = self.startTree2
        pointsToStructList = self.tree
        pointsToStructList2 = self.tree2
        for i, ap in enumerate(pinned):
            ptrs = ap.getPointersValues()
            crosscheck = False
            # ptr is the value of pointer number j in the AnonymousStructRange ap.
            for j, ptr in enumerate(ptrs):
                p_off = ap.getPointerOffset(j)
                if ptr in pinned_start:
                    log.debug(
                        '--------------------------------------------------------------------------')
                    log.debug(
                        'Lucky guess s:%d, p:%d, we find a pointer to the start of %d PinnedPointer struct.' %
                        (i, j, pinned_start.count(ptr)))
                    startsWithPointerList.append((ap, j))
                    # Check whether the same struct in sig2, sig3... points to
                    # the same target struct.
                    if self._crosscheckChild(caches, ap, j, ptr):
                        if ap == ap.getPointerType(j):
                            log.info(
                                'ID-ed %s.pointers[%d](0x%x) to type %s (MYSELF)' %
                                (ap, j, ap.getPointerOffset(j), ap.getPointerType(j)))
                        else:
                            log.info(
                                'ID-ed %s.pointers[%d](0x%x) to type %s (0x0)' %
                                (ap, j, ap.getPointerOffset(j), ap.getPointerType(j)))
                        crosscheck = True
                    log.debug(
                        '--------------------------------------------------------------------------')
                elif ptr in pinned_lightly_start:
                    log.debug(
                        'Lucky guess s:%d, p:%d we find a pointer to %d maybe-PinnedPointer struct.' %
                        (i, j, pinned_lightly_start.count(ptr)))
                    startsMaybeWithPointerList.append((ap, j))
                    # log.info('ID-ed %s.pointers[%d] to LIGHTLY'%(ap, j))
                # ptr lands in the middle of an anonymous struct.
                elif ptr in pinned:
                    pointsToStructList.append((ap, j))
                    # Check whether the same struct in sig2, sig3... points to
                    # the same target struct.
                    offset = self._crosscheckChildInMiddle(caches, ap, j, ptr)
                    if offset:
                        if ap == ap.getPointerType(j):
                            # p_off = ap.getPointerOffset(j)
                            # offset - p_off within the same structure hints at
                            # how sequential the mallocs were.
                            log.info(
                                'ID-ed %s.pointers[%d](0x%x) to type %s (0x%x) %d' %
                                (ap, j, p_off, ap.getPointerType(j), offset, offset - p_off))
                            prev_p_off = p_off
                        else:
                            log.info(
                                'ID-ed %s.pointers[%d](0x%x) to type %s (0x%x) ' %
                                (ap, j, p_off, ap.getPointerType(j), offset))
                elif ptr in pinned_lightly:
                    pointsToStructList2.append((ap, j))
                    # log.info('ID-ed %s.pointers[%d] in LIGHTLY'%(ap, j))
                else:
                    # The pointer is not in another struct; find the next nearest one.
                    first_addr, anonStruct = self._findNearestStruct(
                        ptr, caches, sig)
            # If at least one pointer type was cross-checked, refine the links.
            if crosscheck:
                self._relinkPointers(caches, ap)
        # A pointer to self may indicate a C++ object?
        sig._saveAddressCache()
        log.debug(
            'We have found %d pointers to pinned structs' %
            (startsWithPointer))
        log.debug(
            'We have found %d pointers to pinned maybe-structs' %
            (startsMaybeWithPointer))
        return

    def _findNearestStruct(self, ptr, caches, sig):
        """Return (struct, address) of the closest pinned struct after ptr, or (-1, None)."""
        pinned = caches[sig].pinned
        pinned_start = caches[sig].pinned_start
        pinned_lightly = caches[sig].pinned_lightly
        pinned_lightly_start = caches[sig].pinned_lightly_start
        # Look in both the resolved and the probable struct lists.
        first_addr, anonStruct = self._findFirstStruct(
            ptr, pinned_start, pinned)
        first_addr_l, anonStruct_l = self._findFirstStruct(
            ptr, pinned_lightly_start, pinned_lightly)
        if first_addr == first_addr_l and first_addr == -1:
            log.warning('No struct after ptr value 0x%x' % (ptr))
            return -1, None
        if first_addr_l < first_addr:  # TODO ???
            ret = (anonStruct, first_addr)
        else:
            ret = (anonStruct_l, first_addr_l)
            anonStruct = anonStruct_l
        if not anonStruct:
            return -1, None
        offset = anonStruct.start - ptr
        if offset < 64:
            log.debug(
                'Found a probable start of struct at %d bytes earlier' %
                (offset))
        return ret

    def _findFirstStruct(self, ptr, addresses, anons):
        """Return (address, struct) of the first struct at or after ptr, or (-1, None)."""
        try:
            # NOTE(review): `.next()` is Python 2 iterator syntax.
            first_addr = itertools.dropwhile(
                lambda x: x < ptr,
                addresses).next()
            anon = anons[addresses.index(first_addr)]  # same index
        except StopIteration as e:
            return -1, None
        return first_addr, anon

    def _crosscheckChild(self, cache, astruct, pointerIndex, ptr):
        '''
        We found a parent_1 -> child_1 link; check, for all other parents
        (from the other signatures), whether their n-th pointer is related to child_1.
        @param cache: cache for all calculated lists
        @param astruct: the AnonymousStructRange sequence
        @param pointerIndex: the index number for the ptr
        @param ptr: ptr is the value of pointer number pointerIndex
        '''
        perfect = []
        parent_pp = astruct.pinnedPointer
        # The child struct that ptr starts at, in the parent's own signature.
        child_astruct = cache[
            parent_pp.sig].pinned[
            cache[
                parent_pp.sig].pinned.index(ptr)]
        child_pp = child_astruct.pinnedPointer
        perfect.append((astruct, child_astruct))
        related_child_pps = []
        for sig, pps in child_pp.relations.items():
            related_child_pps.extend(pps)
        other_parent_pps = []
        for sig, pps in parent_pp.relations.items():
            other_parent_pps.extend(pps)
        # Verify the parent/child relation holds in every other signature.
        for other_parent_pp in other_parent_pps:
            sig = other_parent_pp.sig
            other_parent_astruct = AnonymousStructRange(other_parent_pp, self.word_size)
            other_parent_astruct = cache[sig].pinned[
                cache[sig].pinned.index(
                    other_parent_astruct.start)]  # get the real one
            ptr_value = other_parent_astruct.getPointersValues()[pointerIndex]
            # Get the child at @ptr_value.
            try:
                other_child_astruct = cache[sig].pinned[
                    cache[sig].pinned.index(ptr_value)]
            except ValueError as e:
                return False  # child is not the same / not pinned correctly
            other_child_pp = other_child_astruct.pinnedPointer
            # We now have the child of other_parent_pp as per its ptr value.
            if other_child_pp in related_child_pps:
                log.debug('Perfect Match - the other parent-child is ok')
                perfect.append((other_parent_astruct, other_child_astruct))
            else:
                return False
        # All signatures agree: type every parent's pointer slot.
        for parent, child in perfect:
            parent.setPointerType(pointerIndex, child)
        return True

    def _crosscheckChildInMiddle(self, cache, astruct, pointerIndex, ptr):
        '''
        We found a parent_1 -> child_1 link (into the middle of child_1); check,
        for all other parents (from the other signatures), whether their n-th
        pointer is related to child_1 at the same inner offset.
        @param cache: cache for all calculated lists
        @param astruct: the AnonymousStructRange sequence
        @param pointerIndex: the index number for the ptr
        @param ptr: ptr is the value of pointer number pointerIndex
        @return: the offset of the pointed bytes from the start of the identified struct
        '''
        perfect = []
        parent_pp = astruct.pinnedPointer
        child_astruct = cache[
            parent_pp.sig].pinned[
            cache[
                parent_pp.sig].pinned.index(ptr)]
        # Inner offset of the pointed bytes within the child struct.
        child_offset = ptr - child_astruct.start
        child_pp = child_astruct.pinnedPointer
        perfect.append((astruct, child_astruct))
        related_child_pps = []
        for sig, pps in child_pp.relations.items():
            related_child_pps.extend(pps)
        other_parent_pps = []
        for sig, pps in parent_pp.relations.items():
            other_parent_pps.extend(pps)
        # Verify the same inner offset in every other signature.
        for other_parent_pp in other_parent_pps:
            sig = other_parent_pp.sig
            other_parent_astruct = AnonymousStructRange(other_parent_pp, self.word_size)
            other_parent_astruct = cache[sig].pinned[
                cache[sig].pinned.index(
                    other_parent_astruct.start)]  # get the real one
            ptr_value = other_parent_astruct.getPointersValues()[pointerIndex]
            # Get the child at @ptr_value.
            try:
                other_child_astruct = cache[sig].pinned[
                    cache[sig].pinned.index(ptr_value)]
            except ValueError as e:
                return False  # child is not the same / not pinned correctly
            other_child_pp = other_child_astruct.pinnedPointer
            # We now have the child of other_parent_pp as per its ptr value.
            if other_child_pp in related_child_pps:
                other_child_offset = ptr_value - other_child_astruct.start
                if other_child_offset == child_offset:
                    log.debug(
                        'Perfect Middle Match - the other parent-child is ok')
                else:
                    log.info(
                        'Middle-maych diff %d %d' %
                        (child_offset, other_child_offset))
                    return False
                perfect.append((other_parent_astruct, other_child_astruct))
            else:
                return False
        for parent, child in perfect:
            parent.setPointerType(pointerIndex, child)
        return child_offset

    def _relinkPointers(self, caches, astruct):
        # TODO: not implemented; intended to refine links after a crosscheck.
        pass

    def _checkRelationsHard(self, cache, ap, pointerIndex, ptr):
        '''
        Go through all related pinned pointers of the other signatures and
        check whether the struct targeted by pointer number <pointerIndex> is
        the same PinnedPointer as in sig1. If it is not, find in the other
        signatures what the target struct is.
        @param cache: cache for all calculated lists
        @param ap: the PinnedPointer sequence
        @param pointerIndex: the index number for the ptr
        @param ptr: ptr is the value of pointer number pointerIndex
        '''
        pp = ap.pinnedPointer
        ok = False
        mypinned = cache[pp.sig].pinned
        mypinned_start = cache[pp.sig].pinned_start
        # Reverse-find an anon struct covering this ptr value (start or middle).
        anontargetPP = mypinned[mypinned.index(ptr)]
        if ptr not in mypinned_start:
            log.warning(' ++++++++++++++ ptr not in mypinned_start')
        # Reverse-find an anon struct covering this ptr value (start ONLY):
        # anontargetPP = mypinned[mypinned_start.index(ptr)]
        log.debug('anontargetPP is %s' % anontargetPP)
        targetPP = anontargetPP.pinnedPointer
        perfect = [(ap, anontargetPP)]  # get ourselves
        # Look in the other signatures.
        for sig in self.signatures:
            if sig == pp.sig:
                continue
            ok = False
            # 1 - Take the related PinnedPointers from this signature for the
            # parent PP of the first signature, and compute the value of the
            # n-th pointer for each of them.
            relatedPPs = pp.relations[sig]  # parent struct
            if len(relatedPPs) > 1:
                log.debug('We have more than one relatedPP to target')
            tgtAnons = [
                AnonymousStructRange(relatedPP, self.word_size) for relatedPP in relatedPPs]
            tgtPtrs = [tgtAnon.getPointersValues()[pointerIndex]
                       for tgtAnon in tgtAnons]
            # 2 - Take the related PinnedPointers from this signature for the
            # n-th-pointer child PP of the first signature. If one of their
            # start addresses equals a computed pointer value, we have a
            # parent-child match in both parent and child types.
            ok = 0
            relatedTargetPPs = targetPP.relations[sig]  # children struct
            for relatedTargetPP in relatedTargetPPs:
                addr = AnonymousStructRange(relatedTargetPP, self.word_size).start
                log.debug('compare %d and %s' % (addr, tgtPtrs))
                if addr in tgtPtrs:
                    log.debug(
                        '** found a perfect match between %s and %s' %
                        (pp.sig, relatedTargetPP.sig))
                    ok += 1
                    # Type every candidate pointer now; stats on ap come later.
                    # TODO border case: multiple structs pointing to the same child.
                    _anon_parent = tgtAnons[tgtPtrs.index(addr)]
                    _parentStart = _anon_parent.start
                    parent = cache[sig].pinned[
                        cache[sig].pinned_start.index(_parentStart)]
                    child = cache[sig].pinned[
                        cache[sig].pinned_start.index(addr)]
                    perfect.append((parent, child))
            # Not ok: no related match on the first offset. That means the
            # targeted struct is either:
            # a) not starting with a pointer (the source pointer points before
            #    the target PinnedPointer) - weird, because if sig1 is ok, sigX
            #    should be ok too;
            # b) a bad aggregation in the target signature (target PP too big -
            #    maybe it can be cut in halves?);
            # c) the pointer still points to nowhere - nothing is certain.
            if ok != len(relatedTargetPPs):
                ok2 = False
                for tgtPtr in tgtPtrs:
                    # log.debug('NOT found a match between %s and %s'%(pp.sig, relatedTargetPP.sig))
                    sub = cache[sig].pinned
                    if tgtPtr in sub:
                        afound = sub[sub.index(tgtPtr)]
                        found = afound.pinnedPointer
                        log.info(
                            'Found %d content-pointed struct (not start) in %s' %
                            (sub.count(tgtPtr), sig))
                        log.info(' source pp was %s' % (pp))
                        for myrelatedPP in relatedPPs:
                            log.info(
                                ' source related pp was %s' %
                                (myrelatedPP))
                        log.info(
                            ' -- got a ptr to %s (0x%x)' %
                            (found, tgtPtr - found.getAddress()))
                        sameseq = False
                        # Get start == tgtpp.getAddress(n), and compare
                        # tgtpp.sequence[n:n+len].
                        log.info(
                            ' source target pp was %s (same seq == %s)' %
                            (targetPP, sameseq))
                        for mytargetPPrelated in relatedTargetPPs:
                            log.info(
                                " source's target's related pp was %s (0x%x)" %
                                (mytargetPPrelated, tgtPtr - mytargetPPrelated.getAddress()))
                        # We now know that type(found) should equal
                        # type(targetPP). Can we recalculate found and targetPP
                        # so they become related? What about the related pps of
                        # targetPP? They can be multiple instances... even
                        # then, their relation to targetPP must be severed: we
                        # have proof they are not the precise instance we are
                        # looking for.
                        seq1 = targetPP
                        ok2 = True
                        break
                    elif tgtPtr in cache[sig].pinned_lightly:
                        sub = cache[sig].pinned_lightly
                        afound = sub[sub.index(tgtPtr)]
                        found = afound.pinnedPointer
                        log.info(
                            'Found %d pointed struct in LIGHTLY %s' %
                            (sub.count(tgtPtr), sig))
                        log.info(' source pp was %s' % (pp))
                        for myrelatedPP in relatedPPs:
                            log.info(
                                ' source related pp was %s' %
                                (myrelatedPP))
                        log.info(' source target pp was %s' % (targetPP))
                        for mytargetPPrelated in relatedTargetPPs:
                            log.info(
                                " source's target's related pp was %s" %
                                (mytargetPPrelated))
                        log.info(' got %s' % (found))
                        ok2 = True
                        break
                if not ok2:
                    log.info(
                        'This one does not points anywhere to a common pinnedPointer struct %s' %
                        (sig))
                    break
        # All signatures were parsed, and we found a
        # type(parent->children_in_pos_x) identical for all parents.
        perfectSigs = set([parent.sig() for parent, child in perfect])
        if ok and len(perfectSigs) == len(self.signatures):
            # Save that as a perfect match: pp and relatedPP can be id-equal,
            # targetPP and all perfect[] can be id-equal.
            for parent, child in perfect:
                _mysig = parent.pinnedPointer.sig
                parent.setPointerType(pointerIndex, child)
            return True
        return False
def savePinned(cacheValues, sig, offset, match_len, word_size):
    """Create a PinnedPointers for sig.sig[offset:offset+match_len] and cache it.

    The new PinnedPointers is appended to cacheValues under its structLen()
    key, so equal-sized candidates can later be grouped across signatures.
    Returns the created PinnedPointers.
    """
    pinned = sig.sig[offset:offset + match_len]
    pp = PinnedPointers(pinned, sig, offset, word_size)
    # setdefault replaces the manual "if key not in dict: dict[key] = []" dance.
    cacheValues.setdefault(pp.structLen(), []).append(pp)
    return pp
def search(opts):
    """Run the pattern-discovery search on the parsed CLI options."""
    # NOTE(review): `make` is not defined anywhere in the visible scope of
    # this module - confirm it is provided elsewhere before calling.
    make(opts)
    pass
def argparser():
    """Build the command-line parser for the pattern-discovery tool.

    Returns an argparse.ArgumentParser whose defaults route execution to
    search().
    """
    parser = argparse.ArgumentParser(
        prog='haystack-pattern',
        description='Do a discovery structure pattern search.')
    parser.add_argument(
        '--debug',
        action='store_true',
        help='Debug mode on.')
    parser.add_argument(
        'dumpfiles',
        type=argparse.FileType('rb'),
        action='store',
        nargs='*',
        help='Source memory dump by haystack.')
    parser.set_defaults(func=search)
    return parser
def main(argv):
    """Entry point: parse argv, configure logging and dispatch to the selected command.

    :param argv: command-line arguments, without the program name.
    """
    parser = argparser()
    opts = parser.parse_args(argv)
    level = logging.DEBUG if opts.debug else logging.INFO
    logging.basicConfig(level=level)
    logging.getLogger('haystack').setLevel(logging.INFO)
    # Fixed: this line was accidentally duplicated in the original.
    logging.getLogger('dumper').setLevel(logging.INFO)
    opts.func(opts)
# def tests():
# '''
#import pattern
#pattern.main('../outputs/skype.1.a ../outputs/skype.2.a ../outputs/skype.3.a'.split())
# cacheValues=pattern.cache
#common = pattern.common
#mapper = pattern.mapper
#
#'''
# pass
if __name__ == '__main__':
    # Script entry point: forward CLI args (minus the program name).
    main(sys.argv[1:])
| StarcoderdataPython |
210560 | from sbi.radio_device.net_device import RadioNetDevice
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "{<EMAIL>, <EMAIL>"
class LteNetDevice(RadioNetDevice):
    '''
    Base Class for LTE Network Device.

    Concrete LTE device implementations are expected to subclass this and
    provide the abstract radio-configuration operations.
    '''

    def configure_mimo_mode(self, mimo_mode):
        '''
        Configures the MIMO mode.

        Abstract: subclasses must override; the base implementation always
        raises NotImplementedError.
        '''
        raise NotImplementedError
6438032 | <gh_stars>0
import turtle as tt

# Screen and drawing-turtle setup.
tl = tt.Screen()
st = tt.RawTurtle(tl)

cont = 0
# Placeholder slots; each is overwritten below with a recorded turtle position.
ps = [0, 1, 2, 3, 4, 5, 6, 7]

st.speed(0)
st.width(5)
st.color('yellow', 'red')
st.ht()
st.up()
st.goto(-50, -100)
# Walk an octagon (8 sides of length 120, turning 45 degrees each time),
# recording the halfway point of each side in ps (fd 60, record, fd 60).
for i in range(1, 9):
    st.fd(60)
    ps[cont] = st.pos()
    # st.write(cont)
    cont += 1
    st.fd(60)
    # st.write(cont)
    st.left(360 / 8)
print(ps)

# Connect every other recorded point: two overlapping quadrilaterals filled
# as a single shape (even-indexed points, then odd-indexed points).
st.goto(ps[0])
st.down()
st.begin_fill()
st.goto(ps[6])
st.goto(ps[4])
st.goto(ps[2])
st.goto(ps[0])
st.up()
st.goto(ps[1])
st.down()
st.goto(ps[7])
st.goto(ps[5])
st.goto(ps[3])
st.goto(ps[1])
st.end_fill()
tt.exitonclick()
| StarcoderdataPython |
66187 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`Data` sub class to be used as a base for data containers that represent base python data types."""
from functools import singledispatch
from .data import Data
__all__ = ('BaseType', 'to_aiida_type')
@singledispatch
def to_aiida_type(value):
    """Turns basic Python types (str, int, float, bool) into the corresponding AiiDA types."""
    # Base (unregistered-type) case: no conversion is known for this value.
    message = f'Cannot convert value of type {type(value)} to AiiDA type.'
    raise TypeError(message)
class BaseType(Data):
    """`Data` sub class to be used as a base for data containers that represent base python data types."""

    def __init__(self, *args, **kwargs):
        """Store the optional positional value, defaulting to ``self._type()``.

        :raises RuntimeError: if the subclass did not define the `_type` class member.
        """
        # `_type` is the wrapped python type (e.g. str, int, float, bool);
        # subclasses must declare it. A plain hasattr check replaces the
        # original try/getattr/except-AttributeError dance.
        if not hasattr(self, '_type'):
            raise RuntimeError('Derived class must define the `_type` class member')

        super().__init__(**kwargs)

        # First positional argument (if any) is the initial value; otherwise
        # default-construct one of the declared type.
        value = args[0] if args else self._type()  # pylint: disable=no-member
        self.value = value

    @property
    def value(self):
        """The wrapped python value, or None when never set."""
        return self.get_attribute('value', None)

    @value.setter
    def value(self, value):
        # Coerce through `_type` so the stored attribute always has the declared type.
        self.set_attribute('value', self._type(value))  # pylint: disable=no-member

    def __str__(self):
        return f'{super().__str__()} value: {self.value}'

    def __eq__(self, other):
        # Compare by wrapped value; another BaseType is unwrapped first.
        if isinstance(other, BaseType):
            return self.value == other.value
        return self.value == other

    def __ne__(self, other):
        if isinstance(other, BaseType):
            return self.value != other.value
        return self.value != other

    def new(self, value=None):
        """Return a fresh, unstored node of the same class wrapping `value`."""
        return self.__class__(value)
263670 | <gh_stars>100-1000
"""interactive SNMP tool"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
try:
from snimpy._version import __version__ # nopep8
except ImportError:
__version__ = '0.0~dev'
| StarcoderdataPython |
210374 | """
-----------------------------------------------------------------
RecSys Challenge 2018 - Team Latte
_..,---,.._
.-;'-.,___,.-'; <NAME> [<EMAIL>]
(( | | 2018.07.01
` \ /
_ `,.___.,'-
( '-----' )
-.._______..-
-----------------------------------------------------------------
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import time
import pickle
from collections import defaultdict
import pandas as pd
import metrics
# NOTE(review): `start` timing value is recorded but never reported.
start = time.time()

# Require exactly one CLI argument: the submission CSV to score.
if len(sys.argv) != 2:
    print()
    print('----> Usage: python3 validate.py file_to_be_evaluated.csv')
    print()
    exit(1)

input_file = sys.argv[1]
print('- Evaluation file: {}'.format(input_file))

# Submission format: one header row (skipped), then rows of
# `pid, rec_1, ..., rec_500`.
df = pd.read_csv(input_file, sep=',', skiprows=1, header=None)
data = df.to_dict('records')
pid_recs_map = {}
for d in data:
    pid = d[0]
    recs = [d[i] for i in range(1, 501)]
    pid_recs_map[pid] = recs

# Track metadata keyed by track, used by the artist-fallback R-precision.
with open('track_to_data_all.pickle', 'rb') as handle:
    track_to_data = pickle.load(handle)

# Load validation set: pid -> {"tracks": [...], "sample_type": ...}.
with open('val_holdout_data_10000.pickle', 'rb') as handle:
    val_holdout_tracks = pickle.load(handle)

# Overall accumulators.
sum_rprec = 0
sum_ndcg = 0
sum_clicks = 0
sum_rprec_arti = 0

# Per-sample-type accumulators.
rprec_by_group = defaultdict(float)
ndcg_by_group = defaultdict(float)
clicks_by_group = defaultdict(float)
rprec_arti_by_group = defaultdict(float)
group_counts = defaultdict(int)

total_playlists = len(pid_recs_map)
for pid in pid_recs_map:
    recs = pid_recs_map[pid]
    holdout_tracks = [track['track_uri'] for track in val_holdout_tracks[pid]["tracks"]]

    # Score this playlist's recommendations against its holdout tracks.
    rprec = metrics.r_precision(holdout_tracks, recs)
    ndcg = metrics.ndcg(holdout_tracks, recs, 500)
    clicks = metrics.playlist_extender_clicks(holdout_tracks, recs, 500)
    rprec_arti = metrics.r_precision_with_artist_fallback(holdout_tracks, recs, track_to_data)

    sum_rprec += rprec
    sum_ndcg += ndcg
    sum_clicks += clicks
    sum_rprec_arti += rprec_arti

    sample_type = val_holdout_tracks[pid]["sample_type"]
    rprec_by_group[sample_type] += rprec
    ndcg_by_group[sample_type] += ndcg
    clicks_by_group[sample_type] += clicks
    rprec_arti_by_group[sample_type] += rprec_arti
    group_counts[sample_type] += 1

# Report averages per sample type, then overall.
for sample_type, count in group_counts.items():
    print(sample_type, "RPrec: ", rprec_by_group[sample_type] / count, ", NDCG: ", ndcg_by_group[sample_type] / count, ", Clicks: ", clicks_by_group[sample_type] / count, ", RPrec artist: ", rprec_arti_by_group[sample_type] / count)
print()
print("Overall RPrec: ", sum_rprec / total_playlists, ", NDCG: ", sum_ndcg / total_playlists, ", Clicks: ", sum_clicks / total_playlists, ", RPrec artist: ", sum_rprec_arti / total_playlists)
print()
1741492 | '''
Created on May 23, 2012
@author: <NAME>
'''
N = 8
def bin2gray(n):
    """Convert a plain binary integer to its binary-reflected Gray code."""
    shifted = n >> 1
    return n ^ shifted
def gray2bin(n):
    """Convert a binary-reflected Gray code back to a plain binary integer.

    Generalized: the original implementation was hard-wired to the module
    constant N (8 bits) and left higher bits unconverted; this version uses
    the standard shift-and-xor algorithm, which is identical for all values
    fitting in N bits and correct for any non-negative width. Each binary
    bit is the XOR of all Gray bits at or above it, i.e. the XOR of all
    right-shifts of the input.
    """
    mask = n >> 1
    while mask:
        n ^= mask
        mask >>= 1
    return n
def printGray(n):
    '''
    Print a range of gray code recursively.

    Prints each n-bit code as a list of character digits, in reflected-Gray
    order, via two mutually recursive builders.
    '''
    # Append a 0 branch, then recurse: 0-subtree in normal order,
    # 1-subtree reflected (handled by add1).
    def add0(pre, i):
        if(i == 0):
            print(list(pre))
            return
        add0(pre + "0", i - 1)
        add1(pre + "1", i - 1)

    # Mirror of add0: emits the reflected half of the sequence.
    def add1(pre, i):
        if(i == 0):
            print(list(pre))
            return
        add0(pre + "1", i - 1)
        add1(pre + "0", i - 1)
    add0("0", n - 1)
    add1("1", n - 1)
def generateGray(n):
    """Return the n-bit binary-reflected Gray code sequence as a list of ints."""
    if n == 0:
        return []
    codes = [0]
    for bit in range(n):
        mask = 1 << bit
        # Reflect the sequence built so far and set the new high bit on the copy.
        codes += [code | mask for code in reversed(codes)]
    return codes
if __name__=='__main__':
    # Demo: print the 4-bit Gray codes as N(=8)-digit binary strings; adding
    # `1 << N` then slicing [3:] strips '0b1' and keeps the leading zeros.
    print([bin(i + (1 << N))[3:] for i in generateGray(4)])
4836509 | from enum import Enum
class ExchangeType(Enum):
    """Defines all possible exchange types.

    The member values are the lowercase exchange-type name strings
    ("direct", "fanout", "topic", "headers").
    """
    DIRECT = "direct"
    """direct exchange"""
    FANOUT = "fanout"
    """fanout exchange"""
    TOPIC = "topic"
    """topic exchange"""
    HEADERS = "headers"
    """headers exchange"""
9668305 | <reponame>Nano2PlastProject/InteractiveFullMulti
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 14:07:59 2021
@author: PradoDomercq
"""
import numpy as np
# Create a vector of compartment volumes per river section for each species.
def volumesVector(Clist, compartments_prop):
    """Return, for each species code in Clist, its compartment volume in m3.

    Each code is parsed positionally: characters [2:-3] give the river
    section and character [-3] the compartment id (assumes fixed-width
    codes - TODO confirm against the code generator). compartments_prop is
    expected to expose 'riverSection' and 'compartment' columns plus
    depth_m / length_m / width_m per row.
    """
    sp_RScomp_vol_m3 = []
    for sp3 in Clist:
        rivS = sp3[2:-3]
        cmp3 = sp3[-3]
        # Generate the location index from the compartment properties
        # (first row matching both river section and compartment).
        vol_index = np.where((compartments_prop['riverSection'] == int(rivS)) & (compartments_prop['compartment'] == int(cmp3)))[0][0]
        # Volume = depth * length * width for that compartment row.
        sp_RScomp_vol_m3.append(compartments_prop.depth_m.loc[vol_index] * compartments_prop.length_m.loc[vol_index] * compartments_prop.width_m.loc[vol_index])
    return sp_RScomp_vol_m3
4971251 | from aip import AipNlp
""" 你的 APPID AK SK """
APP_ID = '15437508'
API_KEY = 'N0U7NIVsKu1IOuwlij9azsv4'
SECRET_KEY = '<KEY>'
client = AipNlp(APP_ID, API_KEY, SECRET_KEY) | StarcoderdataPython |
3569886 | """DuckDB backend."""
from __future__ import annotations
from pathlib import Path
import duckdb
import sqlalchemy as sa
import ibis.expr.schema as sch
from ibis.backends.base.sql.alchemy import BaseAlchemyBackend
from .compiler import DuckDBSQLCompiler
class Backend(BaseAlchemyBackend):
    """DuckDB Ibis backend built on SQLAlchemy."""

    name = "duckdb"
    compiler = DuckDBSQLCompiler

    def current_database(self) -> str:
        """Name of the active database; DuckDB always exposes ``main``."""
        return "main"

    @property
    def version(self) -> str:
        """Version string of the installed ``duckdb`` package."""
        # TODO: there is a `PRAGMA version` we could use instead
        try:
            import importlib.metadata as importlib_metadata
        except ImportError:
            # TODO: remove this when Python 3.9 support is dropped
            import importlib_metadata
        return importlib_metadata.version("duckdb")

    def do_connect(
        self,
        path: str | Path = ":memory:",
        read_only: bool = False,
    ) -> None:
        """Create an Ibis client connected to a DuckDB database.

        Parameters
        ----------
        path
            Path to a duckdb database
        read_only
            Whether the database is read-only
        """
        # Keep ":memory:" verbatim; resolve everything else to an absolute path.
        db_target = path if path == ":memory:" else Path(path).absolute()
        engine = sa.create_engine(
            f"duckdb:///{db_target}",
            connect_args={"read_only": read_only},
        )
        super().do_connect(engine)
        self._meta = sa.MetaData(bind=self.con)

    def fetch_from_cursor(
        self,
        cursor: duckdb.DuckDBPyConnection,
        schema: sch.Schema,
    ):
        """Materialize a cursor's result as a DataFrame shaped by *schema*."""
        frame = cursor.cursor.fetch_df()
        return schema.apply_to(frame)
1793068 | #!/usr/bin/env python
# coding=utf-8
# Stan 2012-03-10
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import logging
from sqlalchemy import MetaData
from ...reg import reg_object1
from ...reg.result import *
from .stuff.backwardcompat import *
from .stuff import models
from .stuff.sheet_funcs import get_value, get_index
def suit_name(colname, column_names):
    """Return the first name of the form ``<colname>.<k>`` (k = 1, 2, ...)
    that does not already occur in *column_names*.
    """
    suffix = 1
    candidate = "{0}.{1}".format(colname, suffix)
    while candidate in column_names:
        suffix += 1
        candidate = "{0}.{1}".format(colname, suffix)
    return candidate
def insert_dict(names, bind_dict):
    # NOTE(review): this function looks unfinished -- ``table_name``, ``keys``
    # and ``values`` are not defined anywhere in scope, neither parameter is
    # used, and nothing is returned, so any call raises NameError.  Kept
    # as-is pending cleanup or removal.
    sql = "INSERT INTO %s (%s) VALUES (%s)" % (table_name, keys, values)
def reg_sheet(sh, runtime, i, FILE=None):
    """Register worksheet *sh* (name, position *i*, dimensions, visibility)
    against *FILE*, through the DB session when one is present in *runtime*.

    Returns ``(SHEET record, session-or-None)``.
    """
    sheet_dict = {
        '_file': FILE,
        # '_fileprocessing': FILE,
        'name': sh.name,
        'seq': i,
        'ncols': sh.ncols,
        'nrows': sh.nrows,
        'visible': sh.visibility,
    }
    session = runtime.get('session')
    if session:
        SHEET = reg_object1(session, models.Sheet, sheet_dict, FILE)
    else:
        # No DB session available: fall back to the in-memory registration.
        SHEET = set_object(sheet_dict, FILE)
    return SHEET, session
def proceed_sheet(sh, runtime, i, FILE=None):
    """Register worksheet *sh* and dump its rows into a per-sheet SQL table.

    The table is created on demand; column names are taken from the sheet
    row ``options['names_row']`` (or generated as ``col_NN``), and five
    bookkeeping columns (sh_dir/sh_file/sh_sheet/sh_sheets_id/sh_y) are
    prepended.  Rows may be filtered by a check column.  Outcomes are
    recorded through the reg_* helpers; insert errors are logged per-row
    rather than aborting the sheet.

    Fix: the ``map(...)`` result used for decimal-point conversion is now
    wrapped in ``list(...)`` -- downstream code concatenates and indexes it
    as a list, which failed on Python 3 (a no-op on Python 2, consistent
    with this file's ``__future__`` compatibility imports).
    """
    SHEET, session = reg_sheet(sh, runtime, i, FILE)
    options = runtime.get('options', {})
    flush = options.get('flush')
    tablename = options.get('tablename', '_'.join(sh.name.strip().split()))
    prefix = options.get('prefix', 'xls')
    isolated = options.get('isolated')
    names_row = options.get('names_row')
    names = options.get('names', [])
    ncols = options.get('ncols', sh.ncols)
    nrows = options.get('nrows', sh.nrows)
    start_from = options.get('start_from', 0 if names_row is None else names_row + 1)
    decimal_point = options.get('decimal_point', '.')
    if flush:
        session.flush()
    if isolated:
        # Per-file/per-sheet table so different files never share a table.
        tablename = '{0}_{1}_{2}'.format(FILE.id, tablename, i)
    if prefix:
        tablename = '{0}_{1}'.format(prefix, tablename)
    sh_column_names = []
    w = len(str(ncols))
    # Pick the column names from the designated header row, if one was given.
    if names_row is not None and sh.nrows > names_row:
        for i in range(min(ncols, sh.ncols)):
            # colname = sh.cell(names_row, i).value
            colname = get_value(sh, names_row, i)
            if colname:
                if isinstance(colname, float):
                    colname = 'cf_{0}'.format(colname)
                if names and colname not in names:
                    reg_warning(SHEET, "Extra column name '{0}' found in the sheet!".format(colname))
            else:
                colname = 'col_{0:0{width}}'.format(i, width=w)
            if colname in sh_column_names:
                if colname in names:
                    reg_warning(SHEET, "Possibly another column '{0}' will get lost!".format(colname))
                colname = suit_name(colname, sh_column_names)
            sh_column_names.append(colname)
    # Pad sh_column_names up to ncols when the sheet has fewer columns.
    sh_column_names = sh_column_names + ['col_{0:0{width}}'.format(j, width=w) for j in range(len(sh_column_names), ncols)]
    # Determine the column used to decide which rows get written.
    col_index = None
    check_name = options.get('check_name')
    check_column = options.get('check_column')
    if check_name:
        if check_name in sh_column_names:
            col_index = sh_column_names.index(check_name)
        else:
            msg = "Column '{0}' is not in the list of the column names".format(check_name)
            logging.warning(msg)
            reg_warning(SHEET, msg)
    if check_column:
        if col_index is None:
            col_index = get_index(check_column)
        else:
            msg = "Parameters 'check_name' and 'check_column' set simultaneously!"
            logging.warning(msg)
            reg_warning(SHEET, msg)
    # When an explicit name mapping is requested, prefer the existing table's
    # column layout (skipping the 4 leading bookkeeping columns + sh_y).
    if names:
        metadata = MetaData(session.bind, reflect=True)
        if tablename in metadata.tables.keys():
            mtable = metadata.tables.get(tablename)
            column_names0 = [j.name for j in mtable.c][4:]
        else:
            column_names0 = names
    else:
        column_names0 = sh_column_names
    # Prepend the system (bookkeeping) columns.
    column_names = ['sh_dir', 'sh_file', 'sh_sheet', 'sh_sheets_id', 'sh_y'] + column_names0
    tablecols = len(column_names)
    sql = 'CREATE TABLE IF NOT EXISTS "{0}" ("{1}");'.format(tablename, '","'.join(column_names))
    session.execute(sql)
    reg_debug(SHEET, column_names)
    # Known issue: if two files share a sheet name and the second sheet has
    # more columns, the INSERT below fails against the narrower table.
    if sh.nrows > start_from:
        qmarks = ['?' for j in range(tablecols)]
        sql = 'INSERT INTO "{0}" VALUES ({1});'.format(tablename, ','.join(qmarks))
        for i in range(start_from, sh.nrows):
            needful = get_value(sh, i, col_index) if col_index is not None else True
            if needful:
                sh_values = sh.row_values(i, 0, ncols)
                if decimal_point != '.':
                    # list(...) keeps this a real list on Python 3 (see docstring).
                    sh_values = list(map(lambda x: str(x).replace('.', decimal_point) if isinstance(x, float) else x, sh_values))
                if names:
                    bind_params = []
                    for j in column_names0:
                        if j in sh_column_names:
                            bind_params.append(sh_values[sh_column_names.index(j)])
                        else:
                            bind_params.append(None)
                else:
                    bind_params = sh_values + [None for j in range(len(sh_values), ncols)]
                bind_params = [FILE._dir.name, FILE.name, SHEET.name, SHEET.id, i] + bind_params
                try:
                    session.bind.execute(sql, bind_params)
                    reg_ok(SHEET)
                except Exception as e:
                    reg_exception(SHEET, e)
                    # status.error = "Error during handle sheet '{0}'!".format(SHEET.name)
| StarcoderdataPython |
3356744 | # 누적합
def solution(board, skill):
    """Apply ranged damage/heal skills with a 2-D difference array (imos
    method) and count the cells that stay strictly positive.

    Each skill is [type, r1, c1, r2, c2, d]; type 1 attacks (subtracts d)
    and type 2 heals (adds d) over the rectangle (r1,c1)-(r2,c2).
    *board* is mutated in place; returns the survivor count.

    Fix: removed a leftover debug ``print(board)`` before the return.
    """
    rows, cols = len(board), len(board[0])
    diff = [[0] * (cols + 1) for _ in range(rows + 1)]
    for skill_type, r1, c1, r2, c2, d in skill:
        if skill_type == 1:
            # Attacks are applied as a negative delta.
            d = -d
        diff[r1][c1] += d
        diff[r2 + 1][c2 + 1] += d
        diff[r1][c2 + 1] -= d
        diff[r2 + 1][c1] -= d
    # Prefix-sum down the columns, then across the rows, to expand the
    # corner markers into per-cell deltas.
    for j in range(cols):
        for i in range(1, rows):
            diff[i][j] += diff[i - 1][j]
    for i in range(rows):
        for j in range(1, cols):
            diff[i][j] += diff[i][j - 1]
    answer = 0
    for i in range(rows):
        for j in range(cols):
            board[i][j] += diff[i][j]
            if board[i][j] > 0:
                answer += 1
    return answer
# Sample input from the 2022 Kakao "destroyed buildings" problem; the
# original code returns 10 survivors for this case.
board = [[5,5,5,5,5],[5,5,5,5,5],[5,5,5,5,5],[5,5,5,5,5]]
skill = [[1,0,0,3,4,4],[1,2,0,2,3,2],[2,1,0,3,1,2],[1,0,1,3,3,1]]
print(solution(board, skill))
3405169 | import hashlib
def md5_string(string_input: str) -> str:
    """Return the hexadecimal MD5 digest of *string_input*.

    Bug fix: ``hashlib.md5().update`` only accepts bytes, so the original
    ``m.update(string_input)`` raised TypeError for the annotated ``str``
    use case.  Text is now encoded as UTF-8 first; callers that were
    already passing ``bytes`` keep working unchanged.
    """
    if isinstance(string_input, str):
        data = string_input.encode("utf-8")
    else:
        data = string_input
    m = hashlib.md5()
    m.update(data)
    return m.hexdigest()
| StarcoderdataPython |
1981338 | <filename>version.py
short_name="godot"
name="Godot Engine"
major=2
minor=0
status="alpha"
| StarcoderdataPython |
6479982 | """This package is divided into submodules, but everything is imported in the root."""
# flake8: noqa
from .hydrator import *
from .manager import *
from .manager_factory import *
from .relation import *
from .repository import *
from .request import *
from .resource import *
__version__ = '0.1.0'
| StarcoderdataPython |
4890355 | <gh_stars>0
# Copyright 2022 <NAME>.
#
# This file is part of a4's bookgen.
# a4's bookgen is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# a4's bookgen is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License along
# with a4's bookgen. If not, see <https://www.gnu.org/licenses/>.
from typing import List
import json
import requests
import backoff
BASE_URL = "https://explorer.lichess.ovh/masters"
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException)
def process_prefix(moves: List[str]):
    """Recursively fetch lichess masters opening stats for the line *moves*.

    Returns a tree dict ``{"total", "moves": [{"move", "count",
    "probability", "children"}, ...]}``, or None once a position has fewer
    than 50000 recorded games.  Any requests error is retried with
    exponential backoff (decorator above).
    """
    params = {"play": ",".join(moves)}
    resp = requests.get(BASE_URL, params=params)
    print(f"{resp.url} -> {resp.status_code}")
    resp.raise_for_status()
    body = resp.json()
    doc = {}
    doc["total"] = body["white"] + body["draws"] + body["black"]
    # Prune rare positions to keep the tree (and the request count) bounded.
    if doc["total"] < 50000:
        return None
    doc_moves = []
    for mov in body["moves"]:
        entry = {}
        entry["count"] = mov["white"] + mov["draws"] + mov["black"]
        entry["move"] = mov["uci"]
        entry["children"] = process_prefix(moves + [mov["uci"]])
        doc_moves.append(entry)
    for mov in doc_moves:
        mov["probability"] = mov["count"] / doc["total"]
    doc["moves"] = doc_moves
    return doc
def main():
    """Build the full opening tree from the initial position and write it,
    pretty-printed, to src/book.json."""
    document = process_prefix([])
    with open("src/book.json", "w") as book:
        json.dump(document, book, indent=2)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9667578 | <reponame>1067511899/tornado-learn
import numpy as np
import matplotlib.pyplot as plt
###########1.数据生成部分##########
def f(x1, x2):
    """Target surface: 0.5*sin(x1) + 0.5*cos(x2) + 3 + 0.1*x1."""
    return 0.5 * np.sin(x1) + 0.5 * np.cos(x2) + 3 + 0.1 * x1
def load_data():
    """Build a noisy 500-point training set and a clean 100-point test set
    sampled from ``f``; each row is [x1, x2, y].

    NOTE(review): the training target is ``f(...) + (np.random.random(1) -
    0.5)``, i.e. a shape-(1,) array mixed with scalars, which makes
    ``data_train`` inhomogeneous -- newer numpy versions reject such ragged
    rows.  Confirm whether a scalar noise term was intended.
    """
    x1_train = np.linspace(0, 50, 500)
    x2_train = np.linspace(-10, 10, 500)
    data_train = np.array([[x1, x2, f(x1, x2) + (np.random.random(1) - 0.5)] for x1, x2 in zip(x1_train, x2_train)])
    # Test inputs are jittered, but the targets carry no noise.
    x1_test = np.linspace(0, 50, 100) + 0.5 * np.random.random(100)
    x2_test = np.linspace(-10, 10, 100) + 0.02 * np.random.random(100)
    data_test = np.array([[x1, x2, f(x1, x2)] for x1, x2 in zip(x1_test, x2_test)])
    return data_train, data_test
train, test = load_data()
x_train, y_train = train[:, :2], train[:, 2]  # first two columns are x1, x2; the third is y (with random noise)
x_test , y_test = test[:, :2], test[:, 2]  # same layout, but this y carries no noise
########### 2. Regression part ##########
def try_different_method(model):
    """Fit *model* on the training split, score it on the test split, and
    plot predictions against ground truth (opens a matplotlib window)."""
    model.fit(x_train, y_train)
    score = model.score(x_test, y_test)
    result = model.predict(x_test)
    print(result)
    plt.figure()
    plt.plot(np.arange(len(result)), y_test, 'go-', label='true value')
    plt.plot(np.arange(len(result)), result, 'ro-', label='predict value')
    plt.title('score: %f' % score)
    plt.legend()
    plt.show()
########### 3. Choose a concrete regressor ##########
#### 3.1 Decision-tree regression ####
from sklearn import tree
model_DecisionTreeRegressor = tree.DecisionTreeRegressor()
#### 3.2 Linear regression ####
from sklearn import linear_model
model_LinearRegression = linear_model.LinearRegression()
#### 3.3 SVM regression ####
from sklearn import svm
model_SVR = svm.SVR()
print(model_SVR)
#### 3.4 KNN regression ####
from sklearn import neighbors
model_KNeighborsRegressor = neighbors.KNeighborsRegressor()
print(model_KNeighborsRegressor.get_params())
#### 3.5 Random-forest regression ####
from sklearn import ensemble
model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)  # 20 decision trees
#### 3.6 Adaboost regression ####
from sklearn import ensemble
model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=50)  # 50 decision trees
#### 3.7 GBRT regression ####
from sklearn import ensemble
model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor(n_estimators=100)  # 100 decision trees
#### 3.8 Bagging regression ####
from sklearn.ensemble import BaggingRegressor
model_BaggingRegressor = BaggingRegressor()
#### 3.9 ExtraTree (extremely randomized tree) regression ####
from sklearn.tree import ExtraTreeRegressor
model_ExtraTreeRegressor = ExtraTreeRegressor()
# print(model_ExtraTreeRegressor.)
########### 4. Run the chosen method ##########
try_different_method(model_SVR)
9687220 | import uuid
from dataclasses import dataclass, asdict, field
@dataclass
class User():
    """A user record with a generated id and the ids of followed users."""
    username: str
    id: uuid.UUID = field(default_factory=uuid.uuid4)
    follows: list = field(default_factory=list)
    @classmethod
    def from_dict(cls, d):
        """Build a User from a mapping; any non-dict input yields None."""
        if not isinstance(d, dict):
            return None
        return cls(**d)
    def to_dict(self):
        """Serialize to a plain dict via dataclasses.asdict."""
        return asdict(self)
    def add_follow(self, follow_user_id):
        """Record that this user follows *follow_user_id*."""
        self.follows.append(follow_user_id)
6641569 | from django.http import JsonResponse
def index(request):
    """Catch-all view: always answers with a fixed JSON error payload."""
    return JsonResponse({'error': 'sup hacker'})
| StarcoderdataPython |
1936376 | <filename>openshift_tools/monitoring/dockerutil.py<gh_stars>0
#!/usr/bin/env python2
# vim: expandtab:tabstop=4:shiftwidth=4
'''
wrapper for interfacing with docker
'''
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
from openshift_tools.timeout import timeout
from openshift_tools.cgrouputil import CgroupUtil
import re
class DockerDiskStats(object):
    ''' Class to store docker storage information (a plain DTO).

        All *_space_* fields are sizes in GB (see DockerUtil, which fills
        them); is_loopback flags a loopback-backed storage driver.
    '''
    # Reason: disable pylint too-many-instance-attributes and too-few-public-methods
    # because this is a DTO
    # Status: permanently disabled
    # pylint: disable=too-many-instance-attributes,too-few-public-methods
    def __init__(self):
        ''' construct the object with every field unset
        '''
        self.data_space_used = None
        self.data_space_available = None
        self.data_space_total = None
        self.data_space_percent_available = None
        self.metadata_space_used = None
        self.metadata_space_available = None
        self.metadata_space_total = None
        self.metadata_space_percent_available = None
        self.is_loopback = None
    def __repr__(self):
        ''' make it easy to see what's inside of this object
        '''
        # Bug fix: the metadata labels were misspelled "metadtata_space_*".
        return 'DockerDiskStats(\n' + \
               '    is loopback: %r\n' % self.is_loopback + \
               '    data_space_used: %r\n' % self.data_space_used + \
               '    data_space_available: %r\n' % self.data_space_available + \
               '    data_space_total: %r\n' % self.data_space_total + \
               '    data_space_percent_available: %r\n' % self.data_space_percent_available + \
               '    metadata_space_used: %r\n' % self.metadata_space_used + \
               '    metadata_space_available: %r\n' % self.metadata_space_available + \
               '    metadata_space_total: %r\n' % self.metadata_space_total + \
               '    metadata_space_percent_available: %r\n' % self.metadata_space_percent_available + \
               ')'
class ParseError(Exception):
    """Raised when the ``docker info`` output cannot be parsed."""
class DockerUtil(object):
    ''' Utility for interacting with Docker through an injected docker
        client; `docker info` output is fetched once (with a timeout) and
        cached for the lifetime of the instance.
    '''
    def __init__(self, docker_client=None, max_wait=15):
        ''' construct the object

            docker_client: a docker API client; max_wait: seconds allowed
            for the `docker info` call before the timeout fires.
        '''
        self._docker = docker_client
        self._max_wait = max_wait
        self.__docker_info = None
    @property
    def _cached_docker_info(self):
        ''' Returns 'docker info' output as long as it doesn't take too long '''
        if not self.__docker_info:
            with timeout(seconds=self._max_wait):
                self.__docker_info = self._docker.info()
        return self.__docker_info
    @staticmethod
    def convert_to_size_in_gb(value):
        ''' Parses out the number and unit type and normalizes the data to GB

            Accepts strings like "12.5 GB"; raises ParseError on an
            unrecognized unit.
        '''
        matches = re.match(r"^(?P<num>[0-9.]+) (?P<unit>[a-zA-Z]+)", value)
        num = matches.group("num")
        unit = matches.group("unit")
        if unit == "TB":
            return float(num) * 1024
        if unit == "GB":
            return float(num)
        if unit == "MB":
            return float(num) / 1024
        if unit == "kB":
            return float(num) / (1024 * 1024)
        raise ParseError("Unknown Unit Size")
    def _get_driver_status_attr(self, key):
        ''' Gets the value for the specified key from the DriverStatus hash since it's
            an array of key/value pairs instead of a normal dict (PITA to work with otherwise)
        '''
        # IndexError propagates when the key is absent (used deliberately below).
        return [a[1] for a in self._cached_docker_info['DriverStatus'] if a[0] == key][0]
    def get_disk_usage(self):
        ''' Gathers the docker storage disk usage stats and puts them in a DTO.
        '''
        dds = DockerDiskStats()
        dds.data_space_used = DockerUtil.convert_to_size_in_gb( \
            self._get_driver_status_attr('Data Space Used'))
        dds.data_space_available = DockerUtil.convert_to_size_in_gb( \
            self._get_driver_status_attr('Data Space Available'))
        dds.data_space_total = DockerUtil.convert_to_size_in_gb( \
            self._get_driver_status_attr('Data Space Total'))
        dds.metadata_space_used = DockerUtil.convert_to_size_in_gb( \
            self._get_driver_status_attr('Metadata Space Used'))
        dds.metadata_space_available = DockerUtil.convert_to_size_in_gb( \
            self._get_driver_status_attr('Metadata Space Available'))
        dds.metadata_space_total = DockerUtil.convert_to_size_in_gb( \
            self._get_driver_status_attr('Metadata Space Total'))
        # Determine if docker is using a loopback device
        # FIXME: find a better way than allowing this to throw
        try:
            self._get_driver_status_attr('Data loop file')
            dds.is_loopback = True
        except IndexError:
            dds.is_loopback = False
        # Work around because loopback lies about it's actual total space
        if not dds.is_loopback:
            dds.data_space_total = dds.data_space_used + dds.data_space_available
            dds.metadata_space_total = dds.metadata_space_used + dds.metadata_space_available
        dds.data_space_percent_available = (dds.data_space_available / dds.data_space_total) * 100
        dds.metadata_space_percent_available = (dds.metadata_space_available / dds.metadata_space_total) * 100
        return dds
    @staticmethod
    def normalize_ctr_name(docker_name):
        ''' Docker stores the name of the container with a leading '/'.
            This method changes the name into what you normally see in Docker output.
        '''
        return docker_name[1:]
    @staticmethod
    def ctr_name_matches_regex(ctr, ctr_name_regex):
        ''' Returns true or false if the ctr_name_regex is in the list of names
            Docker is storing for the container.
        '''
        result = [ctr_name
                  for ctr_name in ctr['Names']
                  if re.match(ctr_name_regex, DockerUtil.normalize_ctr_name(ctr_name))
                 ]
        return len(result) > 0
    def get_ctrs_matching_names(self, ctr_name_regexes):
        ''' Returns all of the containers that match any of the regexes passed in.

            Result is keyed by the container's first (normalized) name.
        '''
        retval = {}
        for ctr in self._docker.containers():
            for ctr_name_regex in ctr_name_regexes:
                if DockerUtil.ctr_name_matches_regex(ctr, ctr_name_regex):
                    retval[DockerUtil.normalize_ctr_name(ctr['Names'][0])] = ctr
        return retval
    @staticmethod
    def _get_cgroup_entity_name(docker_id):
        ''' Takes a docker id and returns the cgroup name for that container. '''
        return "docker-%s.scope" % docker_id
    def get_ctr_stats(self, ctr, use_cgroups=False):
        ''' Gathers and returns the container stats in an easy to consume fashion.

            With use_cgroups=True the stats are read from the container's
            cgroup; otherwise the (slower) docker stats API is used.
        '''
        raw_stats = None
        if use_cgroups:
            cgroup_name = DockerUtil._get_cgroup_entity_name(ctr['Id'])
            cgu = CgroupUtil(cgroup_name)
            raw_stats = cgu.raw_stats()
        else:
            raw_stats = self._docker.stats(ctr['Id'], stream=False)
        return CgroupUtil.raw_stats_to_dtos(raw_stats)
| StarcoderdataPython |
9736955 | #!/usr/bin/env python3
#--coding:utf-8 --
"""
findTargets.py
cLoops2 loops-centric analysis module.
Find the target genes for a set of regions, such as loops anchors or SNPs.
- []
"""
#sys
#3rd
import pandas as pd
import networkx as nx
from tqdm import tqdm
#cLoops2
from cLoops2.ds import Peak
from cLoops2.io import parseTxt2Loops
def parseIv(item):
chrom = item.split(":")[0]
start = int(item.split("|")[0].split(":")[1].split("-")[0])
end = int(item.split("|")[0].split(":")[1].split("-")[1])
return chrom, start, end
def readNet(f):
"""
Read the enhancer-promoter networks.
@return nx.Graph
@return cov, {chrom:{i:itemId}}
"""
cov = {}
ns = set()
G = nx.Graph()
for line in open(f):
line = line.split("\n")[0].split("\t")
G.add_edge(line[0], line[2], type=line[1])
#left anchor coverage
if line[0] not in ns:
lc, ls, le = parseIv(line[0])
if lc not in cov:
cov[lc] = {}
for i in range(ls, le + 1):
cov[lc][i] = line[0]
ns.add(line[0])
#right anchor coverage
if line[2] not in ns:
rc, rs, re = parseIv(line[2])
if rc not in cov:
cov[rc] = {}
for i in range(rs, re + 1):
cov[rc][i] = line[2]
ns.add(line[2])
for node in G.nodes():
n = node.split("|")[-1]
G.nodes[node]["type"] = n
return G, cov
def readTargets(f):
"""
Read the promoter target genes.
"""
ds = {}
for i, line in enumerate(open(f)):
if i == 0:
continue
line = line.split("\n")[0].split("\t")
ds[line[0]] = line[1]
return ds
def readBed(f):
"""
Read regions
"""
regions = []
for line in open(f):
line = line.split("\n")[0].split("\t")
if len(line) < 3:
continue
if len(line) > 3 and line[3] != "." or line[3] != "":
k = line[3]
else:
k = "|".join(line[:3])
peak = Peak()
peak.chrom = line[0]
peak.start = int(line[1])
peak.end = int(line[2])
peak.id = k
regions.append(peak)
return regions
def getTargets(G, cov, tgs, rs, fnOut):
"""
Get region targets through enhancer promoter linkage network.
"""
j = 0
k = 0
ds = {}
pathes = {}
for r in tqdm(rs):
ts = set()
if r.chrom not in cov:
continue
for i in range(r.start, r.end):
if i in cov[r.chrom]:
ts.add(cov[r.chrom][i])
#searching the net for targets
if len(ts) == 0:
continue
for t in ts:
if t.split("|")[-1] == "Promoter":
#direct targets
dt = [t]
#indirect targets
idts = {}
ns = list(nx.descendants(G, t))
#find all releated nodes
for n in ns:
if n == t:
continue
p = nx.algorithms.shortest_path(G, source=t, target=n)
if n.split("|")[-1] == "Promoter":
idts[n] = p
else:
dt = []
idts = {}
ns = list(nx.descendants(G, t))
#find all releated nodes
for n in ns:
if n == t:
continue
p = nx.algorithms.shortest_path(G, source=t, target=n)
#if n.split("|")[-1] == "Promoter" and len(p) > pathLengthCut:
if n.split("|")[-1] == "Promoter":
if len(p) == 2:
dt.append(n)
else:
idts[n] = p
if len(dt) == 0 and len(idts) == 0:
continue
dt = [tgs[tmp] for tmp in dt if tmp in tgs]
idt = [tgs[tmp] for tmp in idts.keys() if tmp in tgs]
ds[j] = {
"queryId": r.id,
"queryChrom": r.chrom,
"queryStart": r.start,
"queryEnd": r.end,
"overlappedAnchor": t,
"directTargetGenes": ",".join(dt),
"indirectTargetGenes": ",".join(idt),
}
j += 1
#record pathes
if len(idt) > 0:
for g, p in idts.items():
if g in tgs:
pathes[k] = {
"queryId": r.id,
"overlappedAnchor": t,
"indirectTargetGenes": tgs[g],
"path": ",".join(p),
}
k += 1
ds = pd.DataFrame(ds).T
ds = ds[[
"queryId", "queryChrom", "queryStart", "queryEnd", "overlappedAnchor",
"directTargetGenes", "indirectTargetGenes"
]]
pathes = pd.DataFrame(pathes).T
pathes = pathes[[
"queryId", "overlappedAnchor", "indirectTargetGenes", "path"
]]
ds.to_csv(fnOut + "_targetGenes.txt", sep="\t", index_label="recordId")
pathes.to_csv(fnOut + "_indirectTargetGenesPathes.txt",
sep="\t",
index_label="recordId")
def findTargets(
netf,
tgf,
fnOut,
fbed="",
):
"""
Find targets of a set of regions.
@param netf: str, output of cLoops2 anaLoops, _ep_net.sif file
@param tgf: str, output of cLoops2 anaLoops, _targets.txt file
@param bed: str, input querying bed file.
"""
print("reading networks and anchors")
G, cov = readNet(netf)
tgs = readTargets(tgf)
if fbed != "":
print("finding target genes of %s" % fbed)
#find regions targets
rs = readBed(fbed)
getTargets(G, cov, tgs, rs, fnOut)
| StarcoderdataPython |
3442276 | <reponame>Raghav714/compiler-programs
from collections import OrderedDict
def reverse_dict(dictx):
reverse_dict = OrderedDict()
key_list = []
for key in dictx:
key_list.append(key)
key_list.reverse()
for key in key_list:
reverse_dict[key] =dictx[key]
return reverse_dict
def first(grammar,terminal):
first_dict = OrderedDict()
grammar_first = reverse_dict(grammar)
for key in grammar_first:
each_prod = []
for element in grammar.copy()[key]:
if element[0:1] in terminal:
each_prod.append(element[0:1])
elif element in terminal:
each_prod.append(element)
if not each_prod:
each_prod = first_dict[element[0:1]]
first_dict[key] = each_prod
return first_dict
def check_prod(check_key,grammar):
dic = {}
for key in grammar:
pos_list = []
for element in grammar[key]:
pos = element.find(check_key)
if pos >= 0:
pos_list.append(pos)
if pos_list:
dic[key] = pos_list
return dic
def follow(grammar,terminal,first_dict):
follow_dict = OrderedDict()
for key in grammar:
each_prod = []
if key == "E":
each_prod.append("$")
elif check_prod(key,grammar):
pos = check_prod(key,grammar)
for found_key in pos:
for element in grammar[found_key]:
if key in element:
string = element
if (int(pos[found_key][0])+1)==len(string) and (found_key!=key):
each_prod.extend(follow_dict[found_key])
elif (int(pos[found_key][0])+1)!=len(string) and (key != found_key) :
each_prod.extend(first_dict[grammar[found_key][0][int(pos[found_key][0])+1]])
if "epsln" in each_prod:
each_prod.remove("epsln")
each_prod.extend(follow_dict[found_key])
elif key == found_key and (int(pos[found_key][0])+1)!=len(string):
each_prod.append(grammar[key][0][int(pos[found_key][0])+1])
follow_dict[key]=list(set(each_prod))
return follow_dict
def table(grammar,first_grammar,follow_grammar):
table = OrderedDict()
non_terminal = []
terminal = []
for nt in grammar:
non_terminal.append(nt)
terminal.extend(first_grammar[nt])
terminal.extend(follow_grammar[nt])
non_terminal = list(set(non_terminal))
terminal = list(set(terminal))
terminal.remove("epsln")
print "non terminal",non_terminal
print "terminal",terminal
for nt in non_terminal:
prod = []
for ter in terminal:
if ter in first_grammar[nt]:
if len(grammar[nt])==1:
prod.append(grammar[nt])
else:
for single_prod in grammar[nt]:
if single_prod[0] is ter:
prod.append([single_prod])
elif single_prod is ter:
prod.append([single_prod])
elif "epsln" in first_grammar[nt] and ter in follow_grammar[nt]:
prod.append(["epsln"])
else:
prod.append("error")
table[nt]=prod
return table
'''grammar = {}
le = input("length")
for i in range(le):
key = raw_input("key")
l = input("number of production")
lis = list()
for i in range(0,l):
lis.append(raw_input())
grammar.update({key:lis})'''
grammar = OrderedDict()
grammar["E"] = ["TA"]
grammar["A"] = ["+TA","epsln"]
grammar["T"] = ["FP"]
grammar["P"] = ["*FP","epsln"]
grammar["F"] = ["(F)","id"]
terminal = ["+","*","(",")","id","epsln"]
print "Original grammar"
for key, value in grammar.items():
print(key, value)
print "----------------------------------------------"
print "First"
first_grammar = first(grammar,terminal)
for key, value in first_grammar.items():
print(key, value)
print "----------------------------------------------"
print "Follow"
follow_grammar = follow(grammar,terminal,first_grammar)
for key, value in follow_grammar.items():
print(key, value)
print "----------------------------------------------"
ll1table=table(grammar,first_grammar,follow_grammar)
for key, value in ll1table.items():
print(key, value)
| StarcoderdataPython |
1809431 | #!/usr/bin/env python3
# Copyright (C) 2015-2018 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import sys, platform
# Automatically download setuptools if not available
try:
from setuptools import *
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
finally:
from setuptools import *
from glob import glob
# Guard: refuse to run on pre-3.5 interpreters.
# Bug fix: the original used the Python 2 statement form
# ``print >> sys.stderr, ...``, which is a SyntaxError on the very
# Python 3 interpreters this file targets.
if sys.version_info < (3, 5):
    print("ERROR: dammit! requires python 3.5 or greater", file=sys.stderr)
    sys.exit()
# Single source of truth for the package version, read from the VERSION file.
version = open('dammit/VERSION').read().strip()
def main():
    """Declare the dammit package metadata and dependencies, then run
    setuptools."""
    setup( name = 'dammit',
           version = version,
           description = 'dammit!',
           url = 'https://github.com/camillescott/dammit',
           author = '<NAME>',
           author_email = '<EMAIL>',
           license = 'BSD',
           test_suite = 'pytest-runner',
           tests_require = ['pytest',
                            'codecov'],
           packages = find_packages(),
           scripts = glob('bin/*'),
           install_requires = ['setuptools>=0.6.35',
                               'pandas>=0.18.1',
                               'khmer>=2.0',
                               'doit>=0.29.0',
                               'ficus>=0.1',
                               'matplotlib>=1.0',
                               'numexpr>=2.3.1',
                               'shmlast>=1.2',
                               'filelock'],
           include_package_data = True,
           zip_safe = False, )
if __name__ == "__main__":
    main()
| StarcoderdataPython |
11373764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Built with code/inspiration from MapFish, OpenLayers & <NAME>
#
import sys
sys.path.append("./")
# For JS
import getopt
import jsmin, mergejs
# For CSS
import re
# For file moves
import shutil
import os
def mergeCSS(inputFilenames, outputFilename):
    """Concatenate the given CSS files, in order, into outputFilename and
    return that filename.

    NOTE(review): uses the Python 2 ``file()`` builtin -- this script is
    Python 2 only.
    """
    output = ""
    for inputFilename in inputFilenames:
        output += file(inputFilename, "r").read()
    file(outputFilename, "w").write(output)
    return outputFilename
def cleanline(theLine):
    """Minify one chunk of CSS text.

    Applies, in this exact order: strip line breaks/tabs/feeds, collapse
    runs of spaces, drop the final ';' before '}', tighten spacing around
    braces, remove /* ... */ comments and @CHARSET declarations, and remove
    the space after ':', ',' and ';'.  The substitution order is
    significant and matches the original implementation.
    """
    rules = (
        (r"(\n|\r|\t|\f|\v)+", ""),    # kill line breaks, tabs and feeds
        (r"( )+", " "),                # collapse runs of spaces
        (r"(; }|;})+", "}"),           # drop the last ';' before '}'
        (r"({ )+", "{"),               # no space after '{'
        (r"/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/", ""),  # strip comments
        (r"@CHARSET .*;", ""),         # strip the charset declaration
        (r" {", "{"),                  # no space before '{'
        (r": ", ":"),                  # no space after ':'
        (r", ", ","),                  # no space after ','
        (r"; ", ";"),                  # no space after ';'
    )
    minified = theLine
    for pattern, replacement in rules:
        minified = re.compile(pattern).sub(replacement, minified)
    return minified
def compressCSS(inputFilename, outputFilename):
    """Minify inputFilename with cleanline and write the result to
    outputFilename.

    NOTE(review): ``for line in theFile`` iterates the already-read *string*
    character by character, so cleanline runs once per character and then
    once more on the joined result -- correct output, but slow.  Python 2
    only (``file()`` builtin).
    """
    theFile = file(inputFilename, "r").read()
    output = ""
    for line in theFile:
        output = output + cleanline(line)
    # Once more, clean the entire file string
    _output = cleanline(output)
    file(outputFilename, "w").write(_output)
    return
# Root of the mfbase checkout referenced by the GIS stylesheet list below.
mfbase = "../../mfbase"
def dojs(dogis = False):
    """ Minifies the js

    Merges the core S3 JavaScript per sahana.js.cfg, minifies it with
    jsmin, prepends license.txt and moves the result into ../S3.  With
    dogis=True the OpenLayers and GeoExt GIS bundles are built the same
    way and moved into ../gis.  (Python 2 script: print statements,
    ``file()`` builtin.)
    """
    # Define which files we want to include
    # also need to amend sahana.js.cfg
    configDictCore = {
        "web2py": "..",
        "T2": "..",
        "S3": ".."
    }
    configFilename = "sahana.js.cfg"
    outputFilename = "S3.min.js"
    # Merge JS files
    print "Merging Core libraries."
    (files, order) = mergejs.getFiles(configDictCore, configFilename)
    merged = mergejs.run(files, order)
    # Compress JS files
    print "Compressing - JS"
    minimized = jsmin.jsmin(merged)
    # Add license
    print "Adding license file."
    minimized = file("license.txt").read() + minimized
    # Print to output files
    print "Writing to %s." % outputFilename
    file(outputFilename, "w").write(minimized)
    # Remove old JS files
    print "Deleting %s." % outputFilename
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    # Move new JS files
    print "Moving new JS files"
    shutil.move("S3.min.js", "../S3")
    if dogis:
        # also need to amend sahana.js.gis.cfg
        configDictGIS = {
            "gis": ".."
        }
        configDictGeoExt = {
            "GeoExt.js": "../gis/geoext/lib",
            "GeoExt": "../gis/geoext/lib",
            "ux": "../gis/geoext"
        }
        configDictOpenLayers = {
            "OpenLayers.js": "../gis/openlayers/lib",
            "OpenLayers": "../gis/openlayers/lib",
            "Rico": "../gis/openlayers/lib",
            "Gears": "../gis/openlayers/lib"
        }
        configDictGlobalGIS = {}
        configDictGlobalGIS.update(configDictOpenLayers)
        configDictGlobalGIS.update(configDictGIS)
        configFilenameGIS = "sahana.js.gis.cfg"
        configFilenameGeoExt = "geoext.js.gis.cfg"
        outputFilenameGIS = "OpenLayers.js"
        outputFilenameGeoExt = "GeoExt.js"
        # Merge GIS JS Files
        print "Merging GIS libraries."
        (files, order) = mergejs.getFiles(configDictGlobalGIS, configFilenameGIS)
        mergedGIS = mergejs.run(files, order)
        print "Merging GeoExt libraries."
        (files, order) = mergejs.getFiles(configDictGeoExt, configFilenameGeoExt)
        mergedGeoExt = mergejs.run(files, order)
        # Compress JS files
        print "Compressing - GIS JS"
        minimizedGIS = jsmin.jsmin(mergedGIS)
        print "Compressing - GeoExt JS"
        minimizedGeoExt = jsmin.jsmin(mergedGeoExt)
        # Add license
        minimizedGIS = file("license.gis.txt").read() + minimizedGIS
        # Print to output files
        print "Writing to %s." % outputFilenameGIS
        file(outputFilenameGIS, "w").write(minimizedGIS)
        print "Writing to %s." % outputFilenameGeoExt
        file(outputFilenameGeoExt, "w").write(minimizedGeoExt)
        # Move new JS files
        print "Deleting %s." % outputFilenameGIS
        try:
            os.remove("../gis/%s" % outputFilenameGIS)
        except:
            pass
        print "Moving new GIS JS files"
        shutil.move("OpenLayers.js", "../gis")
        print "Deleting %s." % outputFilenameGeoExt
        try:
            os.remove("../gis/%s" % outputFilenameGeoExt)
        except:
            pass
        print "Moving new GeoExt JS files"
        shutil.move("GeoExt.js", "../gis")
def docss(dogis = True):
    """Compresses the CSS files

    Merges the core stylesheet list into sahana.min.css, minifies it via
    compressCSS and moves it into ../../styles/S3; with dogis=True the GIS
    stylesheets are bundled into gis.min.css the same way.  (Python 2
    script.)
    """
    listCSS = [
        "../../styles/S3/sahana.css",
        "../../styles/S3/jquery.autocomplete.css",
        "../../styles/S3/jquery.cluetip.css",
        "../../styles/S3/jquery.dataTables.css",
        "../../styles/S3/jquery.jqplot.css",
        "../../styles/S3/jquery.ui.core.css",
        "../../styles/S3/jquery.ui.datepicker.css",
        "../../styles/S3/jquery.ui.theme.css",
        "../../styles/S3/ajaxS3.css",
        "../../styles/T2/t2.css",
        "../../styles/web2py/calendar.css",
        "../../styles/S3/s3.multiselect.widget.css",
        "../../styles/S3/jquery.multiSelect.css"
    ]
    outputFilenameCSS = "sahana.min.css"
    # Merge CSS files
    print "Merging Core styles."
    mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
    # Compress CSS files
    print "Writing to %s." % outputFilenameCSS
    compressCSS(mergedCSS, outputFilenameCSS)
    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        os.remove("../../styles/S3/%s" % outputFilenameCSS)
    except:
        pass
    shutil.move(outputFilenameCSS, "../../styles/S3")
    if dogis:
        listCSSGIS = [
            "../../styles/gis/gis.css",
            "../../styles/gis/popup.css",
            "../../styles/gis/layerlegend.css",
            #mfbase+"/ext/resources/css/ext-all.css", # would need to copy images if included here
            "../../styles/gis/google.css",
            #"../../styles/gis/style.css",
            "../../styles/gis/ie6-style.css"
        ]
        outputFilenameCSSGIS = "gis.min.css"
        # Merge GIS CSS files
        print "Merging GIS styles."
        mergedCSSGIS = mergeCSS(listCSSGIS, outputFilenameCSSGIS)
        # Compress GIS CSS files
        print "Writing to %s." % outputFilenameCSSGIS
        compressCSS(mergedCSSGIS, outputFilenameCSSGIS)
        # Move files to correct locations
        print "Deleting %s." % outputFilenameCSSGIS
        try:
            os.remove("../../styles/gis/%s" % outputFilenameCSSGIS)
        except:
            pass
        shutil.move(outputFilenameCSSGIS, "../../styles/gis")
def main(argv):
try:
parameter1 = argv[0]
except:
parameter1 = "ALL"
try:
if(argv[1] == "DOGIS"):
parameter2 = True
else:
parameter2 = False
except:
parameter2 = True
if parameter1 == "ALL":
dojs()
docss()
else:
if parameter1 == "CSS":
docss(parameter2)
else:
dojs(parameter2)
print "Done."
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
255820 | <reponame>dmmoura/PAA-2021<gh_stars>0
from paa191t1.dijkstra.datastructs.vector import Vector
from paa191t1.tests.dijkstra.test_dijkstra import TestDijkstraBase
class TestDijkstraVector(TestDijkstraBase):
    """Runs the shared Dijkstra test suite against the Vector data structure."""

    def setUp(self):
        # The inherited tests exercise whatever structure is bound here.
        self.struct = Vector()
4953779 | # This is a troll indeed ffs *facepalm*
# Ported from xtra-telegram by @heyworld
import asyncio
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import ChannelParticipantsAdmins
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, bot, owner
from userbot.utils import edit_or_reply, bdrl_cmd
@bdrl_cmd(pattern="fgban(?: |$)(.*)")
async def gbun(event):
    """Fake global-ban prank command (.fgban).

    Replies to the targeted message (or the chat) with a bogus
    "GBANNED" notice; no actual ban is performed.
    """
    if event.fwd_from:
        return
    gbunVar = event.text
    # Strip the command prefix, keeping only the optional reason text.
    gbunVar = gbunVar[6:]
    mentions = f"**Warning!! User 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By** {owner}\n"
    await edit_or_reply(event, "**Summoning out the mighty gban hammer ☠️**")
    # BUG FIX: asyncio.sleep() returns a coroutine; without "await" the
    # pause never happened (and a "never awaited" warning was raised).
    await asyncio.sleep(3.5)
    chat = await event.get_input_chat()
    # NOTE(review): this loop only appends empty strings to `mentions`,
    # which is never sent — presumably kept for its iteration side effect;
    # confirm before removing.
    async for _ in bot.iter_participants(chat, filter=ChannelParticipantsAdmins):
        mentions += f""
    reply_message = None
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        replied_user = await event.client(GetFullUserRequest(reply_message.from_id))
        firstname = replied_user.user.first_name
        usname = replied_user.user.username
        idd = reply_message.from_id
        # make meself invulnerable cuz why not xD
        if idd == 1036951071:
            await reply_message.reply(
                "`Wait a second, This is my master!`\n**How dare you threaten to ban my master nigger!**\n\n__Your account has been hacked! Pay 6969$ to my master__ [Heyworld](tg://user?id=1036951071) __to release your account__😏"
            )
        else:
            jnl = (
                "**Warning!!**"
                "[{}](tg://user?id={})"
                f"** 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By** {owner}\n\n"
                "**Name: ** __{}__\n"
                "**ID : ** `{}`\n"
            ).format(firstname, idd, firstname, idd)
            if usname is None:
                jnl += "**Username: ** `Doesn't own a username!`\n"
            elif usname != "None":
                jnl += "**Username** : @{}\n".format(usname)
            if len(gbunVar) > 0:
                gbunm = "`{}`".format(gbunVar)
                gbunr = "**Reason: **" + gbunm
                jnl += gbunr
            else:
                no_reason = "**Reason: **`Jamet`"
                jnl += no_reason
            await reply_message.reply(jnl)
    else:
        mention = f"**Warning!! User 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By** {owner} \n**Reason:** `Jamet` "
        await event.reply(mention)
    await event.delete()
CMD_HELP.update(
{
"fakegban": f"**Plugin : **`fakegban`\
\n\n • **Syntax :** `{cmd}fgban` <reply> <reason>\
\n • **Function : **Untuk melakukan aksi Fake global banned , just for fun\
"
}
)
| StarcoderdataPython |
8101485 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from selenium import webdriver
class NewVisitorTest(unittest.TestCase):
    """Functional smoke test: the home page title contains 'Token'."""

    def setUp(self):
        # Fresh Chrome session per test; requires chromedriver on PATH.
        self.browser = webdriver.Chrome()

    def tearDown(self):
        self.browser.quit()

    def test_title(self):
        # Assumes the app under test is already serving on localhost:5001.
        self.browser.get('http://localhost:5001')
        self.assertIn('Token', self.browser.title)
if __name__ == '__main__':
unittest.main(warnings='ignore')
| StarcoderdataPython |
249831 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""CSI ratioB: Plot CSI ratio of two antennas in real time(Linux 802.11n CSI Tool)
Usage:
1. python3 csiratioB.py
2. python3 csiserver.py ../material/5300/dataset/sample_0x5_64_3000.dat 3000 10000
Note:
1. this is just a demo, because it's too slow :(
2. You need right data to get beautiful result
3. csiserver.py: Use a large delay value to avoid too much packets loss
Ref:
realtime_plot: [MATPLOTLIB UNCHAINED](https://matplotlib.org/3.1.3/gallery/animation/unchained.html#matplotlib-unchained)
csi ratio: [FarSense: Pushing the Range Limit of WiFi-based Respiration Sensing with CSI Ratio of Two Antennas](https://arxiv.org/pdf/1907.03994v1.pdf)
"""
import socket
import threading
import csiread
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['toolbar'] = "None"
cache = np.zeros([30, 800])
mutex = threading.Lock()
class GetDataThread(threading.Thread):
    """Background thread that receives CSI packets over UDP and fills `cache`."""

    def __init__(self):
        super(GetDataThread, self).__init__()
        # Run flag; cleared by stop() to end the receive loop.
        self.__state = True

    def run(self):
        """Receive CSI data in real time and roll it into the shared cache.

        Note:
            If you want to run this script on the host with Intel 5300 NIC,
            rewrite code as csiuserspace.py.
        """
        csidata = csiread.Intel(None, 3, 2)
        # config
        global cache, mutex
        count = 0
        address_src = ('127.0.0.1', 10086)
        address_des = ('127.0.0.1', 10010)
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.bind(address_des)
            s.settimeout(0.1)
            # Short timeout keeps the loop responsive to stop().
            while self.__state:
                try:
                    data, address_src = s.recvfrom(4096)
                except socket.timeout:
                    continue
                msg_len = len(data)
                code = csidata.pmsg(data)
                # 0xbb marks a CSI payload message in the 5300 tool format.
                if code == 0xbb:
                    csi = csidata.get_scaled_csi_sm()
                    # NOTE(review): `csi[0:, ...]` (a length-1 leading axis)
                    # looks like a typo for `csi[0, ...]`; the broadcast still
                    # yields the intended per-subcarrier ratio — confirm.
                    scaled_csi_sm = np.abs(csi[0, :, 0, 0] / csi[0:, :, 1, 0])
                    # Shift the rolling window left and append the new column,
                    # under the lock shared with the plotting thread.
                    mutex.acquire()
                    cache[:, :-1] = cache[:, 1:]
                    cache[:, -1] = scaled_csi_sm
                    mutex.release()
                    count += 1
                    if count % 100 == 0:
                        print('receive %d bytes [msgcnt=%u], seq=%d' % (msg_len, count, csidata.seq))

    def stop(self):
        # Signal run() to exit after its current timeout expires.
        self.__state = False
def realtime_plot():
    """Animate the shared `cache` of CSI ratios as stacked line plots."""
    fig = plt.figure(figsize=(16/2, 9/2), facecolor='black')
    ax = plt.subplot(111, frameon=False)
    ax.set_ylim(-10, 70)
    # ax.set_xticks([])
    ax.set_yticks([])
    ax.tick_params(axis='x', colors='white')
    ax.xaxis.label.set_color('white')
    ax.set_xlabel('Packets')
    ax.text(0.5, 1.0, "Ratio of CSI(30x)", transform=ax.transAxes,
            ha="center", va="bottom", color="w",
            family="sans-serif", fontweight="light", fontsize=16)
    lens = cache.shape[-1]
    X = np.linspace(-int(lens/2), int(lens/2), lens)
    lines = []
    # One line per subcarrier row, vertically offset and progressively
    # narrower/thinner for the "unchained" visual effect.
    for i in range(len(cache)):
        xscale = 1 - i / 200.
        lw = 1.0 - i / 75.0
        line, = ax.plot(xscale * X, i/2 + cache[i], lw=lw, alpha=0.5)
        lines.append(line)

    def animate(i):
        # NOTE(review): the loop variable shadows the frame argument `i`,
        # which is unused here — harmless but worth renaming.
        global cache, mutex
        mutex.acquire()
        for i in range(len(cache)):
            lines[i].set_ydata(i/2 + cache[i])
        mutex.release()
        return lines

    # Keep a reference to the animation; letting it be GC'd stops updates.
    ani = animation.FuncAnimation(fig, animate, interval=1000/30, blit=False)
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
task = GetDataThread()
task.start()
realtime_plot()
task.stop()
| StarcoderdataPython |
4901509 | # -*- coding:utf-8 -*-
#
# Copyright 2018, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import traceback
from collections import defaultdict
from typing import *
from unittest import SkipTest
from couchbase.fulltext import IMetaData, ISearchResult
from couchbase_core import recursive_reload
from couchbase_core._pyport import ANY_STR
try:
from abc import ABC
except:
from abc import ABCMeta
import logging
import time
from couchbase_core.transcodable import Transcodable
import couchbase_core.connstr
import couchbase.exceptions
from couchbase import JSONDocument, DeltaValue, SignedInt64, MutateInResult
from couchbase_core.durability import Durability
from couchbase.cluster import Cluster, ClusterOptions
from couchbase import ReplicateTo, PersistTo, FiniteDuration, copy, \
Seconds, ReplicaNotConfiguredException, DocumentConcurrentlyModifiedException, \
DocumentMutationLostException, ReplicaNotAvailableException, MutateSpec, CASMismatchException, \
Durations, \
MutateInOptions
from couchbase import CBCollection, GetOptions, RemoveOptions, ReplaceOptions
from couchbase import Bucket
from couchbase_tests.base import ConnectionTestCase, ConnectionTestCaseBase
import couchbase.subdocument as SD
import couchbase.admin
import couchbase_core._bootstrap
import couchbase_core._libcouchbase as _LCB
import couchbase_core.cluster
import couchbase.admin
import couchbase_core.tests.analytics_harness
from couchbase_core.cluster import ClassicAuthenticator
from couchbase_core.connstr import ConnectionString
from couchbase.diagnostics import ServiceType
import couchbase_core.fulltext as FT
from couchbase.exceptions import KeyNotFoundException, KeyExistsException, NotSupportedError
class ClusterTestCase(ConnectionTestCase):
    """Base test case that opens a Cluster, an admin handle and a Bucket."""

    def setUp(self, **kwargs):
        self.factory = Bucket
        super(ClusterTestCase, self).setUp()
        connargs = self.cluster_info.make_connargs()
        # The bucket name is carried in the connection string; strip it out
        # so the cluster-level connection string is bucket-agnostic.
        connstr_abstract = ConnectionString.parse(connargs.pop('connection_string'))
        bucket_name = connstr_abstract.bucket
        connstr_abstract.bucket = None
        connstr_abstract.set_option('enable_collections', 'true')
        self.cluster = Cluster(connstr_abstract, ClusterOptions(ClassicAuthenticator(self.cluster_info.admin_username, self.cluster_info.admin_password)))
        self.admin = self.make_admin_connection()
        self.bucket = self.cluster.bucket(bucket_name, **connargs)
        self.bucket_name=bucket_name
class CollectionTestCase(ClusterTestCase):
    """Base test case that provisions scopes/collections and binds them
    to attributes named by the subclass's setUp mapping."""

    coll = None  # type: CBCollection
    # Class-level memo of scopes/collections already created, shared across
    # test instances to avoid re-creating them per test.
    initialised = defaultdict(lambda: {})

    def setUp(self, mock_collections, real_collections):
        # prepare:
        # 1) Connect to a Cluster
        super(CollectionTestCase,self).setUp()
        cm = couchbase.admin.CollectionManager(self.admin, self.bucket_name)
        # Mock and real servers get different scope/collection layouts.
        my_collections = mock_collections if self.is_mock else real_collections
        for scope_name, collections in my_collections.items():
            CollectionTestCase._upsert_scope(cm, scope_name)
            scope = self.bucket.scope(scope_name) if scope_name else self.bucket
            for collection_name, dest in collections.items():
                CollectionTestCase._upsert_collection(cm, collection_name, scope_name)
                # 2) Open a Collection and expose it as self.<dest>
                coll = scope.collection(collection_name) if collection_name else scope.default_collection()
                setattr(self, dest, coll)

    @staticmethod
    def _upsert_collection(cm, collection_name, scope_name):
        # Best-effort creation: ignore failures (e.g. already exists).
        if not collection_name in CollectionTestCase.initialised[scope_name].keys():
            try:
                cm.insert_collection(collection_name, scope_name)
                CollectionTestCase.initialised[scope_name][collection_name]=None
            except:
                pass

    @staticmethod
    def _upsert_scope(cm, scope_name):
        # Best-effort creation of the scope, skipped for the default scope.
        try:
            if scope_name and not scope_name in CollectionTestCase.initialised.keys():
                cm.insert_scope(scope_name)
        except:
            pass
class Scenarios(CollectionTestCase):
def setUp(self, **kwargs):
super(Scenarios, self).setUp({None: {None: "coll"}}, {"bedrock": {"flintstones": 'coll'}})
def test_scenario_A(self):
# 1) fetch a full document that is a json document
self.coll.upsert("id",{"kettle":"fish"})
doc = self.coll.get("id", GetOptions().timeout(Seconds(10)))
# 2) Make a modification to the content
content = doc.content_as[JSONDocument].put("field", "value")
# 3) replace the document on the server
# not formally allowed syntax - can't mix OptionBlocks and named params
result = self.coll.replace(doc.id, content, ReplaceOptions().timeout(Seconds(10)), cas=doc.cas)
result = self.coll.replace(doc.id, content, ReplaceOptions().timeout(Seconds(10)).cas(result.cas))
result = self.coll.replace(doc.id, content, expiration=Seconds(10), cas=result.cas)
# Default params also supported for all methods
doc2 = self.coll.get("id", expiration=Seconds(10))
content2 = doc2.content_as[dict].update({"value": "bar"})
self.coll.replace(doc2.id, content2, cas=doc2.cas, expiration=Seconds(10))
# I include type annotations and getOrError above to make things clearer,
# but it'd be more idiomatic to write this:
try:
self.coll.get("cheese", GetOptions(replica=True))
self.coll.get("cheese", replica=True)
# invalid syntax:
self.coll.get("cheese", options=GetOptions(replica=True), replica=True)
result = self.coll.get("id", GetOptions().timeout(Seconds(10)))
self.coll.replace(result.id,
result.content
.put("field", "value")
.put("foo", "bar"),
cas=result.cas,
expiration=Seconds(10))
except:
print("could not get doc")
def test_scenario_B(self):
"""
Scenario B:
1) fetch a document fragment which is a json array with elements
2) make modifications to the content
3) replace the fragment in the original document
"""
self.coll.upsert("id",{'someArray':['wibble','gronk']})
subdoc = self.coll.get("id", GetOptions().project("someArray"))
result = None
if subdoc:
arr = subdoc.content_as_array()
arr.append("foo")
result = self.coll.mutate_in("id", [SD.upsert("someArray", arr)],
MutateInOptions().timeout(Seconds(10)))
self.assertIsInstance(result, MutateInResult)
def test_mutatein(self):
count = 0
for durability in Durability:
somecontents = {'some': {'path': 'keith'}}
key="somekey_{}".format(count)
try:
self.coll.remove(key)
except:
pass
self.coll.insert(key, somecontents)
inserted_value = "inserted_{}".format(count)
replacement_value = "replacement_{}".format(count)
count += 1
try:
self.coll.mutate_in(key, (
SD.replace('some.path', replacement_value),
SD.insert('some.other.path', inserted_value, create_parents=True),
), durability_level=durability)
somecontents['some']['path'] = replacement_value
somecontents['some'].update({'other': {'path': inserted_value}})
self.assertEqual(somecontents, self.coll.get(key).content)
except NotSupportedError as e:
if not self.is_mock:
raise
else:
logging.error("Assuming failure is due to mock not supporting durability")
def test_scenario_C_clientSideDurability(self):
"""
Scenario C:
1) Remove a document with Durability Requirements, both variants, thinking about error handling
"""
# Use a helper wrapper to retry our operation in the face of durability failures
# remove is idempotent iff the app guarantees that the doc's id won't be reused (e.g. if it's a UUID). This seems
# a reasonable restriction.
self.coll.upsert("id","test")
self.assertEqual(self.coll.get("id").content_as[str],"test")
try:
self.retry_idempotent_remove_client_side(lambda replicateTo:
self.coll.remove("id",
RemoveOptions().dur_client(replicateTo,
PersistTo.ONE)),
ReplicateTo.TWO, ReplicateTo.TWO, FiniteDuration.time() + Seconds(30))
except NotSupportedError:
raise SkipTest("Skipping as not supported")
@staticmethod
def retry_idempotent_remove_client_side(callback, # type: Callable[[ReplicateTo.Value],Any]
replicate_to, # type: ReplicateTo.Value
original_replicate_to, # type: ReplicateTo.Value
until # type: FiniteDuration
):
# type: (...)->None
"""
* Automatically retries an idempotent operation in the face of durability failures
* TODO this is quite complex logic. Should this be folded into the client as a per-operation retry strategy?
* @param callback an idempotent remove operation to perform
* @param replicate_to the current ReplicateTo setting being tried
* @param original_replicate_to the originally requested ReplicateTo setting
* @param until prevent the operation looping indefinitely
"""
success = False
while not success:
if time.time() >= float(until):
# Depending on the durability requirements, may want to also log this to an external system for human review
# and reconciliation
raise RuntimeError("Failed to durably write operation")
try:
callback(replicate_to)
success = True
except couchbase.exceptions.KeyNotFoundException:
print("Our work here is done")
break
except ReplicaNotConfiguredException:
print("Not enough replicas configured, aborting")
break
except DocumentConcurrentlyModifiedException:
# Just retry
# self.retryIdempotentRemoveClientSide(callback, replicate_to, original_replicate_to, until)
continue
except DocumentMutationLostException:
# Mutation lost during a hard failover. If enough replicas
# still aren't available, it will presumably raise ReplicaNotAvailableException and retry with lower.
# self.retryIdempotentRemoveClientSide(callback, original_replicate_to, original_replicate_to, until)
replicate_to = original_replicate_to
continue
except (ReplicaNotAvailableException, couchbase.ArgumentError):
newReplicateTo = {ReplicateTo.ONE: ReplicateTo.NONE,
ReplicateTo.TWO: ReplicateTo.ONE,
ReplicateTo.THREE: ReplicateTo.TWO}.get(replicate_to, ReplicateTo.NONE)
print("Temporary replica failure, retrying with lower durability {}".format(newReplicateTo))
replicate_to = newReplicateTo
def test_scenario_c_server_side_durability(self):
# Use a helper wrapper to retry our operation in the face of durability failures
# remove is idempotent iff the app guarantees that the doc's id won't be reused (e.g. if it's a UUID). This seems
# a reasonable restriction.
for durability_type in Durability:
self.coll.upsert("id","fred",durability_level=Durability.NONE)
self.retry_idempotent_remove_server_side(
lambda: self.coll.remove("id", RemoveOptions().dur_server(durability_type)))
def retry_idempotent_remove_server_side(self,  # type: Scenarios
                                        callback,  # type: Callable[[],Any]
                                        until=Durations.seconds(10)  # type: FiniteDuration
                                        ):
    """Retry an idempotent remove in the face of durability ambiguity.

    Repeatedly invokes *callback* until it succeeds, the document is
    confirmed gone, or the *until* deadline passes (then raises
    RuntimeError). TODO: fold into the client as a retry strategy?

    NOTE(review): the default for *until* is evaluated once at definition
    time; fine for a duration constant, but worth confirming.
    """
    deadline=FiniteDuration.time()+until
    while FiniteDuration.time() < deadline:
        try:
            callback()
            return
        except couchbase.exceptions.DurabilitySyncWriteAmbiguousException:
            # Ambiguous outcome: if the doc (hard-coded key "id") still
            # exists the remove did not land — retry; otherwise it worked.
            if self.coll.get("id").success():
                continue
            logging.info("Our work here is done")
            return
        except couchbase.exceptions.KeyNotFoundException:
            logging.info("Our work here is done")
            return
    # Depending on the durability requirements, may want to also log this to
    # an external system for human review and reconciliation
    raise RuntimeError("Failed to durably write operation")
def test_scenario_D(self):
""" Scenario D (variation of A):
#1) do the same thing as A, but handle the "cas mismatch retry loop"
"""
entry=JSONDocument()
entry=entry.put("field","value")
self.coll.upsert("id",entry)
def respond():
result = self.coll.get("id", expiration=Seconds(10))
if result:
self.coll.replace(result.id,
result.content_as[JSONDocument]
.put("field", "value")
.put("foo", "bar"),
cas=result.cas,
expiration=Seconds(10))
else:
logging.error("could not get doc")
self.retry_operation_on_cas_mismatch(respond, guard=50)
def retry_operation_on_cas_mismatch(self,
                                    callback,  # type: Callable[[],None]
                                    guard  # type: int
                                    ):
    # type: (...) -> None
    """Invoke *callback*, retrying on CAS mismatch at most *guard* times.

    Raises RuntimeError once the retry budget is exhausted.
    """
    remaining = guard
    while True:
        if remaining <= 0:
            raise RuntimeError("Failed to perform exception")
        try:
            callback()
            return
        except CASMismatchException:
            # Concurrent writer won the CAS race; try again.
            remaining -= 1
class UserPartial(Transcodable):
def __init__(self,
name=None, # type: str
age=None, # type: int
**kwargs
):
self.name = name
self.age = age
def with_attr(self, **kwargs):
result = copy.deepcopy(self)
for k, v in kwargs.items():
setattr(result, k, v)
return result
def __eq__(self, other):
return self.name==other.name and self.age==other.age
@classmethod
def decode_canonical(cls, input):
return cls(**input)
def encode_canonical(self):
return dict(name=self.name,age=self.age)
class AddressedUser(UserPartial):
def __init__(self,
name=None, # type: str
age=None, # type: int
address=None, # type: str
):
super(Scenarios.AddressedUser,self).__init__(name,age)
self.address = address
def __eq__(self, other):
return super(Scenarios.AddressedUser,self).__eq__(other) and self.address==other.address
def encode_canonical(self):
result=super(Scenarios.AddressedUser,self).encode_canonical()
result.update(address=self.address)
return result
class PhonedUser(AddressedUser):
def __init__(self,
name=None, # type: str
age=None, # type: int
address=None, # type: str
phoneNumber=None # type: str
):
super(Scenarios.PhonedUser,self).__init__(name,age,address)
self.phoneNumber = phoneNumber
def __eq__(self, other):
return super(Scenarios.AddressedUser,self).__eq__(other) and self.address==other.address
def encode_canonical(self):
result=super(Scenarios.AddressedUser,self).encode_canonical()
result.update(phoneNumber=self.phoneNumber)
return result
def test_scenario_E(self):
"""
Scenario E (if applicable):
1) Fetch a full Document and marshal it into a language entity rather than a generic json type
2) Modify the entity
3) store it back on the server with a replace
"""
self.coll.upsert("id",dict(name="fred"))
result = self.coll.get("id", expiration=Seconds(10))
if result:
entry = result.content_as[Scenarios.AddressedUser]
entry=entry.with_attr(age=25)
self.coll.replace(result.id, entry, cas=result.cas, expiration=Seconds(10))
else:
logging.error("could not get doc")
def test_scenario_F_fulldoc(self):
"""
Scenario F (if applicable):
1) Fetch a Document fragment and marshal it into a language entity rather than a generic json type
2) Modify the entity
3) store it back on the server with a replace
"""
item=Scenarios.AddressedUser("fred",21,"45 Dupydaub Street")
self.coll.upsert("id",item)
doc = self.coll.get("id")
if doc:
result = doc.content_as[Scenarios.AddressedUser]
self.assertEqual(result,item)
result=result.with_attr(age=25)
self.assertNotEqual(result,item)
else:
logging.error("could not find doc")
def test_scenarioF_subdoc(self):
item=Scenarios.AddressedUser("fred",21,"45 Dupydaub Street")
self.coll.upsert("id", item)
subdoc = self.coll.get("id", project=("name", "age"))
user = subdoc.content_as[Scenarios.UserPartial]
altuser=self.coll.lookup_in("id", (SD.get("name"), SD.get("age")))
self.assertEqual("fred",altuser.content_as[str](0))
self.assertEqual(21,altuser.content_as[int](1))
changed = user.with_attr(age=25)
self.assertEqual(Scenarios.UserPartial("fred", 25), changed)
self.coll.mutate_in(subdoc.id, [MutateSpec().upsert("user", changed)])
def test_upsert(self):
    """Round-trip a simple string value through upsert/get."""
    self.coll.upsert("fish", "banana")
    # assertEquals is a deprecated unittest alias; use assertEqual, matching
    # the non-deprecated assertions used elsewhere in this file.
    self.assertEqual("banana", self.coll.get("fish").content_as[str])
def test_unsigned_int(self):
self.assertRaises(couchbase.exceptions.ArgumentError, DeltaValue, -1)
self.assertRaises(couchbase.exceptions.ArgumentError, DeltaValue, 0x7FFFFFFFFFFFFFFF + 1)
x=DeltaValue(5)
self.assertEqual(5,x.value)
def test_signed_int_64(self):
self.assertRaises(couchbase.exceptions.ArgumentError, SignedInt64, -0x7FFFFFFFFFFFFFFF-2)
self.assertRaises(couchbase.exceptions.ArgumentError, SignedInt64, 0x7FFFFFFFFFFFFFFF + 1)
x=SignedInt64(0x7FFFFFFFFFFFFFFF)
self.assertEqual(0x7FFFFFFFFFFFFFFF,x.value)
x=SignedInt64(-0x7FFFFFFFFFFFFFFF-1)
self.assertEqual(-0x7FFFFFFFFFFFFFFF-1,x.value)
def test_decrement(self):
try:
self.coll.remove("counter")
except:
pass
self.coll.decrement("counter", DeltaValue(0), initial=SignedInt64(43))
self.assertEqual(43,self.coll.get("counter").content_as[int])
self.coll.decrement("counter", DeltaValue(1))
self.assertEqual(42,self.coll.get("counter").content_as[int])
self.coll.remove("counter")
self.coll.upsert("counter", 43)
self.coll.decrement("counter", DeltaValue(1))
self.assertEqual(42,self.coll.get("counter").content_as[int])
self.assertRaises(couchbase.exceptions.ArgumentError, self.coll.decrement, "counter", DeltaValue(5), initial=10)
self.assertRaises(couchbase.exceptions.ArgumentError, self.coll.decrement, "counter", 5)
self.assertRaises(couchbase.exceptions.ArgumentError, self.coll.decrement, "counter",-3)
def test_increment(self):
try:
self.coll.remove("counter",quiet=True)
except:
pass
self.coll.increment("counter", DeltaValue(0), initial=SignedInt64(43))
self.assertEqual(43,self.coll.get("counter").content_as[int])
self.coll.increment("counter", DeltaValue(1))
self.assertEqual(44,self.coll.get("counter").content_as[int])
self.coll.remove("counter")
self.coll.upsert("counter", 43)
self.coll.increment("counter", DeltaValue(1))
self.assertEqual(44,self.coll.get("counter").content_as[int])
self.assertRaises(couchbase.exceptions.ArgumentError, self.coll.increment, "counter", DeltaValue(5), initial=10)
self.assertRaises(couchbase.exceptions.ArgumentError, self.coll.increment, "counter", 5)
self.assertRaises(couchbase.exceptions.ArgumentError, self.coll.increment, "counter", -3)
def test_cluster_query(self):
if not self.is_mock:
# TODO: fix for real server
raise SkipTest()
result = self.cluster.query("SELECT mockrow")
self.assertEquals([{"row": "value"}], result.rows())
self.assertEquals([{"row": "value"}], list(result))
self.assertEquals([{"row": "value"}], list(result))
self.assertEquals([{"row": "value"}], result.rows())
def test_cluster_search(self # type: ClusterTestCase
):
if self.is_mock:
raise SkipTest("FTS not supported by mock")
most_common_term_max = 10
initial=time.time()
x = self.cluster.search_query("beer-search", FT.TermQuery("category"),
facets={'fred': FT.TermFacet('category', most_common_term_max)})
first_entry = x.hits()[0]
self.assertEquals("brasserie_de_brunehaut-mont_st_aubert", first_entry.get('id'))
min_hits = 6
metadata = x.metadata()
duration=time.time()-initial
self.assertIsInstance(metadata, IMetaData)
self.assertIsInstance(metadata.error_count(), int)
self.assertIsInstance(metadata.max_score(), float)
self.assertIsInstance(metadata.success_count(), int)
took=metadata.took()
self.assertIsInstance(took, Seconds)
self.assertAlmostEqual(took.value, duration, delta=0.1)
self.assertGreater(took.value, 0)
self.assertIsInstance(metadata.total_hits(), int)
self.assertGreaterEqual(metadata.success_count(), min_hits)
self.assertGreaterEqual(metadata.total_hits(), min_hits)
self.assertGreaterEqual(len(x.hits()), min_hits)
fred_facet = x.facets()['fred']
self.assertIsInstance(fred_facet, ISearchResult.Facet)
self.assertEqual(len(fred_facet['terms']), most_common_term_max)
self.assertRaises(couchbase.exceptions.SearchException, self.cluster.search_query, "beer-search",
FT.TermQuery("category"),
facets={'fred': None})
def test_diagnostics(self # type: Scenarios
):
try:
diagnostics = self.cluster.diagnostics(timeout=(5 if self.is_mock else None))
except couchbase.exceptions.TimeoutError:
if self.is_mock:
raise SkipTest("LCB Diagnostics still blocks indefinitely with mock: {}".format(traceback.format_exc()))
self.assertRegex(diagnostics.sdk(), r'.*PYCBC.*')
self.assertGreaterEqual(diagnostics.version(), 1)
self.assertIsNotNone(diagnostics.id())
config = diagnostics.services().get('config')
self.assertEquals(config.type(), ServiceType.Config)
for key, value in diagnostics.services().items():
self.assertIn(type(value.type()), (ServiceType, str))
self.assertIn(type(value.id()), ANY_STR)
self.assertIn(type(value.local()), ANY_STR)
@staticmethod
def get_multi_result_as_dict(result):
return {k: v.content for k, v in result.items()}
@staticmethod
def get_multi_mutationresult_as_dict(result):
return {k: v.success() for k, v in result.items()}
def test_multi(self):
test_dict = {"Fred": "Wilma", "Barney": "Betty"}
for dur_level in [Durability.NONE] if self.is_mock else Durability:
try:
self.coll.remove_multi(test_dict.keys())
except:
pass
mutate_kwargs = dict(durability_level=dur_level)
self.assertRaises(KeyNotFoundException, self.coll.get, "Fred")
self.assertRaises(KeyNotFoundException, self.coll.get, "Barney")
self.coll.upsert_multi(test_dict, **mutate_kwargs)
result = self.coll.get_multi(test_dict.keys())
self.assertEquals(Scenarios.get_multi_result_as_dict(result), test_dict)
self.coll.remove_multi(test_dict.keys(), **mutate_kwargs)
self.assertRaises(KeyNotFoundException, self.coll.get_multi, test_dict.keys())
self.coll.insert_multi(test_dict, **mutate_kwargs)
self.assertRaises(KeyExistsException, self.coll.insert_multi, test_dict)
result = self.coll.get_multi(test_dict.keys())
self.assertEquals(Scenarios.get_multi_result_as_dict(result), test_dict)
self.assertEquals(self.coll.get("Fred").content, "Wilma")
self.assertEquals(self.coll.get("Barney").content, "Betty")
self.coll.remove_multi(test_dict.keys(), **mutate_kwargs)
self.assertRaises(KeyNotFoundException, self.coll.get_multi, test_dict.keys())
self.coll.insert_multi(test_dict)
test_dict_2 = {"Fred": "Cassandra", "Barney": "Raquel"}
result = self.coll.replace_multi(test_dict_2)
expected_result = {k: True for k, v in test_dict_2.items()}
self.assertEquals(Scenarios.get_multi_mutationresult_as_dict(result), expected_result)
self.assertEquals(Scenarios.get_multi_result_as_dict(self.coll.get_multi(test_dict_2.keys())),test_dict_2)
def test_PYCBC_607(self # type: Scenarios
):
messed_helpers = copy.deepcopy(couchbase_core._bootstrap._default_helpers)
def dummy_call(*args, **kwargs):
raise Exception("failed")
messed_helpers['json_encode'] = dummy_call
messed_helpers['pickle_encode'] = dummy_call
_LCB._init_helpers(**messed_helpers)
def do_upsert():
self.coll.upsert('king_arthur', {'name': 'Arthur', 'email': '<EMAIL>',
'interests': ['Holy Grail', 'African Swallows']})
self.assertRaises(Exception, do_upsert)
recursive_reload(couchbase)
do_upsert()
class AnalyticsTest(couchbase_core.tests.analytics_harness.CBASTestSpecific, ClusterTestCase):
    """Runs the shared CBAS (analytics) test harness against the cluster."""

    def setUp(self):
        self.factory=Bucket
        # Explicit base-class setUp calls: the connection must exist before
        # the analytics harness initialises.
        ClusterTestCase.setUp(self)
        couchbase_core.tests.analytics_harness.CBASTestSpecific.setUp(self)

    def get_fixture(self):
        # The harness queries through the cluster object.
        return self.cluster

    def do_analytics_query(self, query, **kwargs):
        return self.cluster.analytics_query(query, **kwargs)
| StarcoderdataPython |
8001682 | <filename>worsecrossbars/utilities/auth_dropbox.py<gh_stars>0
"""auth_dropbox:
An internal module used to establish a secure connection
to Dropbox and authenticate against a Dropbox App.
Uses OAuth 2.
"""
import json
import os
import sys
from pathlib import Path
from dropbox import Dropbox
from dropbox import DropboxOAuth2FlowNoRedirect
from dropbox.oauth import BadRequestException
from dropbox.oauth import BadStateException
from dropbox.oauth import NotApprovedException
from dropbox.oauth import ProviderException
from worsecrossbars.utilities import io_operations
def authenticate() -> None:
    """Authenticate the user against Dropbox via the OAuth 2.0 no-redirect flow.

    Reads APP_KEY/APP_SECRET from ~/worsecrossbars/config/app_keys.json
    (prompting for and storing them first if absent), walks the user through
    the authorisation-code flow, validates the token by fetching the account,
    and persists the tokens to ~/worsecrossbars/config/user_secrets.json.
    """
    # Hoist the config directory: it was rebuilt four times below.
    config_dir = Path.home().joinpath("worsecrossbars", "config")

    # First run: collect and persist the app keys, then restart the flow.
    if not config_dir.joinpath("app_keys.json").exists():
        io_operations.store_dropbox_keys()
        authenticate()
        return

    with open(str(config_dir.joinpath("app_keys.json")), encoding="utf8") as json_file:
        app_keys = json.load(json_file)

    auth_flow = DropboxOAuth2FlowNoRedirect(  # nosec
        app_keys["APP_KEY"],
        consumer_secret=app_keys["APP_SECRET"],
        token_access_type="offline",
        scope=["account_info.read", "files.content.read", "files.content.write"],
    )

    authorize_url = auth_flow.start()
    print(f"1. Go to: {authorize_url}")
    print('2. Click "Allow" (you might have to log in first).')
    print("3. Copy the authorization code.")
    auth_code = input("Enter the authorization code here: ").strip()

    try:
        oauth_result = auth_flow.finish(auth_code)
    except (
        NotApprovedException,
        BadStateException,
        BadRequestException,
        ProviderException,
    ) as error:
        print(f"Error: {error}")
        sys.exit(1)

    # Sanity-check the token by fetching the account before persisting it.
    with Dropbox(oauth2_access_token=oauth_result.access_token) as dbx:
        dbx.users_get_current_account()
        print("Successfully set up client!")

    secret = {
        "dropbox_auth_code": oauth_result.access_token,
        # str(x) is the idiomatic spelling of x.__str__()
        "dropbox_expiration": str(oauth_result.expires_at),
        "dropbox_refresh": oauth_result.refresh_token,
    }
    with open(
        str(config_dir.joinpath("user_secrets.json")), "w", encoding="utf8"
    ) as outfile:
        json.dump(secret, outfile)
| StarcoderdataPython |
1640571 | import re
import string
from typing import List, Tuple
from nltk.corpus import words
ALPHABET = string.ascii_lowercase
def get_wordbase(num_letters: int) -> List[str]:
    """Return every NLTK dictionary word of exactly *num_letters* letters.

    Side effect: downloads the NLTK "words" corpus (network access on
    first use; cached afterwards).
    """
    import nltk

    nltk.download("words")
    word_base = [word for word in words.words() if len(word) == num_letters]
    return word_base
def constrain_regex(
    knows_at_letters: str,
    known_not_at_letters: List[str],
    restricted_letters: List[str],
) -> str:
    """Build a regex matching words consistent with the known constraints.

    Each position contributes a one-character class: the known letter when
    one exists, otherwise every non-restricted letter not already ruled out
    at that position.
    """
    usable = [ch for ch in ALPHABET if ch not in restricted_letters]

    per_position = []
    for fixed, ruled_out in zip(knows_at_letters, known_not_at_letters):
        if fixed:
            per_position.append(fixed)
        else:
            per_position.append("".join(ch for ch in usable if ch not in ruled_out))

    return "^" + "".join(f"[{options}]{{1}}" for options in per_position) + "$"
def display(items: List[str], chunk_size: int = 25) -> None:
    """Print *items* in rows of at most *chunk_size*, separated by ' | '."""
    print("\nChoose one from:\n")
    for start in range(0, len(items), chunk_size):
        row = items[start : start + chunk_size]
        print(*row, sep=" | ")
def get_player_input(
    known_at_letters, known_not_at_letters, restricted_letters
) -> Tuple[List[str], List[str], List[str]]:
    """Prompt the player for the latest guess feedback and merge it in.

    Returns updated (known-at, known-not-at, restricted) structures.
    An empty answer keeps the previous value; '*' marks an unknown position.
    NOTE(review): ``knal`` is a shallow copy, so appending to its inner
    lists also mutates the caller's ``known_not_at_letters`` -- confirm
    this aliasing is intended.
    """
    kal_input = input(f"Known at letters, e.g *it**. Current ='{known_at_letters}' : ")
    knal_input = input(
        f"Known not at letters, e.g ****r. Current = '{known_not_at_letters}' : "
    )
    rl_input = input(f"New restricted letters (current= '{restricted_letters}'): ")
    # '*' positions become "" (unknown); other characters are fixed letters.
    kal = [l if l != "*" else "" for l in list(kal_input or known_at_letters)]
    knal = list(known_not_at_letters)
    # Every non-'*' character is a letter known NOT to be at that position.
    for ix, l in enumerate(list(knal_input or "")):
        if l != "*":
            knal[ix].append(l)
    # New restrictions are prepended to the accumulated list.
    rl = list(rl_input or "") + restricted_letters
    return kal, knal, rl
def play(num_letters=5):
    """Interactive Wordle-style solver loop (up to 6 guesses).

    Each iteration collects the player's feedback, builds a positional
    regex, and prints the dictionary words still compatible with it.
    """
    wordbase = get_wordbase(num_letters)
    # Initial values
    known_at_letters = ["" for _ in range(num_letters)]
    known_not_at__letters = [[] for _ in range(num_letters)]
    restricted_letters = []
    for iter_ix in range(1, 7):
        print(f"\n\nIteration {iter_ix} ~~~~~~~~~~ {known_at_letters}\n")
        known_at_letters, known_not_at__letters, restricted_letters = get_player_input(
            known_at_letters, known_not_at__letters, restricted_letters
        )
        # Letters that must appear somewhere in the word (any position).
        should_include = input("Word should include letters: ") or []
        rgx = constrain_regex(
            known_at_letters, known_not_at__letters, restricted_letters
        )
        print("Regex: ", rgx)
        solvers = [
            word
            for word in wordbase
            if re.search(rgx, word)
        ]
        # Keep only candidates containing every required letter.
        solvers_ = []
        for word in solvers:
            if all((c in word for c in should_include)):
                solvers_.append(word)
        display(solvers_)
if __name__ == "__main__":
play()
| StarcoderdataPython |
1915024 | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class GenerateLogListRequest(Request):
    """Request object for the CDN ``GenerateLogList`` API action.

    Thin parameter wrapper: each setter records a query parameter and each
    getter reads it back from the accumulated params mapping.
    """

    def __init__(self):
        super(GenerateLogListRequest, self).__init__(
            'cdn', 'qcloudcliV1', 'GenerateLogList', 'cdn.api.qcloud.com')

    def get_endDate(self):
        """Return the configured ``endDate``, or None when unset."""
        params = self.get_params()
        return params.get('endDate')

    def set_endDate(self, endDate):
        """Record the log-range end date."""
        self.add_param('endDate', endDate)

    def get_hostId(self):
        """Return the configured ``hostId``, or None when unset."""
        params = self.get_params()
        return params.get('hostId')

    def set_hostId(self, hostId):
        """Record the target host identifier."""
        self.add_param('hostId', hostId)

    def get_startDate(self):
        """Return the configured ``startDate``, or None when unset."""
        params = self.get_params()
        return params.get('startDate')

    def set_startDate(self, startDate):
        """Record the log-range start date."""
        self.add_param('startDate', startDate)
| StarcoderdataPython |
6589394 | <filename>code/server/test_code_works.py
import re
import json
import time
import subprocess
import shlex
import platform
from pathlib import Path
from operator import itemgetter
import pytest
WIN = platform.system() == 'Windows'
HOST = f"{platform.node() if WIN else '0.0.0.0'}:8080"
SYSTEM_SUFFIXES = ('_win.txt', '_unix.txt')
CURRENT_SYSTEM_SUFFIX = '_win.txt' if WIN else '_unix.txt'
def parse_response(text):
    """Parse a raw HTTP response dump into status/headers/body pieces."""
    normalized = text.replace('\r\n', '\n')
    if '\n\n' in normalized:
        head, body = normalized.split('\n\n')
    else:
        head, body = normalized, None
    # Canonicalize the JSON content type so fixtures compare equal.
    head = head.replace('application/json; charset=UTF-8', 'application/json')
    # Drop Date headers -- they change on every run.
    lines = [ln for ln in head.splitlines() if not ln.lower().startswith('date:')]
    status = int(re.search(r' (\d{3}) ', lines[0]).group(1))
    headers = lines[1:]
    if body and body.strip():
        if '/json' in head:
            body = body.strip()
    else:
        body = None
    body_json = json.loads(body) if body and '/json' in head else None
    return dict(status=status, headers=headers, body=body, body_json=body_json)
def parse_test(text, filename=None):
    """Parse one fixture file into ``{command, response}``.

    The first line holds the curl command (after a two-character prompt
    prefix); the remaining lines are the expected HTTP response. Placeholder
    hosts are rewritten to the live HOST, which requires recomputing
    Content-Length.
    """
    lines = text.splitlines()
    # Drop the first two characters -- presumably a "$ " shell prompt; TODO confirm.
    command = replace_host(lines[0][2:])
    response = parse_response('\n'.join(lines[1:]) + '\n')
    # Temporarily parse headers into a dict
    headers = dict([header.split(': ') for header in response['headers']])
    # Replace host in the Location header which usually contains a link
    if 'Location' in headers:
        headers['Location'] = replace_host(headers['Location'])
    if response['body'] is not None:
        # Because we'll be altering body and the Content-Length header below,
        # verify first that the original file has it right.
        assert int(headers['Content-Length']) == len(response['body']), filename
        # The body can contain links, so we must replace host even there. That
        # changes the Content-Length though, so it must be re-calculated.
        # Fix: both lines are now guarded by the None check -- previously a
        # body-less fixture crashed on replace_host(None) / len(None).
        response['body'] = replace_host(response['body'])
        headers['Content-Length'] = len(response['body'])
    # We must update the parsed JSON body as well
    if response['body_json'] is not None:
        response['body_json'] = json.loads(response['body'])
    # Squash headers back
    response['headers'] = [f'{name}: {value}' for name, value
                           in headers.items()]
    return dict(command=command, response=response)
def replace_host(text):
    """Rewrite every placeholder host used in fixtures to the live HOST."""
    for placeholder in ('0.0.0.0:8080', 'MY-COMPUTER:8080', 'api.example.com'):
        text = text.replace(placeholder, HOST)
    return text
def is_test(basename):
    """Decide whether *basename* names a fixture relevant on this OS."""
    if not basename.startswith(('test', 'example')):
        return False
    # OS-specific fixtures only run on their own platform.
    if basename.endswith(SYSTEM_SUFFIXES):
        return basename.endswith(CURRENT_SYSTEM_SUFFIX)
    return True
def generate(dir):
    """Yield one test batch per fixture directory under *dir*."""
    for entry in sorted(dir.iterdir()):
        if not entry.is_dir() or entry.name == '__pycache__':
            continue
        batch = []
        for fixture in sorted(entry.iterdir()):
            if is_test(fixture.name):
                batch.append(
                    parse_test(fixture.read_text(), filename=str(fixture.resolve()))
                )
        yield dict(src=entry, name='code/server/' + entry.name, tests=batch)
def run_test(command):
    """Run *command* as a subprocess and parse its stdout as an HTTP response."""
    completed = subprocess.run(
        shlex.split(command),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return parse_response(completed.stdout.decode('utf-8'))
test_batches = list(generate(Path(__file__).parent))
@pytest.mark.parametrize('test_batch', test_batches, ids=itemgetter('name'))
def test_code_works(test_batch):
    """End-to-end check: serve each example app with waitress and replay its
    recorded curl fixtures against the live server."""
    server = subprocess.Popen(['waitress-serve', 'index:app'],
                              cwd=test_batch['src'])
    # Give the server a moment to bind before firing requests.
    time.sleep(0.5)
    try:
        # Run all requests first so the server is stopped even if one fails.
        tests = [
            (test['command'], run_test(test['command']), test['response'])
            for test in test_batch['tests']
        ]
    finally:
        server.terminate()
    for command, response, expected_response in tests:
        print(command)
        assert response['status'] == expected_response['status']
        # Expected headers must be a subset of the actual headers.
        for header in expected_response['headers']:
            assert header in response['headers']
        if response['body_json']:
            assert response['body_json'] == expected_response['body_json']
        else:
            assert response['body'] == expected_response['body']
| StarcoderdataPython |
1891438 | <filename>celery_progress/__init__.py
from django.conf import settings
# from django.utils.module_loading import import_by_path
from django.utils.module_loading import import_string
BACKEND = getattr(settings, 'CELERY_PROGRESS_BACKEND',
'celery_progress.backends.CeleryBackend')
def get_backend():
    """Import and return the backend class named by the BACKEND dotted path."""
    return import_string(BACKEND)
backend = get_backend()()
| StarcoderdataPython |
9663751 | <filename>src/managementgroups/azext_managementgroups/_client_factory.py<gh_stars>1-10
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cf_managementgroups(cli_ctx, **_):
    """Return a ManagementGroupsAPI client for *cli_ctx*.

    Management groups are tenant-level resources, hence
    ``subscription_bound=False``.
    """
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azext_managementgroups.managementgroups import ManagementGroupsAPI
    return get_mgmt_service_client(
        cli_ctx,
        ManagementGroupsAPI,
        subscription_bound=False)
def management_groups_client_factory(cli_ctx, _):
    """Return the management-groups operations group of the API client."""
    return cf_managementgroups(cli_ctx).management_groups
def management_group_subscriptions_client_factory(cli_ctx, _):
    """Return the management-group-subscriptions operations group of the API client."""
    return cf_managementgroups(cli_ctx).management_group_subscriptions
| StarcoderdataPython |
4997082 | <reponame>Endurance-Robotics/ChatBots
# -*- coding: utf-8 -*-
# File: responses.py
# Description: Responses module for processing Botlibre response-lists
# Author: ValV
import re, errno, json
from os import makedirs
from os.path import join
from math import log10, floor
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode, quote_plus
try:
from memkalkul import total_size
except:
pass
# Response dictionary key names (not defined by response-lists)
question_key = "question"
response_key = "response"
# Global token variable
token = None
# Regex patterns for response-lists processing
xt_html = re.compile(r"<[^>]+>")
xpuncts = re.compile(r"[^-\w\s]|\s+[^\w]+\s+")
xspaces = re.compile(r"\s+")
xextens = re.compile(r"\.res$|\.$|$")
# Function: parse_responses
def parse_responses(list_file):
    """Parse a BotLibre response-list into a list of dicts.

    Records are separated by blank lines. Within a record, the first line
    is the question, lines of the form ``key: value`` are metadata, and all
    remaining lines are concatenated into the response text.
    """
    # Regex patterns for BotLibre response lists (simplified)
    xdivide = re.compile(r"^$")
    xmeta_key = re.compile(r"^[a-z ]+: ")
    xmeta_value = re.compile(r": .+$")
    # Responses are a list of dictionaries
    responses = []
    the_response = {}
    for line in list_file:
        if xdivide.match(line):
            # Record separator: flush the record collected so far.
            # Fix: guard against leading/consecutive blank lines, which
            # previously appended empty or junk records.
            if the_response:
                responses.append(the_response)
            the_response = {}
            continue
        if not the_response.get(question_key):
            the_response[question_key] = line.strip()
        elif not xmeta_key.search(line):
            # Continuation of the response body.
            if the_response.get(response_key):
                the_response[response_key] += line.strip()
            else:
                the_response[response_key] = line.strip()
        else:
            # Metadata line "key: value".
            the_response[xmeta_value.sub('', line).strip()] = \
                xmeta_key.sub('', line.strip())
    # Fix: flush the final record when the file does not end with a blank
    # line -- previously that record was silently dropped.
    if the_response:
        responses.append(the_response)
    return responses
# Function: parse_responses
# Function: show_responses
def show_responses(responses=None, field=question_key):
    """Print a zero-padded numbered listing of *field* for each response."""
    if responses:
        width = floor(log10(len(responses))) + 1
        counter = 0
        for entry in enumerate(responses, 1):
            counter, record = entry
            print(str(counter).zfill(width), record.get(field))
        print("Total number of entries:", counter)
        # Optional diagnostic; total_size may be unavailable.
        try:
            print("Memory consumption:", total_size(responses))
        except:
            pass
    return None
# Function: show_responses
# Function: dump_responses
def dump_responses(responses, path="./dump_responses"):
    """Write each response's normalized text to its own file under *path*.

    File names are "<zero-padded index> - <question>"; write failures are
    reported but do not abort the dump.
    """
    try:
        makedirs(path, 0o755)
    except OSError as e:
        # Pre-existing directory is fine; anything else is a real error.
        if e.errno != errno.EEXIST:
            raise
    if responses:
        counter = 0
        pad = floor(log10(len(responses))) + 1
        for the_response in responses:
            counter += 1
            file_name = str(counter).zfill(pad)
            file_name += " - " + the_response[question_key]
            try:
                with open(join(path, file_name), "w+") as response_file:
                    #response_file.write(strip_tags(the_response[response_key]))
                    response_file.write(normalize(the_response[response_key]))
                    # NOTE(review): close() is redundant inside the `with`
                    # block -- the context manager already closes the file.
                    response_file.close()
            except IOError:
                print("Error writing to:", file_name)
    return None
# Function: dump_responses
# Function: strip_tags
def strip_tags(text=None):
    """Remove HTML tags from *text*; falsy input passes through as None."""
    return xt_html.sub("", text) if text else None
# Function: strip_tags
# Function: normalize
def normalize(text=None):
    """Lower-case *text* after stripping tags/punctuation and collapsing spaces."""
    if not text:
        return None
    cleaned = strip_tags(text)
    cleaned = xpuncts.sub(" ", cleaned)
    cleaned = xspaces.sub(" ", cleaned)
    return cleaned.lower().strip()
# Function: normalize
# Function: fetch_token
def fetch_token(login=None, password=None):
    """Fetch a ParaPhraser API token.

    Credentials come either from the *login*/*password* arguments or, when
    omitted, from an ``authentication`` JSON file in the working directory:
    ``{"login": "User", "password": "Password"}``.
    Returns the token string, or None on any error.
    """
    payload = ""
    if login and password:
        # Fix: json.loads() parses a JSON *string* -- the credentials are
        # already a dict, so use them directly (the old code raised
        # TypeError whenever login/password were supplied).
        payload = {"login": login, "password": password}
    else:
        with open("authentication", "r") as authfile:
            payload = json.load(authfile)
    print("fetch_token.payload:", payload)
    payload = urlencode(payload).encode("utf-8")
    print("fetch_token.payload:", payload)
    request = Request("https://paraphraser.ru/token/", data=payload)
    try:
        response = json.loads(urlopen(request).read().decode("utf-8"))
        print("fetch_token.response:", response)
        return response["token"]
    except (URLError, HTTPError) as e:
        print(e.reason)
        if type(e) is HTTPError:
            print(json.loads(e.read().decode("utf-8")))
    except Exception as e:
        print(e)
    return None
# Function: fetch_token
# Function: submit_query
def submit_query(payload):
    """POST *payload* (form-encoded) to the ParaPhraser API.

    Returns the decoded JSON response; on HTTP errors returns the decoded
    error body, and on any other failure an empty dict.
    """
    payload = urlencode(payload).encode()
    print("submit_query.params:", payload)
    request = Request("http://paraphraser.ru/api/", data=payload,)
    #headers={"Content-Type": "application/json"}, method="POST")
    try:
        return json.loads(urlopen(request).read().decode("utf-8"))
    except (URLError, HTTPError) as e:
        print(e.reason)
        # HTTP errors still carry a JSON body describing the failure.
        if type(e) is HTTPError:
            return json.loads(e.read().decode("utf-8"))
    except Exception as e:
        print(e)
    return dict()
# Function: submit_query
# Function: get_keywords
def get_keywords(text=""):
    """Request keyword extraction for *text* from the ParaPhraser API.

    Fetches (and caches in the module-global ``token``) an API token on
    first use. Prints the extracted keywords and returns the ``response``
    mapping on success, otherwise None.
    """
    print("get_keywords.text:", text)
    global token
    if not token:
        token = fetch_token()
    if token is not None:
        result = submit_query({
            "c": "keywords",
            # Fix: the query was hard-coded to a sample sentence, so the
            # *text* argument was silently ignored.
            "query": text,
            "top": 3,
            "pos": "NOUN",
            "clusters": 0,
            "vecth": 0.68,
            "synth": 0.1,
            "expand": 0,
            "mwe": 1,
            "forms": 0,
            "lang": "ru",
            "format": "json",
            "token": token,
        })
        # Display Paraphraser response
        if result.get("code") == 0:
            response = result.get("response")
            for item in response:
                for value in response[item].get("keywords"):
                    print(value)
            return response
        else:
            print("Error executing query.\nRESPONSE:", result.get("msg"))
    else:
        print("No API token. Perhaps, wrong login or password...")
    return None
# Function: get_keywords
# vim: se et ts=2 sw=2 number syntax=python:
| StarcoderdataPython |
315712 |
import os
import json
from json import JSONEncoder
class ClassEncoder(JSONEncoder):
    """JSON encoder that serializes arbitrary objects via their ``__dict__``."""

    def default(self, o):
        # Fix: the override was missing ``self``, so when json called it as
        # a bound method, ``o`` received the encoder instance and the target
        # object was dropped entirely.
        return o.__dict__
class PersistenceHanlder:
    """Persists bank/client/count data as JSON files on disk.

    NOTE(review): the data directory is a hard-coded absolute Windows
    path -- consider making it configurable.
    """

    relative_path_root = r'C:\Users\Utente 39\Documents\temp\github\projects\bank_managment_system_pro\Data'

    def __write_on_file__(self, value, path):
        # Fix: open in 'w' (truncate) mode -- appending successive JSON
        # dumps to the same file produced an unparseable concatenation.
        # The `with` block also guarantees the file handle is closed.
        with open(path, 'w') as f:
            f.write(value)

    def __read_file_bank__(self, value, path):
        # Fix: file.read() takes an optional *size* argument, not a
        # destination -- the old code discarded the data entirely.
        # *value* is kept for interface compatibility but unused.
        with open(path, "r") as f:
            return f.read()

    def __remove_file__(self, path):
        # Delete the file at *path*; raises FileNotFoundError if absent.
        os.remove(path)

    def bank_save_all(self, value):
        """Serialize *value* with ClassEncoder and persist it as the bank file."""
        bank_json = json.dumps(value, indent=4, cls=ClassEncoder)
        self.__write_on_file__(bank_json, self.relative_path_root + "/data_bank.json")

    def client_save_all(self, value):
        """Serialize *value* with ClassEncoder and persist it as the client file."""
        client_json = json.dumps(value, indent=4, cls=ClassEncoder)
        self.__write_on_file__(client_json, self.relative_path_root + "/data_client.json")

    def count_save_all(self, value):
        """Serialize *value* with ClassEncoder and persist it as the count file."""
        count_json = json.dumps(value, indent=4, cls=ClassEncoder)
        self.__write_on_file__(count_json, self.relative_path_root + "/data_count.json")
| StarcoderdataPython |
8088651 | <gh_stars>10-100
import re
import unicodedata
def validate_label(label):
    """Normalize a transcript label for this speech corpus.

    Returns the cleaned lower-case string, or None when the label should be
    discarded (contains digits, '*', or characters from the skip list).
    The long replace chain strips punctuation and transliterates accented /
    foreign characters; replacement ORDER matters (e.g. "nºˢ" before "nº"
    before "n°").
    """
    label = unicodedata.normalize('NFKC', label)
    # Discard labels containing digits or wildcard markers.
    if re.search(r"[0-9]", label) is not None:
        return None
    if '*' in label:
        return None
    # Labels containing any of these CJK/kana characters are discarded.
    skip_foreign_chars = [
        'い',
        'た',
        'つ',
        'ぬ',
        'の',
        '乃',
        '京',
        '北',
        '扬',
        '星',
        '术',
        '杜',
        '美',
        '馆',
    ]
    for skip in skip_foreign_chars:
        if skip in label:
            return None
    label = label.strip()
    label = label.lower()
    # Strip punctuation and typographic symbols.
    label = label.replace("=", "")
    label = label.replace("|", "")
    label = label.replace("-", " ")
    label = label.replace("–", " ")
    label = label.replace("—", " ")
    label = label.replace("’", " ")
    label = label.replace("^", "")
    label = label.replace("'", " ")
    label = label.replace("º", "")
    label = label.replace("…", " ")
    label = label.replace("_", " ")
    label = label.replace(".", "")
    label = label.replace(",", "")
    label = label.replace("?", "")
    label = label.replace("!", "")
    label = label.replace("\"", "")
    label = label.replace("(", "")
    label = label.replace(")", "")
    label = label.replace("{", "")
    label = label.replace("}", "")
    label = label.replace("/", " ")
    label = label.replace(":", "")
    label = label.replace(";", "")
    label = label.replace("«", "")
    label = label.replace("»", "")
    label = label.replace("%", "")
    label = label.replace("`", "")
    label = label.replace("°", "")
    label = label.replace("+", "")
    label = label.replace("±", "")
    label = label.replace("·", "")
    label = label.replace("×", "")
    # Transliterate accented Latin letters to their base form (commented
    # lines deliberately keep those characters untouched).
    label = label.replace("ă", "a")
    label = label.replace("ắ", "a")
    label = label.replace("ầ", "a")
    label = label.replace("å", "a")
    label = label.replace("ä", "a")
    label = label.replace("ą", "a")
    label = label.replace("ā", "a")
    label = label.replace("ả", "a")
    label = label.replace("ạ", "a")
    label = label.replace("ậ", "a")
    #label = label.replace("æ", "")
    label = label.replace("ć", "c")
    #label = label.replace("č", "c")
    label = label.replace("ċ", "c")
    label = label.replace("đ", "d")
    #label = label.replace("ḍ", "d")
    label = label.replace("ð", "o")
    label = label.replace("ễ", "e")
    label = label.replace("ě", "e")
    label = label.replace("ė", "e")
    label = label.replace("ę", "e")
    label = label.replace("ē", "e")
    label = label.replace("ệ", "e")
    #label = label.replace("ğ", "g")
    label = label.replace("ġ", "g")
    label = label.replace("ħ", "h")
    label = label.replace("ʻ", "")
    label = label.replace("ì", "i")
    label = label.replace("ī", "i")
    label = label.replace("ị", "")
    label = label.replace("ı", "")
    label = label.replace("ľ", "l'")
    label = label.replace("ļ", "l")
    label = label.replace("ł", "l")
    label = label.replace("ǹ", "n")
    label = label.replace("ň", "n")
    label = label.replace("ṅ", "n")
    label = label.replace("ņ", "n")
    label = label.replace("ṇ", "n")
    label = label.replace("ŏ", "o")
    label = label.replace("ồ", "o")
    label = label.replace("ổ", "o")
    label = label.replace("ő", "o")
    label = label.replace("õ", "o")
    label = label.replace("ø", "o")
    label = label.replace("ǫ", "o")
    label = label.replace("ơ", "")
    label = label.replace("ợ", "")
    label = label.replace("ộ", "o")
    label = label.replace("ř", "r")
    label = label.replace("ś", "s")
    label = label.replace("š", "s")
    label = label.replace("ş", "s")
    #label = label.replace("ṣ", "s")
    label = label.replace("ș", "s")
    label = label.replace("ß", "ss")
    label = label.replace("ť", "t")
    #label = label.replace("ṭ", "t")
    #label = label.replace("ț", "t")
    label = label.replace("ṯ", "t")
    label = label.replace("ú", "u")
    label = label.replace("ų", "u")
    label = label.replace("ư", "u")
    label = label.replace("ử", "u")
    label = label.replace("ʉ", "")
    label = label.replace("ý", "y")
    label = label.replace("ỳ", "y")
    label = label.replace("ź", "z")
    label = label.replace("ž", "z")
    label = label.replace("ż", "z")
    label = label.replace("þ", "")
    label = label.replace("ʼ", "")
    label = label.replace("ʾ", "")
    label = label.replace("ʿ", "")
    label = label.replace("ǃ", "")
    # Greek letters: spell out or drop.
    label = label.replace("δ", "delta")
    label = label.replace("ζ", "")
    label = label.replace("κ", "kappa")
    label = label.replace("ν", "")
    label = label.replace("π", "pi")
    label = label.replace("σ", "sigma")
    label = label.replace("τ", "tau")
    label = label.replace("υ", "")
    label = label.replace("ω", "omega")
    # Cyrillic / Hebrew / Ethiopic: map to Latin look-alikes or drop.
    label = label.replace("а", "a")
    label = label.replace("г", "r")
    label = label.replace("е", "e")
    label = label.replace("з", "")
    label = label.replace("и", "")
    label = label.replace("к", "")
    label = label.replace("м", "")
    label = label.replace("н", "")
    label = label.replace("ҫ", "c")
    label = label.replace("я", "")
    label = label.replace("א", "")
    label = label.replace("ደ", "")
    label = label.replace("ጠ", "")
    label = label.replace("α", "alpha")
    label = label.replace("γ", "ɣ")
    label = label.replace("μ", "mu")
    # Typographic quotes and math symbols.
    label = label.replace("‘", "")
    label = label.replace("“", "")
    label = label.replace("”", "")
    label = label.replace("„", "")
    label = label.replace("†", "")
    label = label.replace("′", "")
    label = label.replace("‹", "")
    label = label.replace("›", "")
    label = label.replace("⁄", "")
    label = label.replace("∅", "")
    label = label.replace("∈", "")
    label = label.replace("∞", "")
    label = label.replace("≥", "")
    label = label.replace("☉", "")
    label = label.replace("ː", "")
    # Currency/abbreviation expansions -- presumably Kabyle words; confirm.
    label = label.replace("§", "taseddaṛt")
    label = label.replace("$", "dullaṛ")
    label = label.replace("£", "liṛa")
    label = label.replace("€", "luṛu")
    label = label.replace("β", "")
    # NOTE(review): "σ" was already replaced with "sigma" above, so this
    # replacement is dead code.
    label = label.replace("σ", "")
    label = label.replace("½", "azgen")
    label = label.replace("¼", "")
    label = label.replace("&", "akked")
    label = label.replace("æ", "")
    # Order matters: longest "nº" variants first.
    label = label.replace("nºˢ", "")
    label = label.replace("nº", "uṭṭun")
    label = label.replace("n°", "uṭṭun")
    # NOTE(review): the following lines presumably map distinct Unicode
    # space characters (NBSP, thin space, ...) to an ASCII space -- the
    # code points are not visually distinguishable here; verify against
    # the original file.
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    label = label.replace(" ", " ")
    # Remove combining diacritical marks.
    label = label.replace(u"\u0301", "")
    label = label.replace(u"\u0307", "")
    label = label.replace(u"\u0320", "")
    label = label.replace(u"\u0331", "")
    label = label.strip()
    label = label.lower()
    return label if label else None
| StarcoderdataPython |
1856042 | <reponame>julianlore/RedCogs<filename>owin/owin.py
import aiohttp
import discord
from redbot.core import Config, commands
class Owin(commands.Cog):
    """Display Overwatch wins/losses and relative changes since last refresh."""

    __author__ = ["julianlore"]
    __version__ = "1.0.0"

    def __init__(self, bot):
        self.bot = bot
        self.config: Config = Config.get_conf(self, 9077625945313)
        default: dict = {"userstats": {}}
        self.config.register_guild(**default)

    @staticmethod
    async def get_data_for_user(ctx: commands.Context, battleTag: str):
        """Fetch [level, qp wins, qp losses, qp win %] for *battleTag*.

        Raises ValueError when the stats service answers with a non-200.
        """
        uri = 'https://ovrstat.com/stats/pc/' + battleTag.replace('#', '-')
        async with aiohttp.ClientSession() as session:
            async with session.get(uri) as response:
                if response.status != 200:
                    # Fix: ctx.send is a coroutine -- it was never awaited,
                    # so the error message was never actually sent.
                    await ctx.send(f"{uri} returned response {str(response.status)}.")
                    raise ValueError(f"{uri} returned response {str(response.status)}.")
                userJson = await response.json()
        # Effective level = level plus 100 per prestige star.
        level = userJson["level"] + (100 * userJson["prestige"])
        qp = userJson["quickPlayStats"]["careerStats"]["allHeroes"]["game"]
        winPercent = round(qp["gamesWon"] / qp["gamesPlayed"] * 100, 3)
        return [level, qp["gamesWon"], qp["gamesLost"], winPercent]

    @staticmethod
    async def format_stats_table(statsTable):
        """Render a 2-D list of strings as a monospaced, column-aligned table."""
        # Widest entry per column determines that column's width.
        maxWidthList = [len(stat) for stat in statsTable[0]]
        for row in statsTable:
            for j, cell in enumerate(row):
                maxWidthList[j] = max(maxWidthList[j], len(cell))
        strTable = "```"
        for i, row in enumerate(statsTable):
            strTable += '\n'
            for j, cell in enumerate(row):
                # '|' separators with each cell left-justified to its column width.
                strTable += f"|{cell:<{maxWidthList[j]}}"
            strTable += '|'
            # Dashed separator under the header row: one dash per column
            # character, one per '|' separator, plus the closing '|'.
            if i == 0:
                strTable += '\n' + ((sum(maxWidthList) + len(maxWidthList) + 1) * '-')
        strTable += "\n```"
        return strTable

    @staticmethod
    async def refresh_stats(ctx: commands.Context, userstatsDict):
        """Re-fetch stats for every tracked player and tabulate the deltas."""
        statsTable = [["Player", "Level", "QP Wins", "QP Losses",
                       "QP Win %"]]
        for user, oldStatsList in userstatsDict.items():
            try:
                newStatsList = await Owin.get_data_for_user(ctx, user)
                # Invalid old data: record fresh stats without comparing.
                if len(oldStatsList) != len(newStatsList):
                    userstatsDict[user] = newStatsList
                    statsTable.append([user] + [str(stat) for stat in newStatsList])
                    continue
                curRow = [user]
                for i in range(len(newStatsList)):
                    # Change since last refresh; positives get a '+' prefix
                    # and zero changes are omitted entirely.
                    change = round(newStatsList[i] - oldStatsList[i], 3)
                    sign = '+' if change > 0 else ''
                    changeStr = f" ({sign}{str(change)})" if change != 0 else ''
                    curRow.append(str(newStatsList[i]) + changeStr)
                statsTable.append(curRow)
                userstatsDict[user] = newStatsList
            except ValueError:
                continue  # Error message sent in get_data_for_user
        return await Owin.format_stats_table(statsTable)

    @commands.command()
    async def owin(self, ctx: commands.Context, *args) -> None:
        """Display Overwatch wins/losses and relative changes since last
        refresh. Subcommand `[p]owin add <BattleTag>` to add a new BattleTag to
        track."""
        async with self.config.guild(ctx.guild).userstats() as userstatsDict:
            # No subcommand: refresh and display the stats table.
            if len(args) <= 0:
                if not userstatsDict:
                    return await ctx.send("No players to track. Add BattleTags using the subcommand add. `[p]owin add <BattleTag>`")
                strTable = await Owin.refresh_stats(ctx, userstatsDict)
                return await ctx.send(strTable)
            command = args[0]
            if command == "add":
                if len(args) < 2:
                    return await ctx.send("Invalid number of arguments. Example usage: `[p]owin add <BattleTag>`")
                battleTag = args[1]
                if '#' not in battleTag:
                    return await ctx.send("Please specify the BattleTag #, i.e. `BattleTag#1234`.")
                if battleTag in userstatsDict:
                    return await ctx.send("BattleTag already present in users to track.")
                try:
                    statsList = await Owin.get_data_for_user(ctx, battleTag)
                    userstatsDict[battleTag] = statsList
                    return await ctx.send("BattleTag added successfully!")
                except ValueError:
                    pass  # Error message sent in get_data_for_user
            elif command == "clear":
                await self.config.guild(ctx.guild).userstats.clear()
                await ctx.send("Cleared data successfully.")
            elif command == "delete":
                # Fix: validate the argument count BEFORE indexing args --
                # the old code raised IndexError on `[p]owin delete`.
                if len(args) < 2:
                    return await ctx.send("Invalid number of arguments. Example usage: `[p]owin delete <BattleTag>`")
                battleTag = args[1]
                # Fix: remove the mapping entry itself; the old code called
                # .pop() on the stats LIST, which only trimmed its last
                # element and left the BattleTag tracked.
                if battleTag not in userstatsDict:
                    return await ctx.send("BattleTag not found in users to track.")
                userstatsDict.pop(battleTag)
                return await ctx.send("BattleTag deleted successfully.")
            else:
                await ctx.send(f"Invalid command {command}. Supported commands: add, clear, delete")
| StarcoderdataPython |
5086057 | <filename>shaded_libraries/third_party_flink_ai_extended/flink-ml-tensorflow/src/test/python/global_step_direct.py<gh_stars>1000+
from __future__ import print_function
from datetime import datetime
import tensorflow as tf
import sys
import time
import json
from tensorflow.python.summary.writer.writer_cache import FileWriterCache as SummaryWriterCache
import tensorflow_on_flink.tensorflow_on_flink_ops as tff_ops
import traceback
def log_speed(steps, start):
    """Print throughput in steps/second since *start* and flush stdout."""
    elapsed = time.time() - start
    rate = steps / elapsed
    print("Read directly: {} steps, at {:.2f} steps/second".format(steps, rate))
    sys.stdout.flush()
def map_fun(context):
    """Worker entry point for a Flink-launched distributed TF1 job.

    Reads the cluster spec and job role from *context*, joins the TF
    cluster, and drives a text-line input pipeline while counting steps
    via a shared global_step variable.
    """
    print(tf.__version__)
    sys.stdout.flush()
    tf.logging.set_verbosity(tf.logging.ERROR)
    jobName = context.jobName
    index = context.index
    clusterStr = context.properties["cluster"]
    delim = context.properties["SYS:delim"]
    epochs = int(context.properties["epochs"])
    data_file = context.properties["data.file"]
    print(index, clusterStr)
    sys.stdout.flush()
    clusterJson = json.loads(clusterStr)
    cluster = tf.train.ClusterSpec(cluster=clusterJson)
    server = tf.train.Server(cluster, job_name=jobName, task_index=index)
    # Only talk to the PS tasks and this worker's own task.
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                 device_filters=["/job:ps", "/job:worker/task:%d" % index])
    # Pin variables to PS, ops to this worker.
    with tf.device(tf.train.replica_device_setter(worker_device='/job:worker/task:' + str(index), cluster=cluster)):
        filename_queue = tf.train.string_input_producer([data_file], num_epochs=epochs)
        reader = tf.TextLineReader()
        key, value = reader.read(filename_queue)
        global_step = tf.contrib.framework.get_or_create_global_step()
        global_step_inc = tf.assign_add(global_step, 1)
    # Worker 0 acts as chief (checkpointing, init).
    is_chief = (index == 0)
    print(datetime.now().isoformat() + " started ------------------------------------")
    t = time.time()
    total_step = 0
    try:
        with tf.train.MonitoredTrainingSession(master=server.target, is_chief=is_chief, config=sess_config,
                                               checkpoint_dir="./target/tmp/input_output/" + str(t)) as mon_sess:
            # while not mon_sess.should_stop():
            # Loop ends via the OutOfRangeError raised when the input
            # queue is exhausted, caught by the except below.
            while True:
                total_step, _, _ = mon_sess.run([global_step_inc, key, value])
                if (total_step % 10000 == 0):
                    log_speed(total_step, t)
    except Exception as e:
        print('traceback.print_exc():')
        traceback.print_exc()
        sys.stdout.flush()
    finally:
        print(datetime.now().isoformat() + " ended --------------------------------------")
        log_speed(total_step, t)
        SummaryWriterCache.clear()
if __name__ == "__main__":
map_fun(context)
| StarcoderdataPython |
313768 | import turtle
from BackOfPC import PC_BACK
from Apple_Logo import AppleLogo
from General_Drawing import generalDrawing
from Progress_Bar import ProgressBar
import time
from Image import Image
turtle.colormode(255)
class PC(generalDrawing):
def __init__(self, size, topLeft, topRight, bottomRight, bottomLeft ):
# Initializing Turtle object
self.t = turtle.Turtle(visible=False)
self.scr = turtle.Screen()
self.t.pensize(4)
self.t.hideturtle()
self.scr.tracer(False)
self.scr.bgcolor("light yellow")
self.scale = size
self.topLeft = topLeft
self.topRight = [topRight[0] * self.scale, topRight[1]]
self.bottomLeft = [bottomLeft[0], bottomLeft[1] * self.scale]
self.bottomRight = [bottomRight[0] * self.scale, bottomRight[1] * self.scale]
self.pcScreenDimension = [[self.topLeft[0], self.topLeft[1]-5], [self.topRight[0], self.topRight[1]-5], self.bottomRight, self.bottomLeft]
self.screenColor = (6, 6, 8)
self.colorMap = {"lightGrey": (188, 188, 188), "nearBlack": (6, 6, 8)}
self.letters = [
["esc", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10","F11", "F12", "-->>"],
["~~", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0","--", "+", " <<---"],
["-->|", "Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P","[", "]", " "],
["CAPS", "A", "S", "D", "F", "G", "H", "J", "K", "L", ":",'"', "\\", "<<-|"],
["SHIFT", "~~", "Z", "X", "C", "V", "B", "N", "M", "<", ">","?", " SHIFT"],
["fn", "ctrl", "\ =", "cmd", " ", "cmd", "\ =", "<<", "--", " ^^", ">>"]]
self.openingAppleLogo = AppleLogo(20, (self.topLeft[0] + self.topRight[0]) / 2, 200, "black","white") # Apple Logo while opening
self.openingProgressBar = ProgressBar(10, (self.topLeft[0] + self.topRight[0]) / 2 + 105, 70.5, 200, 6, "grey", "black") # Progress Bar while opening
self.wallpaper = Image(".//Images//screen1.gif", 0, 0)
generalDrawing.__init__(self, self.t)
def drawQuadrilateral(self, topLeft, topRight, bottomRight, bottomLeft, curve=0, lineColor=None, colorFill=None):
return super().drawQuadrilateral(topLeft, topRight, bottomRight, bottomLeft, curve=curve, lineColor=lineColor, colorFill=colorFill)
def gotoInv(self, turtleCursor, x, y):
return super().gotoInv(turtleCursor, x, y)
def setTracerFast(self):
self.scr.tracer(0)
def draw(self):
self.drawScreen()
self.drawKeyBoard()
self.drawPad()
self.drawPowBut()
# self.drawQuadrilateral((-194, -27), (182, -22), (187, -22), (-190, -27), 5, (22,26,31))
# self.drawQuadrilateral((-240, -5), (240, -5), (240, -21), (-240, -21), 0, "black")
self.t.color([125] * 3)
self.gotoInv(self.t, (self.bottomLeft[0] + self.bottomRight[0]) / 2, self.bottomRight[1] + 10 )
self.t.write("MacBook Pro", False, 'center', ("Arial", 9))
self.drawKeys()
def clear(self):
self.openingProgressBar.clear()
self.openingAppleLogo.clear()
self.t.clear()
def drawPowBut(self):
self.scr.tracer(True)
powerButton = Image(".//Images//power.gif", self.bottomRight[0] - 20, self.bottomRight[1] - 20)
powerButton.draw()
self.scr.tracer(False)
def drawDesktop(self):
self.wallpaper.draw()
self.wallpaper.scr.mainloop()
def drawScreen(self, screenColor=(6, 6, 8)):
self.screenColor = screenColor
self.drawQuadrilateral(self.topLeft, self.topRight, self.bottomRight, self.bottomLeft, 5, self.colorMap["lightGrey"])
self.t.pensize(3)
self.drawQuadrilateral(*self.pcScreenDimension, 5, "black", self.screenColor)
def drawKeyBoard(self):
self.y_diff = self.bottomLeft[1] - self.topLeft[1]
self.keyboardTopLeft = (self.topLeft[0] + 25, self.topLeft[1] + self.y_diff - 2)
self.keyboardTopRight = (self.topRight[0] - 28, self.topRight[1] + self.y_diff - 2)
self.keyboardBottomRight = (self.bottomRight[0] + 40 , -2 * self.bottomRight[1] + self.y_diff + 30)
self.keyboardBottomLeft = (self.bottomLeft[0] - 40, -2 * self.bottomLeft[1] + self.y_diff + 15)
self.drawQuadrilateral(self.keyboardTopLeft, self.keyboardTopRight, (self.keyboardBottomRight[0]+2, self.keyboardBottomRight[1]-10), (self.keyboardBottomLeft[0]-2, self.keyboardBottomLeft[1]-10), 5, (218,217,222))
self.drawQuadrilateral(self.keyboardTopLeft, self.keyboardTopRight, self.keyboardBottomRight, self.keyboardBottomLeft, 5, (194,195,197))
def drawPad(self):
    """Draw the rounded trackpad centred beneath the keyboard.

    Stores the pad's corner coordinates in self.touchPadPos and restores the
    turtle heading afterwards so later drawing is unaffected.
    """
    self.t.pensize(1)
    prevHeading = self.t.heading()
    self.touchPadWidth = 150 // self.scale
    self.touchPadHeight = 75 // self.scale
    bottomWidthMid = (self.bottomLeft[0] + self.bottomRight[0]) // 2
    self.touchPadPos = []
    self.gapUnderTouchpad = 10
    touchPadCurve = 10
    self.touchPadPos.append((bottomWidthMid - (self.touchPadWidth / 2) - touchPadCurve, self.keyboardBottomLeft[1] + self.touchPadHeight + self.gapUnderTouchpad))
    self.touchPadPos.append((bottomWidthMid + (self.touchPadWidth / 2) , self.keyboardBottomLeft[1] + self.touchPadHeight + self.gapUnderTouchpad + touchPadCurve))
    self.touchPadPos.append((bottomWidthMid + (self.touchPadWidth / 2) + touchPadCurve, self.keyboardBottomLeft[1] + self.gapUnderTouchpad + touchPadCurve))
    self.touchPadPos.append((bottomWidthMid - (self.touchPadWidth / 2) , self.keyboardBottomLeft[1] + self.gapUnderTouchpad))
    # (leftover debug print of touchPadPos removed)
    self.drawQuadrilateral(*self.touchPadPos, touchPadCurve, (154,155,157), (201,202,204))
    self.t.setheading(prevHeading)
def drawKeys(self):
    """Draw every key of the keyboard, applying per-row / per-key size tweaks
    (wider space bar, enter, shift keys, etc.)."""
    keyStartPos = [self.keyboardTopLeft[0] + 40, self.keyboardTopLeft[1] - 30]
    for i in range(len(self.letters)):
        # extraSpace accumulates the extra width taken by oversized keys in a row.
        extraSpace = 0
        for j in range(len(self.letters[i])):
            keyWidth = 29
            keyHeight = 20
            horizontalSpacing = 5
            verticalSpacing = 8
            keyCurve = 2
            bendConst = 0.3  # slight horizontal skew increasing along the row
            singleKey = self.letters[i][j]
            keyPos = (keyStartPos[0] + j * keyWidth - (i * 3) + extraSpace, keyStartPos[1] - i * keyHeight)
            # Row/key specific adjustments below (function row, tab, caps,
            # shift, enter, space bar, ...).
            if i == 0:
                keyHeight -= 2
                keyWidth += 1
            if i == 1 and j == 13:
                keyWidth += 6
            if i == 2:
                if j == 0:
                    keyWidth += 5
                    extraSpace += 5
                elif j == 13:
                    keyWidth += 6
            if i == 3:
                if j == 0:
                    keyWidth += 14
                    extraSpace += 14
                elif j == 12:
                    extraSpace += 9
                elif j == 13:
                    keyWidth -= 7
                    keyHeight += 10
            if i == 4:
                if j == 0:
                    keyWidth += 3
                    extraSpace += 3
                elif j == 12:
                    keyWidth += 50
            if i == 5:
                if j == 4:
                    keyWidth += 120
                    extraSpace += 120
            # elif j > 7:
            # keyWidth -= 5
            # horizontalSpacing -= 5
            # keyHeight -= 4
            # print(keyPos, (keyPos[0] + keyWidth - horizontalSpacing, keyPos[1]), (keyPos[0] + keyWidth - horizontalSpacing, keyPos[1] + keyHeight - verticalSpacing), (keyPos[0] , keyPos[1] + keyHeight - verticalSpacing))
            self.drawQuadrilateral((keyPos[0], keyPos[1] + keyHeight - verticalSpacing), (keyPos[0] + keyWidth - horizontalSpacing , keyPos[1] + keyHeight - verticalSpacing + keyCurve), (keyPos[0] + keyWidth - horizontalSpacing + (j * bendConst), keyPos[1] + keyCurve), (keyPos[0] + (j * bendConst) , keyPos[1]), keyCurve, (12,13,17))
            self.t.color("white")
            self.gotoInv(self.t, keyPos[0] + 15, keyPos[1])
            self.t.write(singleKey, False, 'center', ("Arial", 6))
if __name__ == "__main__":
    # Interactive demo: click the lid edge to open/close the laptop, click the
    # power button to play the boot animation.
    isPCOpen = False
    isPCLoading = False
    pcPosition = [(-265, 295), (260, 300), (240, -15), (-235, -20)]
    # pcPosition = [(0, 295), (525, 300), (505, -15), (30, -20)]
    pcTopXCenter = (pcPosition[0][0] + pcPosition[1][0])//2
    pcBottomXCenter = (pcPosition[3][0] + pcPosition[2][0])//2
    pcTopYCenter = (pcPosition[0][1] + pcPosition[1][1]) // 2
    pcBack = PC_BACK(*pcPosition)
    pcFront = PC(1, *pcPosition)

    def next1(x, y):
        # Click handler dispatching on the click position.
        # NOTE(review): turtle passes float coordinates, and `x in range(...)`
        # only matches whole-number values — confirm this is intended.
        print("Click Position", x, y)
        global isPCOpen
        global isPCLoading
        if not isPCLoading:
            print(pcBack.topRight)
            if x in range(pcTopXCenter-50, pcTopXCenter+50) and y in range(pcTopYCenter-15, pcTopYCenter+15) and isPCOpen:
                # Click on the top edge while open: close the lid.
                pcFront.clear()
                pcBack.draw()
                isPCOpen = False
            elif x in range(pcBottomXCenter-50, pcBottomXCenter+50) and y in range(pcBack.bottomLeft[1]-15,pcBack.bottomLeft[1]) and not isPCOpen:
                # Click on the bottom edge while closed: open the lid.
                pcBack.clear()
                pcFront.draw()
                isPCOpen = True
            elif x in range(pcBack.topRight[0]-12, pcBack.topRight[0]+2) and y in range(pcBack.topRight[1] - 20, pcBack.topRight[1] - 3) and isPCOpen and not isPCLoading:
                # Click on the power button while open: run the boot animation.
                isPCLoading = True
                pcFront.drawScreen("white")
                pcFront.openingAppleLogo.draw()
                pcFront.openingProgressBar.draw()
                isPCLoading = False
                pcBack.draw()

    turtle.Screen().onclick(next1)
    pcFront.scr.mainloop()
| StarcoderdataPython |
6508131 | <gh_stars>0
import RPi.GPIO as GPIO
from time import sleep
# Inspired by the instructable https://www.instructables.com/id/Ultimate-Arduino-Paper-Piano/

# Buzzer output pin (BCM numbering).
BUZZER_PIN=25
# Key input pins (one GPIO per piano key).
NOTE_F_PIN=2
NOTE_FS_PIN=3
NOTE_G_PIN=4
NOTE_GS_PIN=7
NOTE_A_PIN=9
NOTE_AS_PIN=10
NOTE_B_PIN=11
NOTE_C_PIN=17
NOTE_CS_PIN=22
NOTE_D_PIN=23
NOTE_DS_PIN=24
NOTE_E_PIN=27
# Base note frequencies in Hz (one octave, F up to E).
NOTE_F=123
NOTE_FS=116
NOTE_G=110
NOTE_GS=104
NOTE_A=98
NOTE_AS=92
NOTE_B=87
NOTE_C=82
NOTE_CS=77
NOTE_D=73
NOTE_DS=69
NOTE_E=65
# Octave multiplier applied to the base frequencies; change it to get
# higher or lower pitched octaves.
MULTIPLY=4
# Each predicate tests whether a keypad voltage reading falls inside the
# (exclusive) band assigned to one piano key.
def is_key_e(voltage):
    return 70 < voltage < 80

def is_key_ds(voltage):
    return 145 < voltage < 165

def is_key_d(voltage):
    return 230 < voltage < 240

def is_key_cs(voltage):
    return 305 < voltage < 320

def is_key_c(voltage):
    return 385 < voltage < 395

def is_key_b(voltage):
    return 465 < voltage < 475

def is_key_as(voltage):
    return 545 < voltage < 555

def is_key_a(voltage):
    return 625 < voltage < 635

def is_key_gs(voltage):
    return 700 < voltage < 715

def is_key_g(voltage):
    return 770 < voltage < 790

def is_key_fs(voltage):
    return 860 < voltage < 875

def is_key_f(voltage):
    return 935 < voltage < 955

def is_interval_tone(pin, voltage):
    # True when the reading lies anywhere inside the playable range.
    return 70 < voltage < 955
def generate_note(voltage):
    """Map a keypad voltage reading to a buzzer frequency (0 if no key matches)."""
    key_table = (
        (is_key_e, NOTE_E),
        (is_key_ds, NOTE_DS),
        (is_key_d, NOTE_D),
        (is_key_cs, NOTE_CS),
        (is_key_c, NOTE_C),
        (is_key_b, NOTE_B),
        (is_key_as, NOTE_AS),
        (is_key_a, NOTE_A),
        (is_key_gs, NOTE_GS),
        (is_key_g, NOTE_G),
        (is_key_fs, NOTE_FS),
        (is_key_f, NOTE_F),
    )
    for matches, base_note in key_table:
        if matches(voltage):
            return base_note * MULTIPLY
    return 0
def play_key(pin, voltage, duration_in_micro):
    """Play the note mapped to `voltage` on `pin`, or stay silent.

    NOTE(review): play_tone interprets its duration argument in milliseconds
    while this parameter is named in microseconds — the unit convention across
    these helpers should be confirmed.
    """
    if is_interval_tone(pin, voltage):
        note = generate_note(voltage)
        play_tone(pin, note, duration_in_micro)
    else:
        # Bug fix: time.sleep() takes SECONDS, so sleep(duration_in_micro)
        # paused for e.g. 200 s instead of 200 µs.  Use the converting helper.
        delay_micro(duration_in_micro)
def setup() :
    # Configure the GPIO library: BCM pin numbering, warnings off,
    # and the buzzer pin as an output.
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(BUZZER_PIN, GPIO.OUT)
def play_buzzer(buzzer, delay_in_micro) :
    # Emit one square-wave cycle: drive the pin high then low for equal
    # half-periods (delay_in_micro is the semiperiod in microseconds).
    GPIO.output(buzzer, GPIO.HIGH)
    delay_micro(delay_in_micro)
    GPIO.output(buzzer, GPIO.LOW)
    delay_micro(delay_in_micro)
def play_tone(pin, note, duration):
    """Play `note` (Hz) on the buzzer at `pin` for `duration` milliseconds.

    A note of 0 means silence (no key matched) and returns immediately.
    """
    print("tone "+str(note))
    if note == 0 :
        return
    # Semiperiod of the note in microseconds.
    # Portability fix: `long` only exists in Python 2; `int` behaves
    # identically for these magnitudes on both Python 2 and 3.
    beepDelay = int(1000000.0/note)
    # Number of full on/off cycles needed to fill the requested duration.
    cycles = int((duration*1000.0)/(beepDelay*2))
    for _ in range(cycles):
        play_buzzer(pin, beepDelay)
    GPIO.output(pin, GPIO.LOW)
    # Fixed 20 ms gap between notes (value is expressed in microseconds).
    DELAY_IN_MS = 20*1000
    delay_micro(DELAY_IN_MS)
def delay_micro(delay_in_micro):
    """Block for delay_in_micro microseconds (time.sleep takes seconds)."""
    seconds = delay_in_micro / 1000000.0
    sleep(seconds)
# Demo driver: sweep every key band MAX times, then clean up the GPIO state.
MAX = 10
DEFAULT_DELAY_IN_MS = 300
# NOTE(review): DEFAULT_DELAY_MICRO appears unused below — confirm before removing.
DEFAULT_DELAY_MICRO=DEFAULT_DELAY_IN_MS * 1000
counter = 0
try:
    setup()
    # One representative voltage per key band (see the is_key_* thresholds).
    keyboard_keys = [71, 146, 231, 306, 386, 466, 546, 626, 701, 771, 861, 936]
    while counter < MAX :
        print ("Execucao "+str(counter+1) )
        for keyboard_key in keyboard_keys:
            play_key(BUZZER_PIN, keyboard_key, 200)
            sleep(0.5)
        counter += 1
        sleep(0.5)
except Exception as ex:
    print(ex)
finally:
    GPIO.cleanup()
    # NOTE(review): raw_input is Python-2-only — confirm the target interpreter.
    raw_input("Press any key to end")
| StarcoderdataPython |
6593871 | <gh_stars>1-10
from django.contrib import admin
from datatrans.models import KeyValue
class KeyValueAdmin(admin.ModelAdmin):
list_display = (
"content_type",
"object_id",
"field",
"value",
"language",
"edited",
"fuzzy",
)
ordering = ("digest", "language")
search_fields = (
"content_type__app_label",
"content_type__model",
"value",
)
list_filter = ("content_type", "language", "edited", "fuzzy")
admin.site.register(KeyValue, KeyValueAdmin)
| StarcoderdataPython |
9774159 | <reponame>filimor/uri-online-judge
import math
# For each test case: two populations pa/pb with yearly growth rates g1/g2
# (given in percent).  Print how many years until pa overtakes pb, or a
# "more than a century" message after 100 years.
for i in range(int(input())):
    (pa, pb, g1, g2) = map(float, input().split(' '))
    g1 /= 100
    g2 /= 100
    tempo = 'Mais de 1 seculo.'
    for j in range(1, 101):
        # Growth truncated to whole inhabitants each year.
        pa += math.floor(pa * g1)
        pb += math.floor(pb * g2)
        if pa > pb:
            tempo = f'{j} anos.'
            break
    print(tempo)
| StarcoderdataPython |
9607884 | """Some small utility functions."""
from __future__ import annotations
import functools
import itertools
import logging
import os
import re
import time
import unicodedata
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Generator
from typing import Iterable
from typing import Iterator
from typing import TYPE_CHECKING
from typing import TypeVar
from flask import abort
from flask import send_file
from flask.wrappers import Response
from werkzeug.urls import url_quote
if TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIEnvironment
BASEPATH = Path(__file__).parent.parent
def filter_api_changed(record: Any) -> bool:
    """Filter out LogRecords for requests that poll for changes."""
    is_change_poll = "/api/changed HTTP" in record.msg
    return not is_change_poll
def setup_logging() -> None:
    """Setup logging for Fava."""
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    # Silence werkzeug access-log entries for the change-polling endpoint.
    logging.getLogger("werkzeug").addFilter(filter_api_changed)
def resource_path(relative_path: str) -> Path:
    """Get absolute path to resource."""
    return BASEPATH.joinpath(relative_path)
Item = TypeVar("Item")
def listify(
    func: Callable[..., Generator[Item, None, None]]
) -> Callable[..., list[Item]]:
    """Decorator to make generator function return a list."""

    @functools.wraps(func)
    def _collect(*args: Any, **kwargs: Any) -> list[Item]:
        # Exhaust the generator eagerly and hand back a concrete list.
        return list(func(*args, **kwargs))

    return _collect
def timefunc(
    func: Any,
) -> Any:  # pragma: no cover - only used for debugging so far
    """Decorator to time function for debugging."""

    @functools.wraps(func)
    def _wrapper(*args: Any, **kwargs: Any) -> Any:
        # perf_counter is monotonic and high-resolution; time.time() can jump
        # on clock adjustments, making it the wrong tool for durations.
        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()
        print(f"Ran {func.__name__} in {end - start}")
        return result

    return _wrapper
def pairwise(iterable: Iterable[Item]) -> Iterator[tuple[Item, Item]]:
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # Duplicate the iterator, advance the second copy by one, and zip them
    # back together to produce overlapping pairs.
    iterators = itertools.tee(iterable)
    next(iterators[1], None)
    return zip(*iterators)
def next_key(basekey: str, keys: dict[str, Any]) -> str:
    """Returns the next unused key for basekey in the supplied dictionary.

    The first try is `basekey`, followed by `basekey-2`, `basekey-3`, etc
    until a free one is found.
    """
    if basekey not in keys:
        return basekey
    suffix = 2
    candidate = f"{basekey}-{suffix}"
    while candidate in keys:
        suffix += 1
        candidate = f"{basekey}-{suffix}"
    return candidate
def slugify(string: str) -> str:
    """Slugify a string.

    Args:
        string: A string.

    Returns:
        A 'slug' of the string suitable for URLs. Retains non-ascii
        characters.
    """
    normalized = unicodedata.normalize("NFKC", string)
    # Drop all non-word characters (except '-'), then lowercase.
    cleaned = re.sub(r"[^\s\w-]", "", normalized).strip().lower()
    # Collapse whitespace (and mixed space/dash runs) into single dashes.
    return re.sub(r"[-\s]+", "-", cleaned)
def simple_wsgi(
    _: WSGIEnvironment, start_response: StartResponse
) -> list[bytes]:
    """A simple wsgi app that always returns an empty response."""
    headers = [("Content-Type", "text/html")]
    start_response("200 OK", headers)
    return [b""]
def send_file_inline(filename: str) -> Response:
    """Send a file inline, including the original filename.

    Ref: http://test.greenbytes.de/tech/tc2231/.
    """
    try:
        response: Response = send_file(filename)
    except FileNotFoundError:
        return abort(404)
    basename = os.path.basename(filename)
    # RFC 5987 filename* parameter so non-ASCII filenames survive the header.
    cont_disp = f"inline; filename*=UTF-8''{url_quote(basename)}"
    response.headers["Content-Disposition"] = cont_disp
    return response
| StarcoderdataPython |
6685657 | import numpy as np
# equivalent to MATLAB sph2cart & cart2sph
def cart2sph(x, y, z):
    """Convert Cartesian (x, y, z) to spherical (azimuth, elevation, radius),
    mirroring MATLAB's cart2sph convention (angles in radians)."""
    azimuth = np.arctan2(y, x)
    planar = np.sqrt(x**2 + y**2)
    elevation = np.arctan2(z, planar)
    r = np.sqrt(x**2 + y**2 + z**2)
    return azimuth, elevation, r
def sph2cart(azimuth, elevation, r):
    """Convert spherical (azimuth, elevation, radius) back to Cartesian,
    mirroring MATLAB's sph2cart convention (angles in radians)."""
    cos_el = np.cos(elevation)
    x = r * cos_el * np.cos(azimuth)
    y = r * cos_el * np.sin(azimuth)
    z = r * np.sin(elevation)
    return x, y, z
# get the angular distances from a single point to a list of points
def getDistances(p_az, p_el, grid_az, grid_el, input_format='deg', return_format='rad'):
    # Great-circle angular distance from a single point (p_az, p_el) to every
    # point of a grid.  grid_az/grid_el are assumed to be 1-D numpy arrays —
    # the einsum below contracts over a single sample axis — TODO confirm.
    # NOTE(review): unlike angularDistance, the default return format here is
    # radians; verify callers expect that asymmetry.
    if input_format == 'deg':
        p_az = p_az * np.pi / 180
        p_el = p_el * np.pi / 180
        grid_az = grid_az * np.pi / 180
        grid_el = grid_el * np.pi / 180
    x1, y1, z1 = sph2cart(p_az, p_el, 1);
    x2, y2, z2 = sph2cart(grid_az, grid_el, 1);
    # make the single point value a matrix with same dimensions as the grid
    x1 = x1 * np.ones_like(x2)
    y1 = y1 * np.ones_like(z2)
    z1 = z1 * np.ones_like(z2)
    # Per-point dot product of the two unit-vector bundles; clip guards against
    # |dot| marginally exceeding 1 due to rounding before arccos.
    dotProduct = np.einsum('ji,ji->i', [x1, y1, z1], [x2, y2, z2])
    distances = np.arccos(np.clip(dotProduct, -1.0, 1.0));
    if return_format == 'deg':
        distances = distances * 180 / np.pi
    return distances
# get the angular distance from one point to another
def angularDistance(az1, el1, az2, el2, input_format='deg', return_format='deg'):
    """Great-circle angle between two directions given as (azimuth, elevation).

    input_format / return_format select degrees ('deg') or radians for the
    inputs and the result respectively.
    """
    if input_format == 'deg':
        az1 = az1 * np.pi / 180
        az2 = az2 * np.pi / 180
        el1 = el1 * np.pi / 180
        el2 = el2 * np.pi / 180
    x1, y1, z1 = sph2cart(az1, el1, 1)
    x2, y2, z2 = sph2cart(az2, el2, 1)
    # Clip guards against |dot| marginally exceeding 1 due to rounding,
    # which would make arccos return NaN.
    distance = np.arccos(np.clip(np.dot([x1, y1, z1], [x2, y2, z2]), -1.0, 1.0))
    if return_format == 'deg':
        distance = distance * 180 / np.pi
    return distance
11346779 | import json
import re
from django.conf import settings
from django.shortcuts import render, redirect
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from django.contrib.auth import logout
from django_countries import countries
from core.models import TransientUser
from core.utils import (
validate,
get,
set_cookie,
)
from core.constants import ALERT_MAP
from trade_remedies_client.mixins import TradeRemediesAPIClientMixin
from core.validators import (
registration_validators,
base_registration_validators,
)
from core.utils import internal_redirect
from config.constants import SECURITY_GROUP_THIRD_PARTY_USER
def logout_view(request):
    """Clear auth state from the session and send the user to the login page."""
    for key in ("token", "user"):
        request.session.pop(key, None)
    logout(request)
    return redirect("/accounts/login/")
class BaseRegisterView(TemplateView):
    """Shared session helpers for the multi-step registration views.

    Registration state accumulates under request.session["registration"].
    """

    def reset_session(self, request, initial_data=None):
        # Replace any existing registration state with initial_data (or {}).
        initial_data = initial_data or {}
        request.session["registration"] = initial_data
        request.session.modified = True
        return request.session

    def update_session(self, request, update_data):
        # Merge update_data into the registration state, creating it if absent.
        request.session.setdefault("registration", {})
        request.session["registration"].update(update_data)
        request.session.modified = True
        return request.session

    def vaidate_session(self, request, fields, message=None):
        # NOTE(review): method name is misspelled ("vaidate"); renaming could
        # break external callers, so it is left as-is.
        # Flag every empty required field under registration["errors"].
        message = message or "Required"
        request.session["registration"].setdefault("errors", {})
        for key in fields:
            if not request.session["registration"][key]:
                request.session["registration"]["errors"][key] = message
        request.session.modified = True
        return request.session

    def default_session(self, request):
        # Ensure the registration dict exists without overwriting it.
        if "registration" not in request.session:
            request.session["registration"] = {}
            request.session.modified = True
        return request.session
class LoginChoiceView(BaseRegisterView):
    """Entry page letting the user choose how to sign in."""

    template_name = "registration/login_choice.html"

    @never_cache
    def get(self, request, code=None, case_id=None, *args, **kwargs):
        error = request.GET.get("error")
        # A fresh visit (no ?error query param) clears stale error messages.
        if error is None:
            request.session["errors"] = None
        return render(
            request,
            self.template_name,
            {
                "code": code,
                "case_id": case_id,
                "errors": request.session.get("errors", None),
            },
        )
class LoginView(BaseRegisterView, TradeRemediesAPIClientMixin):
    """Public login form; authenticates against the Trade Remedies API."""

    template_name = "registration/login.html"

    @never_cache
    def get(self, request, code=None, case_id=None, *args, **kwargs):
        error = request.GET.get("error")
        email_verified = request.session.get("email_verified")
        request.session["next"] = request.GET.get("next")
        if error is None:
            request.session["errors"] = None
        # One-shot flag: show the "email verified" banner only once.
        if email_verified:
            request.session["email_verified"] = None
        request.session.modified = True
        request.session.cycle_key()
        if code and case_id:
            # We're processing an invite URL
            return redirect("register_invite", code=code, case_id=case_id)
        return render(
            request,
            self.template_name,
            {
                "all_organisations": True,
                "email": request.GET.get("email") or "",
                "code": code,
                "case_id": case_id,
                "short_code": request.GET.get("short_code"),
                "welcome": request.GET.get("welcome"),
                "expired": request.GET.get("expired"),
                "errors": request.session.get("errors", None),
                "email_verified": email_verified,
                "next": request.GET.get("next"),
            },
        )

    def post(self, request, *args, **kwargs):  # noqa: C901
        email = request.POST.get("email")
        password = request.POST.get("password")
        code = request.POST.get("code")
        short_code = request.POST.get("short_code")
        case_id = request.POST.get("case_id")
        errors = validate({"email": email, "password": password}, base_registration_validators)
        if errors:
            request.session["errors"] = errors
            return redirect("/accounts/login/?error")
        try:
            response = self.trusted_client.authenticate(
                email,
                password,
                user_agent=request.META["HTTP_USER_AGENT"],
                ip_address=request.META["REMOTE_ADDR"],
                code=code,
                case_id=case_id,
            )
            if response and response.get("token"):
                # TODO: Temporary application state initialisation
                request.session.clear()
                request.session["application"] = {}
                # TODO: Tmp app state end
                # Force 2fa for every public login
                request.session["force_2fa"] = True
                request.session["token"] = response["token"]
                request.session["user"] = response["user"]
                request.session["version"] = response.get("version")
                redirection_url = request.POST.get("next") or "/dashboard/"
                # Single-organisation users skip the organisation picker.
                if len(request.session["user"].get("organisations", [])) == 1:
                    request.session["organisation_id"] = request.session["user"]["organisations"][
                        0
                    ]["id"]
                request.session.modified = True
                request.session.cycle_key()
                return internal_redirect(redirection_url, "/dashboard/")
            else:
                if case_id and code:
                    return redirect(f"/accounts/login/{code}/{case_id}/?error=t")
                elif short_code:
                    return redirect(f"/accounts/login/?error=t&short_code={short_code}")
                else:
                    return redirect("/accounts/login/?error=t")
        except Exception as exc:
            # Extract the most useful error detail from the API exception.
            detail = ""
            if hasattr(exc, "response"):
                try:
                    if exc.response.status_code == 401:
                        try:
                            detail = exc.response.json().get("detail")
                        except Exception:
                            detail = """You have entered an incorrect email address or password.
Please try again or click on the
Forgotten password link below."""
                    else:
                        response = exc.response.json()
                        detail = response.get("detail")
                except json.decoder.JSONDecodeError:
                    detail = exc.response.text
            else:
                detail = str(exc)
            request.session["errors"] = {"email": detail}
            request.session.modified = True
            if case_id and code:
                return redirect(f"/accounts/login/{code}/{case_id}/?error")
            else:
                return redirect("/accounts/login/?error")
class RegisterView(BaseRegisterView, TradeRemediesAPIClientMixin):
    """Step 1 of registration: personal details, optionally via a case invite."""

    template_name = "registration/register.html"

    @never_cache
    def get(self, request, errors=None, code=None, case_id=None, *args, **kwargs):
        confirm_invited_org = request.session.get("registration", {}).get("confirm_invited_org")
        self.default_session(request)
        template_name = self.template_name
        if (
            "error" not in request.GET and confirm_invited_org is None
        ):  # Only clear the session if this is not a return with 'error' set on the url
            self.reset_session(request)
        initial_context = {
            "countries": countries,
            "country": "GB",
            **request.session.get("registration", {}),
        }
        if code and case_id:
            # Invite flow: load invite details and, until the user has
            # confirmed the invited organisation, show the confirmation page.
            invite_details = self.trusted_client.get_case_invitation_by_code(code, case_id)
            confirm_invited_org = request.session["registration"].get("confirm_invited_org")
            if confirm_invited_org is None:
                template_name = "registration/invited_organisation.html"
            initial_context.update(
                {
                    "code": code,
                    "case_id": case_id,
                    "confirm_invited_org": confirm_invited_org,
                    "invite": invite_details,
                }
            )
            if confirm_invited_org == "true":
                # Pre-fill contact details from the invite.
                initial_context.update(
                    {
                        "name": invite_details.get("contact", {}).get("name", ""),
                        "email": invite_details.get("contact", {}).get("email", ""),
                        "phone": invite_details.get("contact", {}).get("phone", ""),
                    }
                )
        self.update_session(request, initial_context)
        return render(request, template_name, request.session["registration"])

    def post(self, request, code=None, case_id=None, *args, **kwargs):  # noqa: C901
        self.default_session(request)
        redirect_postfix = f"{code}/{case_id}/" if code and case_id else ""
        confirm_invited_org = request.POST.get("confirm_invited_org")
        if confirm_invited_org is not None:
            # The organisation-confirmation form only posts this flag.
            request.session["registration"]["confirm_invited_org"] = confirm_invited_org
            request.session.modified = True
            return redirect(f"/accounts/register/{code}/{case_id}/")
        request.session["registration"].update(request.POST.dict())
        errors = validate(request.session["registration"], registration_validators)
        if request.session["registration"].get("password") != request.session["registration"].get(
            "password_confirm"
        ):
            errors["password_confirm"] = "Passwords do not match"
        if not request.session["registration"].get("email"):
            errors["email"] = "Email is required"
        if not errors:
            session_reg = request.session.get("registration", {})
            if (
                session_reg.get("code")
                and session_reg.get("case_id")
                and session_reg.get("confirm_invited_org") == "true"
            ):
                invitee_sec_group = get(
                    request.session["registration"], "invite/organisation_security_group"
                )
                if invitee_sec_group == SECURITY_GROUP_THIRD_PARTY_USER:
                    # Use the third party invitee's organisation
                    organisation_id = get(
                        request.session["registration"], "invite/contact/organisation/id"
                    )
                else:
                    organisation_id = get(request.session["registration"], "invite/organisation/id")
                organisation = self.trusted_client.get_organisation(organisation_id=organisation_id)
                # Map API organisation fields onto the registration session keys.
                field_map = {
                    "id": "organisation_id",
                    "name": "organisation_name",
                    "address": "organisation_address",
                }
                out = {}
                organisation_country_code = get(organisation, "country/code")
                if organisation_country_code:
                    out["organisation_country_code"] = organisation_country_code
                    out["uk_company"] = "yes" if organisation_country_code == "GB" else "no"
                for field, value in organisation.items():
                    out_field = field_map.get(field) or field
                    out[out_field] = value
                self.update_session(request, out)
                # Skip the country step when the country is already known.
                if organisation_country_code:
                    return redirect("/accounts/register/3/")
                return redirect("/accounts/register/2/")
            elif (
                session_reg.get("code")
                and session_reg.get("case_id")
                and session_reg.get("confirm_invited_org") == "false"
            ):
                return redirect(f"/accounts/register/2/{redirect_postfix}")
            return redirect("/accounts/register/2/")
        return redirect(f"/accounts/register/{redirect_postfix}?error")
class RegisterOrganisationCountryView(BaseRegisterView):
    """Step 2 of registration: is the organisation a UK company?"""

    template_name = "registration/register_organisation_country.html"
    validators = [
        {"key": "uk_company", "message": "You must make a selection", "re": "(?:yes)|(?:no)"}
    ]

    @never_cache
    def get(self, request, code=None, case_id=None, *args, **kwargs):
        return render(request, self.template_name, request.session["registration"])

    def post(self, request, code=None, case_id=None, *args, **kwargs):
        self.update_session(request, request.POST.dict())
        errors = validate(request.session["registration"], self.validators)
        redirect_postfix = f"{code}/{case_id}/" if code and case_id else ""
        # Advance to step 3 on success, otherwise redisplay step 2.
        if not errors:
            return redirect(f"/accounts/register/3/{redirect_postfix}")
        else:
            return redirect(f"/accounts/register/2/{redirect_postfix}")
class RegisterOrganisationView(BaseRegisterView):
    """Step 3 of registration: organisation name, address and company number."""

    template_name = "registration/register_organisation.html"
    validators = [
        {"key": "organisation_name", "message": "Company name is mandatory", "re": ".+"},
        {"key": "organisation_address", "message": "Company address is mandatory", "re": ".+"},
        {"key": "same_contact_address", "message": "You must make a selection", "re": ".+"},
        {"key": "companies_house_id", "message": "Company number is mandatory", "re": ".+"},
    ]
    country_validator = [
        {"key": "organisation_country", "message": "You must select a country", "re": ".+"}
    ]

    @never_cache
    def get(self, request, code=None, case_id=None, errors=None, *args, **kwargs):
        return render(
            request, self.template_name, {"countries": countries, **request.session["registration"]}
        )

    def post(self, request, code=None, case_id=None, *args, **kwargs):
        redirect_postfix = f"{code}/{case_id}/" if code and case_id else ""
        if "registration" not in request.session:
            # Session expired or step 1 skipped — restart registration.
            return redirect(f"/accounts/register/{redirect_postfix}")
        self.update_session(request, request.POST.dict())
        request.session["registration"].pop("errors", None)  # Clear existing
        errors = validate(request.session["registration"], self.validators) or {}
        # Non-UK companies must additionally pick a country.
        if get(request.session["registration"], "uk_company") == "no":
            errors.update(validate(request.session["registration"], self.country_validator) or {})
        if not errors:
            # Skip the separate contact-address step when it matches.
            next_page = (
                "5" if request.session["registration"].get("same_contact_address") == "yes" else "4"
            )
            return redirect(f"/accounts/register/{next_page}/{redirect_postfix}")
        else:
            request.session["registration"]["errors"] = errors
            request.session.modified = True
            return redirect(f"/accounts/register/3/{redirect_postfix}")
class RegisterContactAddressView(BaseRegisterView, TradeRemediesAPIClientMixin):
    """Step 4 of registration: a contact address distinct from the company one."""

    template_name = "registration/register_contact_address.html"
    validators = [{"key": "contact_address", "message": "Contact address is mandatory", "re": ".+"}]

    @never_cache
    def get(self, request, errors=None, code=None, case_id=None, *args, **kwargs):
        return render(
            request, self.template_name, {"countries": countries, **request.session["registration"]}
        )

    def post(self, request, code=None, case_id=None, *args, **kwargs):
        redirect_postfix = f"{code}/{case_id}/" if code and case_id else ""
        if "registration" not in request.session:
            # Session expired or step 1 skipped — restart registration.
            return redirect(f"/accounts/register/{redirect_postfix}")
        self.update_session(request, request.POST.dict())
        errors = validate(request.session["registration"], self.validators)
        if not errors:
            return redirect(f"/accounts/register/5/{redirect_postfix}")
        else:
            request.session["registration"]["errors"] = errors
            request.session.modified = True
            return redirect(f"/accounts/register/4/{redirect_postfix}")
class RegisterIdsView(BaseRegisterView, TradeRemediesAPIClientMixin):
    """Final registration step: optional identifiers, then account creation."""

    template_name = "registration/register_organisation_extras.html"
    # All of these must be present in the session before the API call is made.
    required_fields = [
        "name",
        "email",
        "password",
        "uk_company",
        "organisation_name",
        "organisation_country",
        "organisation_address",
    ]
    validators = [
        {
            "key": "duns_number",
            "message": "The duns number should be exactly 9 digits",
            "re": "^(?:\\d{9})?$",
        },
        {
            "key": "organisation_website",
            "message": "Your website should be a complete, valid URL.",
            "re": "^(?:http(s)?:\\/\\/[\\w.-]+(?:\\.[\\w\\.-]+)+[\\w\\-\\._~:/?#[\\]@!\\$&'\\(\\)\\*\\+,;=.]+)?$",  # noqa: E501
        },
    ]

    @never_cache
    def get(self, request, errors=None, code=None, case_id=None, *args, **kwargs):
        return render(
            request, self.template_name, {"countries": countries, **request.session["registration"]}
        )

    def post(self, request, code=None, case_id=None, *args, **kwargs):  # noqa: C901
        redirect_postfix = f"{code}/{case_id}/" if code and case_id else ""
        if "registration" not in request.session:
            return redirect("/accounts/register/")
        self.update_session(request, request.POST.dict())
        # prepend http to the url if not provided
        _website = request.session["registration"].get("organisation_website")
        if _website and not _website.startswith("http"):
            request.session["registration"]["organisation_website"] = "http://" + _website
            request.session.modified = True
        errors = validate(request.session["registration"], self.validators)
        if not errors:
            if "countries" in request.session["registration"]:
                del request.session["registration"]["countries"]
            if all(
                [bool(request.session["registration"].get(key)) for key in self.required_fields]
            ):
                session_reg = request.session["registration"]
                response = self.trusted_client.register_public(**session_reg)
                if response.get("success"):
                    request.session.modified = True
                    if settings.AUTO_LOGIN:
                        auth_response = self.trusted_client.authenticate(
                            session_reg["email"], session_reg["password"]
                        )
                        if auth_response:
                            if auth_response.get("needs_verify"):
                                request.session["registration"] = {}
                                return redirect("/email/verify/")
                            elif auth_response.get("token"):
                                request.session.clear()
                                request.session["token"] = auth_response["token"]
                                request.session["user"] = auth_response["user"]
                                return redirect("/dashboard/?welcome=true")
                else:
                    # NOTE(review): reconstructed nesting — this else pairs with
                    # the register_public success check (it reads `response`).
                    request.session["registration"]["errors"] = response.get("error")
                    request.session.modified = True
        return redirect(f"/accounts/register/5/{redirect_postfix}")
class UpdateUserDetailsView(TemplateView, TradeRemediesAPIClientMixin):
    """Display an invite so the invited user can update their details."""

    template_name = "registration/update.html"

    def get(self, request, invite_id, *args, **kwargs):
        invite = self.client(request.user).get_invite_details(invite_id)
        context = {
            "invite": invite,
        }
        return render(request, self.template_name, context)
class ForgotPasswordView(TemplateView, TradeRemediesAPIClientMixin):
    """Request a password-reset email."""

    def get(self, request, *args, **kwargs):
        return render(
            request, "registration/forgot_password.html", {"requested": "requested" in request.GET}
        )

    def post(self, request, *args, **kwargs):
        email = request.POST.get("email")
        if email:
            # Fire-and-forget (unused return value dropped): the unconditional
            # redirect below never reveals whether the address exists, which
            # avoids account enumeration.
            self.trusted_client.request_password_reset(email)
        return redirect("/accounts/forgotpassword/?requested")
class ResetPasswordView(TemplateView, TradeRemediesAPIClientMixin):
    """Validate a password-reset code and apply the new password."""

    def get(self, request, code, *args, **kwargs):
        code_valid = self.trusted_client.validate_password_reset(code)
        error_code = request.GET.get("error")
        error_message = ALERT_MAP.get(error_code) if error_code else ""
        # An explicit error passed by post() is appended to the alert text.
        if kwargs.get("error"):
            error_message = f"{error_message}<br/>{kwargs['error']}"
        return render(
            request,
            "registration/reset_password.html",
            {
                "invalid_code": not code_valid,
                "code": code,
                "error": error_message,
            },
        )

    def post(self, request, code, *args, **kwargs):
        # Bug fix: this line had been corrupted (a "<PASSWORD>" placeholder);
        # restored to read the submitted password from the POST body.
        password = request.POST.get("password")
        password_confirm = request.POST.get("password_confirm")
        if password and password_confirm and password == password_confirm:
            try:
                self.trusted_client.reset_password(code, password)
            except Exception as exc:
                return self.get(request, code, error=str(exc), *args, **kwargs)
        elif password != password_confirm:
            return redirect(f"/accounts/password/reset/{code}/?error=pass_conf")
        return redirect("/accounts/login/choice/?next=/dashboard/")
class TermsAndConditionsView(TemplateView):
    """Render the static terms-and-conditions page."""

    def get(self, request, *args, **kwargs):
        context = {}
        return render(request, "registration/terms_and_conditions.html", context)
class CookiePolicyView(TemplateView):
    """Render the static cookie-policy page."""

    def get(self, request, *args, **kwargs):
        context = {}
        return render(request, "registration/cookie_policy.html", context)
class CookieSettingsView(BaseRegisterView):
    """Show and persist the user's cookie preferences."""

    def get(self, request, *args, **kwargs):
        redirect_url = request.GET.get("url") or ""
        # Default policy: Google Analytics cookies off.
        cookie_policy = {"accept_gi": "off"}
        try:
            cookie_policy = json.loads(request.COOKIES.get("cookie_policy"))
        except Exception as exception:
            # NOTE(review): production print — consider logging instead.
            print("Bad one", exception)
        return render(
            request,
            "registration/cookies.html",
            {
                "cookie_policy": cookie_policy,
                "redirect_url": redirect_url,
            },
        )

    def post(self, request, *args, **kwargs):
        accept_gi = request.POST.get("accept_gi")
        redirect_url = request.POST.get("redirect_url") or "/dashboard/"
        # Append the confirmation marker without breaking an existing query string.
        separator = "?" if redirect_url.find("?") == -1 else "#"
        redirect_url = f"{redirect_url}{separator}cookie-policy-updated=1"
        response = internal_redirect(redirect_url, "/dashboard/")
        policy = json.dumps({"accept_gi": accept_gi})
        if accept_gi != "on":
            # delete ga cookies by regex
            regex = r"^_g(a|i)"
            for key, value in request.COOKIES.items():
                if re.search(regex, key):
                    response.delete_cookie(key)
        set_cookie(response, "cookie_policy", policy)
        return response
class AccessibilityStatementView(TemplateView):
    """Render the static accessibility-statement page."""

    def get(self, request, *args, **kwargs):
        context = {}
        return render(request, "registration/accessibility_statement.html", context)
class RegistrationCompletionView(BaseRegisterView, TradeRemediesAPIClientMixin):
    """
    Complete a registration triggered by a user creation and invite
    """

    def get(self, request, code, org_id, user_id=None, *args, **kwargs):
        """Render the completion form, validating the invite code first."""
        template_name = "registration/registration_completion.html"
        context = {
            "errors": kwargs.get("errors", {}),
            "countries": countries,
        }
        try:
            invite_validation = self.trusted_client.validate_user_invitation(code, org_id)
            context.update(invite_validation)
        except Exception:
            context["errors"]["invalid_invite"] = "Invalid invitation details"
        return render(request, template_name, context)

    def post(self, request, code, org_id, user_id=None, *args, **kwargs):
        """Validate the invite, then complete the user's registration."""
        errors = {}
        invite_validation = {}
        try:
            invite_validation = self.trusted_client.validate_user_invitation(code, org_id)
        except Exception:
            errors["invalid_invite"] = "Invalid invitation details"
        # Bug fix: bail out BEFORE dereferencing the invite payload.  The
        # original accessed invite_validation["invite"] first, so a failed
        # validation raised KeyError instead of re-rendering with the error.
        if errors:
            return self.get(request, code, org_id, errors=errors)
        invite = invite_validation["invite"]
        params = {
            "password": request.POST.get("password"),
            "password_confirm": request.POST.get("password_confirm"),
            "email": invite["meta"]["email"],
            "country_code": request.POST.get("country"),
            "phone": request.POST.get("phone"),
            "name": request.POST.get("name"),
            "terms": request.POST.get("terms"),
        }
        response = None
        try:
            # Act on behalf of the inviting user when completing registration.
            invited_by = TransientUser(token=invite["invited_by"])
            client = self.client(invited_by)
            response = client.complete_user_registration(invite["id"], org_id, params=params)
        except Exception as exc:  # TODO: Refactor this exc handling.
            if hasattr(exc, "detail") and exc.detail and isinstance(exc.detail, dict):
                errors = exc.detail["errors"]
                return self.get(request, code, org_id, errors=errors)
        return redirect("/email/verify/")
| StarcoderdataPython |
4928841 | <reponame>GabrielRojas74/Talleres-AyP<filename>Taller estructuras/punto#12.py<gh_stars>0
"""
entradad
nota_tarea1_mates-->int-->num
nota_tarea2_mates-->int-->ndm
nota_tarea3_mates-->int-->ntm
examen_mates-->float-->xm
nota_tarea1_fisik-->int-->nuf
nota_tarea2_fisik-->int-->ndf
examen_f-->float-->xf
nota_tarea1_q-->int-->nuq
nota_tarea2_q-->int-->ndq
nota_tarea3_q-->int-->ntq
examen_q-->float-->xq
salidas
promedio_mates-->str-->pm
promedio_fisik-->str-->pf
promedio_quimik-->str-->pq
promedio_total-->str-->pg
"""
num=int(input("digite la nota #1 de su tareas de matematicas "))
ndm=int(input("digite la nota #2 de su tareas de matematicas "))
ntm=int(input("digite la nota #3 de su tareas de matematicas "))
xm=int(input("digite la nota del examen de matematicas "))
nuf = int(input("digite la nota #1 de su tareas de fisica "))
ndf = int(input("digite la nota #2 de su tareas de fisica "))
xf = int(input("digite la nota del examen de fisica "))
nuq = int(input("digite la nota #1 de su tareas de quimica "))
ndq = int(input("digite la nota #2 de su tareas de quimica "))
ntq = int(input("digite la nota #3 de su tareas de quimica "))
xq = int(input("digite la nota del examen de quimica "))
pm=(((num+ndm+ntm)/3)*0.1)+(xm*0.9)
pf = (((nuf+ndf)/2)*0.2)+(xf*0.8)
pq = (((nuq+ndq+ntq)/3)*0.15)+(xq*0.85)
pg=(pm+pf+pq)/3
print("su promedio en matematicas es de "+str (pm))
print("su promedio en fisica es de "+str(pf))
print("su promedio en quimica es de "+str(pq))
print("su promedio en general en estas tres materias es de "+str(pg))
| StarcoderdataPython |
6512308 | <reponame>ankitmlive/sign-up-api
from django.dispatch import Signal

# Custom registration signals; receivers get ``user`` and ``request`` kwargs.
# NOTE(review): ``providing_args`` is documentation-only; it was deprecated in
# Django 3.0 and removed in 4.0 -- confirm the project's Django version.

# New user has registered.
user_registered = Signal(providing_args=["user", "request"])

# User has activated his or her account.
user_activated = Signal(providing_args=["user", "request"])
12837790 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated Django migration for the pupa app: creates the reporting
# models (RunPlan, ImportObjects, ScrapeReport, ScrapeObjects) used to record
# scrape/import runs. Do not hand-edit field definitions -- they must match
# the migration history.
class Migration(migrations.Migration):
    dependencies = [
        ('core', '0001_initial'),
        ('legislative', '0001_initial'),
    ]
    operations = [
        # Per-object-type counters for one import run.
        migrations.CreateModel(
            name='ImportObjects',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('object_type', models.CharField(max_length=20, choices=[('jurisdiction', 'Jurisdiction'), ('person', 'Person'), ('organization', 'Organization'), ('post', 'Post'), ('membership', 'Membership'), ('bill', 'Bill'), ('vote_event', 'VoteEvent'), ('event', 'Event')])),
                ('insert_count', models.PositiveIntegerField()),
                ('update_count', models.PositiveIntegerField()),
                ('noop_count', models.PositiveIntegerField()),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One planned run for a jurisdiction.
        migrations.CreateModel(
            name='RunPlan',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('success', models.BooleanField(default=True)),
                ('jurisdiction', models.ForeignKey(to='core.Jurisdiction')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='importobjects',
            name='report',
            field=models.ForeignKey(to='pupa.RunPlan'),
            preserve_default=True,
        ),
        # Per-object-type counts produced by one scraper invocation.
        migrations.CreateModel(
            name='ScrapeObjects',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('object_type', models.CharField(max_length=20, choices=[('jurisdiction', 'Jurisdiction'), ('person', 'Person'), ('organization', 'Organization'), ('post', 'Post'), ('membership', 'Membership'), ('bill', 'Bill'), ('vote_event', 'VoteEvent'), ('event', 'Event')])),
                ('count', models.PositiveIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Metadata (scraper name, args, timing) for one scraper invocation.
        migrations.CreateModel(
            name='ScrapeReport',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('scraper', models.CharField(max_length=300)),
                ('args', models.CharField(max_length=300)),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
                ('plan', models.ForeignKey(to='pupa.RunPlan')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='scrapeobjects',
            name='report',
            field=models.ForeignKey(to='pupa.ScrapeReport'),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
1798828 | <filename>dnnv/_manage/linux/verifiers/mipverify.py
from __future__ import annotations
import subprocess as sp
from ..environment import (
Environment,
Dependency,
HeaderDependency,
LibraryDependency,
ProgramDependency,
Installer,
GNUInstaller,
GurobiInstaller,
)
from ...errors import InstallError, UninstallError
mipverify_runner = """#!/bin/bash
export GUROBI_HOME={gurobi_home}
cd {venv_path}
./julia --project=. $@
"""
class MIPVerifyInstaller(Installer):
    """Install the MIPVerify.jl verifier into a dedicated Julia project.

    Creates a per-verifier environment with a private copy of Julia, installs
    the required Julia packages (Gurobi, MathOptInterface, JuMP, MAT and
    MIPVerify at a pinned commit) and writes a ``mipverify`` launcher script
    into the environment's ``bin`` directory.
    """

    def run(self, env: Environment, dependency: Dependency):
        commit_hash = "36fd890"
        cache_dir = env.cache_dir / f"mipverify-{commit_hash}"
        cache_dir.mkdir(exist_ok=True, parents=True)
        verifier_venv_path = env.env_dir / "verifier_virtualenvs" / "mipverify"
        verifier_venv_path.parent.mkdir(exist_ok=True, parents=True)
        installation_path = env.env_dir / "bin"
        installation_path.mkdir(exist_ok=True, parents=True)
        # Locate the Julia installation via its shared library.
        libjulia_path = LibraryDependency("libjulia").get_path(env)
        assert libjulia_path is not None
        julia_dir = libjulia_path.parent.parent
        envvars = env.vars()
        commands = [
            "set -ex",
            f"cd {verifier_venv_path.parent}",
            "rm -rf mipverify",
            "mkdir mipverify",
            "cd mipverify",
            # A private copy of Julia keeps the verifier environment self-contained.
            f"cp -r {julia_dir} .",
            f"ln -s {julia_dir}/bin/julia julia",
            './julia --project=. -e \'using Pkg; Pkg.add("Gurobi"); Pkg.build("Gurobi")\'',
            './julia --project=. -e \'using Pkg; Pkg.add("MathOptInterface"); Pkg.build("MathOptInterface")\'',
            './julia --project=. -e \'using Pkg; Pkg.add("JuMP"); Pkg.build("JuMP")\'',
            './julia --project=. -e \'using Pkg; Pkg.add("MAT"); Pkg.build("MAT")\'',
            f'./julia --project=. -e \'using Pkg; Pkg.add(PackageSpec(url="https://github.com/vtjeng/MIPVerify.jl", rev="{commit_hash}"))\'',
            "./julia --project=. -e 'using Pkg; Pkg.update(); Pkg.precompile()'",
        ]
        install_script = "; ".join(commands)
        proc = sp.run(install_script, shell=True, env=envvars)
        if proc.returncode != 0:
            # was an f-string with no placeholders
            raise InstallError("Installation of MIPVerify failed")
        # Write the launcher script and make it executable for the owner.
        with open(installation_path / "mipverify", "w+") as f:
            f.write(
                mipverify_runner.format(
                    venv_path=verifier_venv_path,
                    gurobi_home=envvars.get("GUROBI_HOME", "."),
                )
            )
        (installation_path / "mipverify").chmod(0o700)
class JuliaInstaller(Installer):
    """Download a pinned Julia release into the cache and expose its paths."""

    def run(self, env: Environment, dependency: Dependency):
        version = "1.6.1"
        major_minor = ".".join(version.split(".")[:2])
        cache_dir = env.cache_dir / f"julia-{version}"
        cache_dir.mkdir(exist_ok=True, parents=True)
        # Always expose the cached install on the search paths, even when the
        # download below is skipped.
        env.paths.append(cache_dir / f"julia-{version}" / "bin")
        env.include_paths.append(cache_dir / f"julia-{version}" / "include")
        env.ld_library_paths.append(cache_dir / f"julia-{version}" / "lib")
        if dependency.is_installed(env):
            return
        commands = [
            "set -ex",
            f"cd {cache_dir}",
            f"curl -o julia-{version}.tar.gz -L https://julialang-s3.julialang.org/bin/linux/x64/{major_minor}/julia-{version}-linux-x86_64.tar.gz",
            f"tar xf julia-{version}.tar.gz",
        ]
        install_script = "; ".join(commands)
        proc = sp.run(install_script, shell=True, env=env.vars())
        if proc.returncode != 0:
            # was an f-string with no placeholders
            raise InstallError("Installation of julia failed")
def install(env: Environment):
    """Register the mipverify program plus its build/runtime dependencies."""
    zlib_installer = GNUInstaller(
        "zlib", "1.2.11", "https://www.zlib.net/zlib-1.2.11.tar.xz"
    )
    gurobi_installer = GurobiInstaller("9.1.2")
    dependencies = (
        ProgramDependency("julia", installer=JuliaInstaller()),
        ProgramDependency("git"),
        HeaderDependency("zlib.h", installer=zlib_installer),
        LibraryDependency("libz", installer=zlib_installer),
        HeaderDependency("gurobi_c.h", installer=gurobi_installer),
        LibraryDependency("libgurobi91", installer=gurobi_installer),
        ProgramDependency("grbgetkey", installer=gurobi_installer),
    )
    env.ensure_dependencies(
        ProgramDependency(
            "mipverify",
            installer=MIPVerifyInstaller(),
            dependencies=dependencies,
        )
    )
def uninstall(env: Environment):
    """Remove the mipverify launcher script and its Julia virtualenv."""
    exe_path = env.env_dir / "bin" / "mipverify"
    verifier_venv_path = env.env_dir / "verifier_virtualenvs" / "mipverify"
    commands = [
        f"rm -f {exe_path}",
        f"rm -rf {verifier_venv_path}",
    ]
    install_script = "; ".join(commands)
    proc = sp.run(install_script, shell=True, env=env.vars())
    if proc.returncode != 0:
        # Copy-paste fix: the message previously said "planet".
        raise UninstallError("Uninstallation of mipverify failed")
__all__ = ["install", "uninstall"]
| StarcoderdataPython |
8124978 | <reponame>rafiberlin/clp-sose21-pm-vision
"""
Avatar action routines
"""
from avatar_sgg.config.util import get_config
from avatar_sgg.dataset.ade20k import get_preprocessed_image_graphs_for_map_world
from avatar_sgg.game_avatar_abstract import Avatar
from avatar_sgg.image_retrieval.scene_graph_similarity_model import TextGraphVectorizer, SGEncode, get_scene_graph_encoder
import random
import os
from avatar_sgg.image_retrieval.evaluation import calculate_normalized_cosine_similarity_on_tensor
import torch
class GraphAvatar(Avatar):
    """
    Avatar that identifies the room a human player describes by encoding the
    player's text as a scene graph and comparing it (cosine similarity) with
    pre-computed image scene graphs of the candidate rooms.
    """
    def __init__(self, image_directory):
        # Pull global config; fall back to the ADE20k training images when no
        # image directory is supplied.
        config = get_config()
        if image_directory is None:
            image_directory = os.path.join(config["ade20k"]["root_dir"], "images", "training")
        config = config["game_setup"]
        self.debug = config["debug"]
        config = config["avatar"]
        self.max_number_of_interaction = config["max_number_of_interaction"]
        self._print(f"The avatar will allow only {self.max_number_of_interaction} interactions with the human player.")
        self.image_directory = image_directory
        self.map_world_preprocessed_image_graphs = get_preprocessed_image_graphs_for_map_world()
        self.current_image_graphs = None
        self.text_graph_encoder: TextGraphVectorizer = TextGraphVectorizer()
        self.img_graph_encoder: SGEncode = get_scene_graph_encoder()
        self._print(f"Avatar using Graphs for similarity.")
        # similarity_threshold: confidence at which the room counts as found;
        # minimum_similarity_threshold: floor below which no prediction is made.
        self.similarity_threshold = config["similarity_threshold"]
        self.minimum_similarity_threshold = config["minimum_similarity_threshold"]
        self.aggregate_interaction = config["aggregate_interaction"]
        self._print(f"Threshold for similarity based retrieval: {self.similarity_threshold}")
        self._print(f"Aggregate Interaction: {self.aggregate_interaction}")
        # Per-game state; reset() below gives them their real initial values.
        self.number_of_interaction = None
        self.observation = None
        self.map_nodes = None
        self.generated_captions = None
        self.map_nodes_real_path = None
        self.vectorized_captions = None
        self.vectorized_interactions = None
        self.current_candidate_similarity = None
        self.current_candidate_ranks = None
        self.reset()
    def reset(self):
        """
        Reset important attributes for the avatar.
        :return:
        """
        self.number_of_interaction = 0
        self.observation = None
        self.map_nodes = None
        self.map_nodes_real_path = {}
        self.vectorized_captions = None
        self.vectorized_interactions = []
        self.current_candidate_similarity = 0.0
        self.current_candidate_ranks = None
        self.interactions = []
        self.room_found = False
        self.current_image_graphs = None
    def is_interaction_allowed(self):
        """
        check if the avatar is still allowed to process messages.
        :return:
        """
        if self.room_found:
            return False
        return (self.number_of_interaction < self.max_number_of_interaction)
    def get_prediction(self):
        """
        Return the map node of the best-matching room, or None when no
        candidate exceeds the minimum similarity threshold.
        :return:
        """
        prediction = None
        if (self.current_candidate_ranks is not None) and (
                self.current_candidate_similarity > self.minimum_similarity_threshold):
            # NOTE(review): ``current_candidate_ranks`` is a positional index
            # from topk; this assumes map_nodes keys are 0..n-1 -- confirm.
            prediction = self.map_nodes[self.current_candidate_ranks]
        # choice = random.choice(list(self.map_nodes.items()))
        return prediction
    def __increment_number_of_interaction(self):
        # Counts only interactions that produced a usable text graph.
        self.number_of_interaction += 1
    def set_map_nodes(self, map_nodes: dict):
        """
        Only called once, when the labyrinth is initialized.
        example of entry in map_nodes:
        0: 'w/waiting_room/ADE_train_00019652.jpg'
        :param map_nodes:
        :return:
        """
        # As dictionary is sent with socket io, the int keys were converted into string.
        self.map_nodes = {int(k): map_nodes[k] for k in map_nodes.keys()}
        self.__load_image_graphs()
    def __load_image_graphs(self):
        # adding a '/' is a work around, as I preprocessed all the image graphs with the starting '/' unfortunately...
        self.current_image_graphs = {item: self.map_world_preprocessed_image_graphs['/'+item] for k, item in self.map_nodes.items()}
    def step(self, observation: dict) -> dict:
        """Process one game observation; return the avatar's actions (if any)."""
        if self.debug:
            print(observation)
        actions = dict()
        if observation["image"]:
            self.__update_observation(observation)
        if observation["message"]:
            self.__update_actions(actions, observation["message"])
        return actions
    def __update_observation(self, observation: dict):
        # Remember the latest observation containing an image.
        self.observation = observation
    def __update_actions(self, actions, message):
        # if "go" in message.lower():
        #     actions["move"] = self.__predict_move_action(message)
        # else:
        actions["response"] = self.__generate_response(message)
    def __set_room_found(self):
        # Mark the game as solved once the best candidate is confident enough.
        if self.current_candidate_similarity >= self.similarity_threshold:
            self.room_found = True
    def __generate_response(self, message: str) -> str:
        """Fold *message* into the accumulated description, re-rank candidate
        rooms by graph similarity and reply with the avatar's status."""
        message = message.lower()
        self.interactions.append(message)
        text_graphs = self.text_graph_encoder.vectorize(self.interactions)
        if text_graphs is not None:
            self.__increment_number_of_interaction()
            # Merge the text graph into every candidate image graph, then
            # encode and rank all candidates.
            for k in self.current_image_graphs.keys():
                self.current_image_graphs[k].update(text_graphs)
            self.img_graph_encoder.eval()
            test_results = []
            with torch.no_grad():
                for k, graph_dict in self.current_image_graphs.items():
                    res = self.img_graph_encoder(graph_dict)
                    test_results.append(res)
            stacked_vectors = torch.stack(test_results)
            similarity = calculate_normalized_cosine_similarity_on_tensor(stacked_vectors)
            values, ranks = torch.topk(similarity, 1, dim=0)
            values = float(values[0][0].to("cpu").numpy())
            ranks = int(ranks[0][0].to("cpu").numpy())
            # Keep the best candidate seen across the whole conversation.
            if values > self.current_candidate_similarity:
                self.current_candidate_similarity = values
                self.current_candidate_ranks = ranks
            self.__set_room_found()
            found_msg = ""
            if self.room_found:
                found_msg = " I believe I found the room based on your description."
            return f"You interacted {self.number_of_interaction} times with me.{found_msg}"
        else:
            # No usable text graph: drop the message and ask for a better description.
            last_idx = len(self.interactions) - 1
            self.interactions.pop(last_idx)
            return "Sorry, could you try to describe the scene more precisely? (I can't infer a useful text graph.)"
    def __predict_move_action(self, message: str) -> str:
        """Map a direction word in *message* to a one-letter move command."""
        if "north" in message:
            return "n"
        if "east" in message:
            return "e"
        if "west" in message:
            return "w"
        if "south" in message:
            return "s"
        return "nowhere"
| StarcoderdataPython |
1758357 | import os
import tensorflow as tf
from meta import Meta
from evaluator import Evaluator
from train import read_h5py_file
# Command-line flags (override on the command line, e.g. --checkpoint_dir=...).
tf.app.flags.DEFINE_string('data_dir_test', './data', 'Directory to read TFRecords files')
tf.app.flags.DEFINE_string('checkpoint_dir', './logs/train', 'Directory to read checkpoint files')
tf.app.flags.DEFINE_string('eval_logdir', './logs/train/eval', 'Directory to write evaluation logs')
FLAGS = tf.app.flags.FLAGS
def _eval(path_to_checkpoint_dir, image_test, length_test, digits_test, path_to_test_eval_log_dir):
    """Evaluate every checkpoint in *path_to_checkpoint_dir* on the test set.

    Checkpoints whose filename suffix is not an integer global step are
    skipped. Accuracy per checkpoint is printed and logged by the Evaluator.
    """
    evaluator = Evaluator(path_to_test_eval_log_dir)
    checkpoint_paths = tf.train.get_checkpoint_state(path_to_checkpoint_dir).all_model_checkpoint_paths
    for global_step, path_to_checkpoint in [(path.split('-')[-1], path) for path in checkpoint_paths]:
        try:
            global_step_val = int(global_step)
        except ValueError:
            # Not a step-numbered checkpoint -- skip it.
            continue
        accuracy = evaluator.evaluate(path_to_checkpoint, image_test, length_test, digits_test, global_step_val)
        print('Evaluate %s on Test Set, accuracy = %f' % (path_to_checkpoint, accuracy))
def main(_):
    """Entry point: load the test set and evaluate all checkpoints on it."""
    test_h5_path = os.path.join(FLAGS.data_dir_test, 'test_set.h5')
    eval_log_dir = os.path.join(FLAGS.eval_logdir, 'test')
    images, lengths, digits = read_h5py_file(test_h5_path, Flag=0)
    _eval(FLAGS.checkpoint_dir, images, lengths, digits, eval_log_dir)


if __name__ == '__main__':
    tf.app.run(main=main)
| StarcoderdataPython |
5033716 | import tempfile
import os
import subprocess
from flask import request, make_response, jsonify
from Classes.Services.RDF_Service import TripleStore_Service
class Service:
    """Service layer that turns uploaded files into RDF graphs stored in a
    Virtuoso triple store, from which Thing Descriptions are generated.

    NOTE(review): credentials are hard-coded placeholders, and some methods
    mix the injected ``self.request`` with Flask's global ``request`` --
    confirm which one is intended.
    """

    def __init__(self, request_data, id):
        self.request = request_data  # request sent to the api
        self.id = id  # id of TD and graph
        self.uploaded_file = None  # uploaded file
        self.file_type = None  # type of the file
        self.type_name = None  # type/name
        self.graph = None  # rdf graph generated from file provided or rdf file provided by the request
        self.triple_store_credentials = {'username': 'dba', 'password': '<PASSWORD>'}  # credentials for triple store
        self.graph_generator = None  # graph generator
        self.response = {}  # response sent to the client
        self.sparql_query = None  # sparql query for get statement
        self.generate_graph = False  # whether the graph must be generated from the upload

    def get(self):
        """
        Get statement
        """
        self.request.get_data()
        self.response = {'status': 'success'}  # TODO: create get method

    def post(self, graph_generator):
        """
        Post file, generate graph and TDs and store them inside their respectives databases
        """
        if "file" not in request.files:
            return make_response(jsonify({"response": "No file part"}))
        self.file_type = self.request.args.get('file_type', '')  # retrieve file type
        self.uploaded_file = self.request.files['file'].read()  # retrieve uploaded file
        self.graph_generator = graph_generator  # graph generator used below
        self.generate_graph = True  # this step must generate the graph
        self.temporal_file_generation_process()  # generate graph and store it in triple store
        # generate TDs using sparql query retrieving all elements of the graph
        return self.response

    def post_enrichment(self, enricher):
        """
        Enrich TDs with images or extra files
        """
        # call enricher class
        pass

    def put(self):
        """
        Insert graph given as argument in body of the request in triple store and generate TDs
        """
        if "rdf" not in request.files:
            return make_response(jsonify({"response": "No rdf file part"}))  # Change to array with serialization that we need ttl, n3 and xml -> if not provide error
        self.uploaded_file = self.request.files['rdf'].read()
        self.generate_graph = False
        triple_store_service = TripleStore_Service()
        triple_store_service.store_rdf(self.uploaded_file, self.id)
        query = ""
        triple_store_service.query_graph(query)
        # self.response = self.insert_graph(temp.name)
        # self.temporal_file_generation_process()
        # Query SPARQL -> Triple Store
        # Generate TDs
        return self.response

    def delete(self):
        """
        Delete statement
        """
        self.response = {'status': 'success'}  # TODO: create delete method

    def insert_graph(self, file_name):
        """
        Upload ``file_name``.ttl into the triple store graph named after
        ``self.id`` via Virtuoso's CRUD endpoint, then remove the local file.
        Credentials come from ``self.triple_store_credentials``.
        """
        proc = subprocess.Popen(["curl",
                                 "--digest",
                                 "--verbose",
                                 "--user",
                                 self.triple_store_credentials['username'] + ":" + self.triple_store_credentials['password'],
                                 "--url",
                                 'http://virtuoso_db:8890/sparql-graph-crud-auth?graph-uri=http://virtuoso_db:8890/' + self.id,
                                 "-T",
                                 file_name + ".ttl"],
                                shell=False,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        ## https://stackoverflow.com/questions/50376348/http-request-authentication-for-sparql-insert-in-virtuoso-endpoint
        # NOTE(review): the curl process is never waited on (no communicate()/
        # wait()), so the file below may be removed while the upload is still
        # running -- confirm whether this should block on completion.
        if os.path.exists(file_name + '.ttl'):
            os.remove(file_name + ".ttl")

    def temporal_file_generation_process(self):
        """
        Create temporal file, generate graph and store it in triple store
        """
        temp = tempfile.NamedTemporaryFile()
        try:
            temp.write(self.uploaded_file)
            temp.seek(0)
            self.graph_generator.generate_graph(self.file_type, temp.name)  # remove after graph file path to not make trash files
            # Fixed: insert_graph() takes only the file name; the graph id and
            # credentials are read from instance attributes (the old call
            # passed three arguments to a one-parameter method -> TypeError).
            self.response = self.insert_graph(self.graph_generator.graph_path)
        finally:
            temp.close()
5035872 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import argparse
import sys
from pgsanity import sqlprep
from pgsanity import ecpg
def get_config(argv=None):
    """Parse command-line arguments for pgsanity.

    *argv* defaults to ``sys.argv[1:]`` evaluated at call time; the previous
    ``argv=sys.argv[1:]`` default was captured once at import time, so later
    changes to ``sys.argv`` were ignored.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Check syntax of SQL for PostgreSQL')
    parser.add_argument('--add-semicolon', action='store_true')
    parser.add_argument('files', nargs='*', default=None)
    return parser.parse_args(argv)
def check_file(filename=None, show_filename=False, add_semicolon=False):
    """
    Check whether an input file is valid PostgreSQL. If no filename is
    passed, STDIN is checked.

    Returns a status code: 0 if the input is valid, 1 if invalid.
    """
    # either read the named file or stdin
    if filename is not None:
        with open(filename, "r") as filelike:
            sql_string = filelike.read()
    else:
        # Read stdin directly; the old ``with sys.stdin as filelike:`` bound a
        # variable it never used and closed the stream as a side effect.
        sql_string = sys.stdin.read()

    success, msg = check_string(sql_string, add_semicolon=add_semicolon)

    # report results
    result = 0
    if not success:
        # possibly show the filename with the error message
        prefix = ""
        if show_filename and filename is not None:
            prefix = filename + ": "
        print(prefix + msg)
        result = 1

    return result
def check_string(sql_string, add_semicolon=False):
    """
    Check whether a string is valid PostgreSQL.

    Returns a ``(valid, message)`` pair: *message* is an empty string for
    valid input, otherwise a description of the syntax problem from ecpg.
    """
    prepped = sqlprep.prepare_sql(sql_string, add_semicolon=add_semicolon)
    return ecpg.check_syntax(prepped)
def check_files(files, add_semicolon=False):
    """Check every file in *files* (stdin when empty); return 0 iff all pass."""
    if not files:
        return check_file(add_semicolon=add_semicolon)
    # Prefix messages with the filename only when checking several files.
    show = len(files) > 1
    status = 0
    for name in files:
        status |= check_file(name, show, add_semicolon=add_semicolon)
    return status
def main():
    """CLI entry point: parse arguments, check the given files (or stdin)."""
    cfg = get_config()
    return check_files(cfg.files, add_semicolon=cfg.add_semicolon)


if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
| StarcoderdataPython |
12864984 | # -*- coding: utf-8 -*-
'''
Docer
~~~~~~
A document viewing platform.
:copyright: (c) 2015 by Docer.Org.
:license: MIT, see LICENSE for more details.
'''
from flask import Flask
from flask.ext.mongoengine import MongoEngine
from app.frontend import frontend as frontend_blueprint
from app.backend import backend as backend_blueprint
# Module-level Flask application configured for development.
app = Flask(__name__)
app.config.from_object('config.DevelopmentConfig')

# mongodb
# MongoEngine extension bound to ``app`` at import time.
mongo = MongoEngine(app)
def create_app():
    """Register the frontend and backend blueprints and return the app."""
    app.register_blueprint(frontend_blueprint)
    app.register_blueprint(
        backend_blueprint, name='admin', url_prefix='/admin'
    )
    return app
219993 | <reponame>kfortin/buttchan
import discord, time, json, sys, shlex, asyncio
from hashlib import sha256
from random import choice, randrange
from math import floor
def stringtime(secs):
    """Render seconds as English text, e.g. ``1 hour, 2 minutes and 5 seconds``.

    Returns ``"no time"`` for zero (or sub-second) durations.
    """
    parts = []
    for size, unit in ((3600, "hour"), (60, "minute"), (1, "second")):
        count, secs = divmod(secs, size)
        count = int(count)
        if count:
            plural = "" if count == 1 else "s"
            parts.append("{} {}{}".format(count, unit, plural))
    if not parts:
        return "no time"
    if len(parts) == 1:
        return parts[0]
    return "{} and {}".format(", ".join(parts[:-1]), parts[-1])
def intervals():
    """Yield reminder intervals: 1, 2, 5, 10, 30 minutes, then 1h, 2h, 3h, ...

    Infinite generator; callers consume a finite prefix.
    """
    # Fixed five-step prelude (previously rebuilt inside the loop on every
    # iteration), followed by whole hours forever.
    fixed = [60 * 1, 60 * 2, 60 * 5, 60 * 10, 60 * 30]
    for value in fixed:
        yield value
    hours = 1
    while True:
        yield 3600 * hours
        hours += 1
class JsonStorage():
    """Tiny JSON-on-disk persistence helper."""

    def __init__(self, filename):
        self.filename = filename

    def read(self):
        """Return the parsed JSON contents, or None (with a message) on failure.

        The previous version referenced ``f`` in a ``finally`` block even when
        ``open`` itself failed, raising NameError; a ``with`` block fixes both
        the cleanup and that crash.
        """
        try:
            with open(self.filename) as f:
                return json.load(f)
        except (OSError, ValueError):
            print("Couldn't read save data.")

    def write(self, data):
        """Serialize *data* as JSON to ``self.filename``, overwriting it."""
        with open(self.filename, "w") as f:
            json.dump(data, f)
class ChallengePool():
    """A persistent pool of randomly generated music-challenge ideas, each
    keyed by a 4-hex-digit hash, with per-user votes."""

    def __init__(self, amt=5, filename="pool.dat"):
        # Fixed: the pool size used to be hard-coded to 5, ignoring ``amt``.
        self.amt = amt
        self.store = JsonStorage(filename)
        self.challenges = {}
        self.votes = {}
        try:
            self.load()
        except Exception:
            print("Failed to load challenge pool on init")
        self.fill()

    def generate(self):
        """Add one new random challenge; retry (recursively) on key collision."""
        keys = "A Bb B C C# D Eb E F F# G Ab".split()
        scales = "minor major".split()
        genres = "70s music|80s music|90s music|vaporwave|hip hop|future bass|dubstep|trap|neuro|jazz|future funk|chiptune|cinematic|minimal|chill".split("|")
        challenge = "{} in {} {} at {}bpm".format(choice(genres), choice(keys), choice(scales), randrange(60, 220, 2))
        chalk = sha256(challenge.encode()).hexdigest()[:4]
        if chalk in self.challenges:
            self.generate()
        else:
            self.challenges[chalk] = challenge

    def fill(self):
        """Top the pool up to ``self.amt`` challenges, prune stale votes, save."""
        missing = self.amt - len(self.challenges)
        if missing < 0:
            # Trim the pool down to the configured size (was hard-coded to 5).
            self.challenges = dict((i, self.challenges[i]) for i in list(self.challenges)[:self.amt])
            return
        for _ in range(missing):
            self.generate()
        # Drop votes that point at challenges no longer in the pool.
        self.votes = dict((i, self.votes[i]) for i in self.votes if self.votes[i] in self.challenges)
        self.save()

    def save(self):
        self.store.write(self.challenges)

    def load(self):
        self.challenges = self.store.read()

    def vote(self, challenge, user):
        """Record *user*'s vote for a pool challenge.

        Raises KeyError for unknown challenges (previously a bare ``raise``
        with no active exception, which produced a RuntimeError).
        """
        if challenge in self.challenges:
            self.votes[user.id] = challenge
        else:
            raise KeyError(challenge)

    def __str__(self):
        votes_cast = list(self.votes[i] for i in self.votes)
        msg = ["challenge ideas:", ""]
        msg += list("**{}** {} *({} votes)*".format(i, self.challenges[i], votes_cast.count(i)) for i in self.challenges)
        return "\n".join(msg)
class Buttchan():
    """Core bot state: the challenge pool, active challenges and persistence.

    NOTE(review): ``loop=asyncio.get_event_loop()`` is evaluated once at
    import time, not per instance -- confirm this is intended.
    """
    def __init__(self, interface, loop = asyncio.get_event_loop(), filename = "botchan.dat"):
        self.interface = interface
        # Session id derived from the current time; used to claim a channel.
        self.sessid = sha256(str(time.time()).encode()).hexdigest()[:16]
        self.last_id = 0
        self.loop = loop
        self.pool = ChallengePool()
        self.running = {}
        self.challenges = {}
        self.storage = JsonStorage(filename)
        try: self.load()
        except: print("Failed to load Buttchan's stored data on init")
        print("Buttchan with id {} made".format(self.sessid))
    def load(self):
        """Restore last_id, challenges and interface state from disk."""
        self.last_id, data, interfacedata = self.storage.read()
        self.interface.buttload(interfacedata)
        for i in data:
            # Create a placeholder Challenge, then overwrite its state.
            self.challenges[i] = Challenge(self, 0, "")
            self.challenges[i].load(data[i])
    def save(self):
        """Serialize all challenges plus interface state to disk."""
        challenges = {}
        for i in self.challenges:
            challenges[i] = self.challenges[i].save()
        data = [self.last_id, challenges, self.interface.buttsave()]
        self.storage.write(data)
    def challenge(self, *challenges):
        """Create one Challenge per argument tuple; return the new ids."""
        chid = []
        for i in challenges:
            self.challenges[str(self.last_id)] = Challenge(self, str(self.last_id), *i)
            print("Created challenge #{}.".format(self.last_id))
            chid.append(str(self.last_id))
            self.last_id += 1
        self.save()
        return chid
class Challenge():
    """A timed music challenge with three stages (challenge, upload, voting).

    NOTE(review): ``start=time.time()`` is evaluated once at import time, so
    every instance created without an explicit start shares the same default
    timestamp -- ``run()`` overwrites it, but confirm this is intended.
    """
    def __init__(self, butt, id, desc, duration=3600, grace=600, vote=600, start=time.time()):
        self.butt = butt
        self.id = id
        self.running = False
        # Defaults to the bot's own user until a real winner is decided.
        self.winner = butt.interface.user
        self.desc = desc
        self.duration = duration
        # Per-stage durations, indexed by ``self.stage``.
        self.stages = [duration, grace, vote]
        self.stagenames = ["challenge", "upload", "voting"]
        self.stage = 0
        self.start = start
        self.getintervals()
        self.jointimes = {}
        self.entries = {}
        self.participants = []
        self.votes = {}
    def load(self, loadable):
        """Restore challenge state from a dict produced by ``save()``."""
        self.running = loadable["running"]
        if "winner" in loadable:
            self.winner = self.butt.interface.buttchannel.server.get_member(loadable["winner"])
        else:
            self.winner = self.butt.interface.user
        self.desc = loadable["desc"]
        self.duration = loadable["duration"]
        self.stages = [self.duration, loadable["grace"], loadable["vote"]]
        self.stage = loadable["stage"]
        self.start = loadable["start"]
        self.entries = loadable["entries"]
        self.votes = loadable["votes"]
        self.jointimes = loadable["jointimes"]
        self.participants = list(self.butt.interface.get_member(i) for i in loadable["participants"])
    def save(self):
        """Serialize challenge state to a JSON-friendly dict (ids, not objects)."""
        loadable = {}
        loadable["running"] = self.running
        try:
            loadable["winner"] = self.winner.id
        except:
            pass
        loadable["desc"] = self.desc
        loadable["duration"] = self.duration
        loadable["grace"] = self.stages[1]
        loadable["vote"] = self.stages[2]
        loadable["stage"] = self.stage
        loadable["start"] = self.start
        loadable["jointimes"] = self.jointimes
        loadable["entries"] = self.entries
        loadable["votes"] = self.votes
        loadable["participants"] = list(i.id for i in self.participants)
        return loadable
    def getintervals(self):
        """Build the descending-reminder schedule for the current duration."""
        self.intervals = [0]
        for i in intervals():
            if i >= self.duration:
                break
            self.intervals.append(i)
        self.intervals.append(self.duration)
    def __str__(self):
        # Header line: id, state and (when running) remaining time.
        info = ["**Challenge #{}** *({}".format(self.id, [
            "not running",
            "running",
            "upload stage",
            "voting stage",
            "won by " + self.winner.name if self.winner else "nobody"
            ][self.running + self.stage])]
        if 0 <= self.stage <3 and self.running:
            info[-1] += ", {} remaining)*".format(stringtime(self.start + self.duration - time.time()))
        else:
            info[-1] += "*)"
        info.append(self.desc)
        if not self.running: return "\n".join(info)
        info.append("")
        if self.stage == 0:
            if self.participants:
                info.append("Participants: " + ", ".join(i.name for i in self.participants))
            else:
                info.append("Participants: *None yet*")
        else:
            # Later stages show entries and, during voting, vote counts.
            for i in self.participants:
                if i.id in self.entries:
                    info.append("{}: {}".format(i.name, self.entries[i.id]))
                    if self.stage > 1:
                        info[-1] += " *({} votes)*".format(
                            list(self.votes[i] for i in self.votes).count(i.id)
                        )
                else:
                    info.append("{}: *No entry*".format(i.name))
        if self.stage == 3:
            info.append("")
            info.append("**Won by {}!**".format(self.winner.name))
        return "\n".join(info)
    async def run(self):
        """Drive the challenge through its stages, announcing reminders."""
        self.running = True
        self.start = time.time()
        await self.butt.interface.send_message(self.butt.interface.buttchannel,
            "Starting challenge #{}".format(self.id))
        self.butt.save()
        while self.stage <3:
            # Sleep until the next reminder checkpoint, then announce it.
            elapsed = time.time() - self.start
            remaining = self.duration - elapsed
            wait = remaining - self.intervals[-1]
            await asyncio.sleep(wait)
            remaining = self.intervals.pop()
            if remaining:
                await self.butt.interface.send_message(self.butt.interface.buttchannel,
                    "**Challenge #{}** *({} stage)* ~ {} remaining.".format(
                    self.id, self.stagenames[self.stage], stringtime(remaining))
                )
            if not self.intervals:
                self.stage += 1
                if self.stage == 3:
                    # NOTE(review): ``votes`` values appear to be user ids while
                    # ``participants`` holds member objects, so ``agnv.count(x)``
                    # may always be 0 and ``get_member(winner_id)`` receives an
                    # object rather than an id -- verify the winner computation.
                    agnv = list(self.votes[i] for i in self.votes)
                    winner_id = sorted(self.participants, key=lambda x:agnv.count(x))[-1]
                    self.winner = self.butt.interface.buttchannel.server.get_member(winner_id)
                    await self.butt.interface.send_message(self.butt.interface.buttchannel,
                        "Challenge #{} is over! {} wins!".format(self.id, self.winner.mention))
                    continue
                # Advance to the next stage: reset duration, clock and reminders.
                self.duration = self.stages[self.stage]
                self.start = time.time()
                self.getintervals()
                self.butt.save()
class ButtDiscord(discord.Client):
    """Discord front-end for the Buttchan challenge bot.

    Commands are prefixed with ``%``.  The bot is inert until a mod binds it
    to a channel with ``%here <session id>``.

    Fix applied: the admin-denial branches referenced ``sass[10]``, but
    ``sass`` only has indices 0-9 (and index 9 — the permission-denial
    lines — was never used), so every denied mod command raised IndexError.
    They now use ``sass[9]`` as intended.
    """

    def __init__(self):
        super().__init__()
        self.buttchan = None      # set on_ready
        self.buttchannel = None   # channel the bot is bound to (None until %here)

    def buttsave(self):
        # Persisted state for this client: just the bound channel id.
        return self.buttchannel.id

    def buttload(self, data):
        self.buttchannel = self.get_channel(data)

    async def on_ready(self):
        self.buttchan = Buttchan(self)

    async def on_message(self, message):
        # Parse "%command arg1 arg2 ..." with shell-style quoting.
        if message.content.startswith("%"):
            args = shlex.split(message.content[1:])
            command = args.pop(0).lower()
        else: return
        if not self.buttchannel:
            # Not bound to a channel yet: only %id and %here work.
            if command == "id":
                await self.send_message(message.channel, self.buttchan.sessid)
            if command == "here" and args == [self.buttchan.sessid]:
                self.buttchannel = message.channel
                print("Buttchan {} is now in channel {}".format(self.buttchan.sessid, message.channel.id))
                await self.send_message(message.channel, choice(sass[0]))
            return
        if command == "help":
            msg = ["{} {}my id: `{}`".format(message.author.mention, choice(sass[1]), self.buttchan.sessid)]
            msg += ["",
                "`%help` display this message",
                "`%challenge <id>` display challenge information",
                "`%imin <id>` enter a challenge",
                "`%submit <id>` submit your entry",
                "`%vote <id> <@user>` vote for a user's entry",
                "`%pool` view the challenges pool",
                "`%poolvote <id>` vote for a challenge in the pool",
                "`%makechallenge [id] [duration]` create a challenge from the pool *(mods only)*",
                "`%start <id>` start a challenge *(mods only)*",
                "`%replace <pool #>` replace a challenge in the pool *(mods only)*",
                "`%delete <id>` delete a challenge *(mods only)*"]
            await self.send_message(message.channel, "\n".join(msg))
        if command == "challenge":
            # Show one challenge, or the one-line summary of all of them.
            if args:
                if args[0] in self.buttchan.challenges:
                    msg = ["{} {}".format(message.author.mention, str(self.buttchan.challenges[args[0]]))]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[2])), ""]
                msg += list(str(self.buttchan.challenges[i]).split("\n")[0] for i in self.buttchan.challenges)
                if not self.buttchan.challenges:
                    msg += ["i don't have any challenges at the moment..."]
            await self.send_message(message.channel, "\n".join(msg))
        if command == "imin":
            # Join a challenge; only possible during stage 0 (sign-up).
            if args:
                if args[0] in self.buttchan.challenges:
                    challenge = self.buttchan.challenges[args[0]]
                    if message.author in challenge.participants:
                        msg = ["{} {}".format(message.author.mention, choice(sass[5]))]
                    else:
                        if challenge.stage == 0:
                            challenge.participants.append(message.author)
                            challenge.jointimes[message.author.id] = time.time()
                            msg = ["{} {}".format(message.author.mention, choice(sass[7]))]
                            if challenge.running:
                                msg[-1] += " ~ you have {} tho".format(stringtime(challenge.duration + challenge.start - time.time()))
                        else:
                            msg = ["{} {}".format(message.author.mention, choice(sass[8]))]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[4]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            await self.send_message(message.channel, "\n".join(msg))
        if command == "submit":
            # Record an entry; only possible during stage 1 (upload).
            if args:
                if args[0] in self.buttchan.challenges:
                    challenge = self.buttchan.challenges[args[0]]
                    if challenge.stage == 1:
                        challenge.entries[message.author.id] = args[1]
                        msg = ["{} your entry has been counted <3".format(message.author.mention)]
                    else:
                        msg = ["{} you can only submit during the upload stage".format(message.author.mention)]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            await self.send_message(message.channel, "\n".join(msg))
            self.buttchan.save()
        if command == "vote":
            # Vote for a participant by mention; only during stage 2 (voting).
            if args:
                if args[0] in self.buttchan.challenges:
                    challenge = self.buttchan.challenges[args[0]]
                    # Strip "<@" and ">" from the mention to get the user id.
                    vote_id = args[1][2:-1]
                    # NOTE(review): participants holds Member objects, so this
                    # membership test compares an id string against Members and
                    # looks like it can never succeed — verify and fix upstream.
                    if vote_id in challenge.participants:
                        if challenge.stage == 2:
                            challenge.votes[message.author.id] = vote_id
                            msg = ["{} your vote has been counted <3".format(message.author.mention)]
                        else:
                            msg = ["{} you can only vote during the voting stage".format(message.author.mention)]
                    else:
                        msg = ["{} that person doesn't exist it seems".format(message.author.mention)]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            await self.send_message(message.channel, "\n".join(msg))
            self.buttchan.save()
        if command == "start":
            # Mods only: launch the challenge's timed run() task.
            if not self.admincheck(message.author):
                msg = ["{} {}".format(message.author.mention, choice(sass[9]))]
            elif args:
                if args[0] in self.buttchan.challenges:
                    challenge = self.buttchan.challenges[args[0]]
                    # NOTE(review): no msg is assigned on this success path; the
                    # NameError below is swallowed by the bare except, so nothing
                    # is sent.  Kept as-is to preserve observable behavior.
                    self.buttchan.running[args[0]] = self.buttchan.loop.create_task(challenge.run())
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            try:
                await self.send_message(message.channel, "\n".join(msg))
            except: pass
            self.buttchan.save()
        if command == "replace":
            # Mods only: throw away a pool entry and generate a fresh one.
            if not self.admincheck(message.author):
                msg = ["{} {}".format(message.author.mention, choice(sass[9]))]
            elif args:
                if args[0] in self.buttchan.pool.challenges:
                    del self.buttchan.pool.challenges[args[0]]
                    self.buttchan.pool.fill()
                    msg = ["{} made a new thingy for ya".format(message.author.mention)]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            try:
                await self.send_message(message.channel, "\n".join(msg))
            except: pass
        if command == "delete":
            # Mods only: cancel a running challenge (best effort) and drop it.
            if not self.admincheck(message.author):
                msg = ["{} {}".format(message.author.mention, choice(sass[9]))]
            elif args:
                if args[0] in self.buttchan.challenges:
                    try:
                        # NOTE(review): asyncio tasks have cancel(), not stop();
                        # the bare except hides that — confirm intent.
                        self.buttchan.running[args[0]].stop()
                        del self.buttchan.running[args[0]]
                    except: pass
                    del self.buttchan.challenges[args[0]]
                    msg = ["{} rip that challenge".format(message.author.mention)]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            try:
                await self.send_message(message.channel, "\n".join(msg))
            except: pass
            self.buttchan.save()
        if command == "pool":
            await self.send_message(message.channel, str(self.buttchan.pool))
        if command == "poolvote":
            if args:
                try:
                    self.buttchan.pool.vote(args[0], message.author)
                    msg = ["{} your vote has been counted <3".format(message.author.mention)]
                except:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                msg = ["{} {}".format(message.author.mention, choice(sass[6]))]
            await self.send_message(message.channel, "\n".join(msg))
            self.buttchan.save()
        if command == "makechallenge":
            # Mods only: promote a pool entry (by id, or the top-voted one)
            # into a real challenge, optionally with a custom duration.
            if not self.admincheck(message.author):
                msg = ["{} {}".format(message.author.mention, choice(sass[9]))]
                await self.send_message(message.channel, "\n".join(msg))
                return
            agnv = list(self.buttchan.pool.votes[i] for i in self.buttchan.pool.votes)
            highest_challenge = sorted(self.buttchan.pool.challenges, key=lambda x:agnv.count(x))[-1]
            highest_challenge = self.buttchan.pool.challenges[highest_challenge]
            if args:
                if args[0] in self.buttchan.pool.challenges:
                    challenge = self.buttchan.pool.challenges[args[0]]
                    if len(args) > 1:
                        try:
                            chid = self.buttchan.challenge([challenge, float(args[1])])
                            msg = ["{} made challenge *{}*".format(message.author.mention, chid[0])]
                        except:
                            msg = ["{} that aint a number ;~;".format(message.author.mention)]
                    else:
                        chid = self.buttchan.challenge([challenge])
                        msg = ["{} made challenge *{}*".format(message.author.mention, chid[0])]
                else:
                    msg = ["{} {}".format(message.author.mention, choice(sass[3]))]
            else:
                chid = self.buttchan.challenge([highest_challenge])
                msg = ["{} made challenge *{}*".format(message.author.mention, chid[0])]
            await self.send_message(message.channel, "\n".join(msg))
            self.buttchan.save()

    def admincheck(self, user):
        # "Mod" means anyone who may kick members in the bound channel.
        return self.buttchannel.permissions_for(user).kick_members
# Canned response lines, one list per situation; each list is a "|"-separated
# pool that choice() picks from.  Indices: 0 greeting, 1 identity, 2 challenge
# listing, 3 unknown id, 4 nonexistent challenge, 5 already joined, 6 missing
# id argument, 7 joined ok, 8 wrong stage, 9 permission denied.
sass = [
"hai ;3|oh hello|bbs :D|sup hoes".split("|"),
"you fuckin know who i am~ |isn't my name obvious enough...|i'm a buttchan! ".split("|"),
"here's a list of timewasters for you:|these challenges are lit|i'll challenge you~|*siiiigh*".split("|"),
"there's no challenge like that|check your spelling pls bb|woah hold up, dunno what that is lol|bruh...noooo".split("|"),
"you can't join a nonexistent challenge|why don't you *make* that challenge first...|sorry eh, that one doesn't exist, can't put you in it".split("|"),
"you're already in that sweetie|you're in that one...|you can't enter twice you goon".split("|"),
"you gotta tell me what challenge you're talking about|woah woah i need some id|what challenge tho".split("|"),
"added you <3|you're innnn|gl :D|have fuuuun bb :D you're in".split("|"),
"that challenge isn't in a joinable stage|that one's like...over already...|*i cannot turn back time*".split("|"),
"chek ur fuggin privlig|you ain't no staff member last i checked|get somebody who knows what they're doing to do that".split("|")]
if __name__ == "__main__":
    # Usage: python buttchan.py <discord bot token>
    interface = ButtDiscord()
    interface.run(*sys.argv[1:])
| StarcoderdataPython |
5157996 | from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib import auth
class UserCreationFormNew(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.

    Fix applied: ``clean_password2`` and ``save`` fetched cleaned data with the
    literal key ``"<PASSWORD>"`` (an anonymization placeholder), so the
    mismatch check always compared two identical ``None`` values and the saved
    user never received the entered password.  The real field names
    ``password1`` / ``password2`` are restored.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    password1 = forms.CharField(label=_("Password"),
                                widget=forms.PasswordInput(attrs={
                                    'class': 'form-control',
                                    'placeholder': 'Password',
                                    'required': 'required'
                                }))
    password2 = forms.CharField(label=_("Password confirmation"),
                                widget=forms.PasswordInput(attrs={
                                    'class': 'form-control',
                                    'required': 'required',
                                    'placeholder': 'Confirm password'
                                }),
                                help_text=_("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_password2(self):
        """Validate that both password entries match; return the confirmation."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2

    def save(self, commit=True):
        """Create the user, hashing the chosen password via set_password()."""
        user = super(UserCreationFormNew, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserAuthenticationForm(forms.ModelForm):
    """Login form: renders username/password inputs and checks the
    credentials against django.contrib.auth during clean()."""

    class Meta:
        model = User
        fields = ()

    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }

    username = forms.CharField(
        label=_("Username"),
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'required': 'required',
            'name': 'username',
            'placeholder': "Username"
        }),
    )
    password = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(attrs={
            'class': 'form-control',
            'required': 'required',
            'placeholder': 'Password',
            'name': 'password'
        }),
    )

    def clean(self):
        """Reject the form unless the credentials map to an active user."""
        data = self.cleaned_data
        account = auth.authenticate(username=data.get('username'),
                                    password=data.get('password'))
        if account is None or not account.is_active:
            raise forms.ValidationError("Sorry, that login was invalid. Please try again.")
        return data
| StarcoderdataPython |
3432304 | <reponame>chandrakant100/Python-Project<filename>Challenges-5/over9000.py
# Challenge 7 : Create a function named over_nine_thousand() that takes a list of numbers named lst as a parameter.
# The function should sum the elements of the list until the sum is greater than 9000.
# When this happens, the function should return the sum.
# If the sum of all of the elements is never greater than 9000, the function should return total sum of all the elements.
# If the list is empty, the function should return 0.
# Date : Sun 07 Jun 2020 08:09:34 AM IST
def over_nine_thousand(lst):
    """Sum the numbers in lst, stopping as soon as the running total
    exceeds 9000; return whatever total was reached (0 for an empty list)."""
    total = 0
    for value in lst:
        total += value
        if total > 9000:
            return total
    return total
# Demo: 8000 + 900 + 120 = 9020 > 9000, so the sum stops before 5000.
print(over_nine_thousand([8000, 900, 120, 5000]))
| StarcoderdataPython |
5029959 | from toee import *
from utilities import *
from InventoryRespawn import *
from py00439script_daemon import record_time_stamp, get_v, set_v, tsc, within_rect_by_corners
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
    # Choose Rannos' dialog entry node from quest/flag state.
    # Node numbers are indices into the matching .dlg file.
    if (attachee.map == 5074):
        # Found them in the wilderness
        attachee.turn_towards(triggerer)
        triggerer.begin_dialog( attachee, 820)
        return SKIP_DEFAULT
    if ((game.global_flags[835] == 1 or game.global_flags[837] == 1) and game.global_flags[37] == 0 and attachee.has_met(triggerer) and not game.quests[16].state == qs_completed and not game.quests[15].state == qs_completed and game.global_flags[843] == 0):
        # Lareth Prisoner subplot node (enables to ask about master)
        triggerer.begin_dialog( attachee, 2500 )
    if (game.quests[16].state == qs_completed and game.quests[15].state == qs_completed):
        # Ratted Traders to Burne
        triggerer.begin_dialog( attachee, 20 )
    elif (game.global_flags[41] == 1 or game.quests[16].state == qs_completed or game.quests[64].state == qs_completed):
        triggerer.begin_dialog( attachee, 290 )
    elif (game.quests[17].state == qs_completed):
        triggerer.begin_dialog( attachee, 30 )
    elif (game.global_flags[31] == 1 or game.quests[64].state == qs_botched):
        triggerer.begin_dialog( attachee, 50 )
    elif (attachee.has_met(triggerer)):
        triggerer.begin_dialog( attachee, 70 )
    else:
        # First meeting: default greeting.
        triggerer.begin_dialog( attachee, 1 )
    return SKIP_DEFAULT
def san_first_heartbeat( attachee, triggerer ):
    # On the shop map, hide or show the NPC depending on story progress,
    # and schedule the daily shop-inventory respawn exactly once.
    if (attachee.map == 5010):
        if (game.global_vars[501] == 4 or game.global_vars[501] == 5 or game.global_vars[501] == 6 or game.global_vars[510] == 2):
            attachee.object_flag_set(OF_OFF)
        elif (game.global_vars[750] == 0):
            attachee.object_flag_unset(OF_OFF)
        # NOTE(review): source indentation was lost; the respawn scheduling is
        # reconstructed as a sibling of the if/elif above — confirm nesting.
        if (game.global_flags[907] == 0):
            game.timevent_add(respawn, (attachee), 86400000 ) #86400000ms is 24 hours
            game.global_flags[907] = 1
    return RUN_DEFAULT
def san_dying( attachee, triggerer ):
    # Scale challenge rating to the party's average level, then record the
    # death and swap the party's reputation from 23 to 9.
    if should_modify_CR( attachee ):
        modify_CR( attachee, get_av_level() )
    # NOTE(review): indentation reconstructed — confirm everything below is
    # meant to be under the map-5010 check.
    if (attachee.map == 5010):
        rngfighttime_set()
        game.global_flags[426] = 1
        game.global_flags[814] = 1
        if (game.global_flags[815] == 1):
            for pc in game.party:
                if ( pc.reputation_has( 23 ) == 1):
                    pc.reputation_remove( 23 )
            if (game.party[0].reputation_has(9) == 0):
                game.party[0].reputation_add(9)
    return RUN_DEFAULT
def san_enter_combat( attachee, triggerer ):
    # When a PC attacks, pull Raimol (proto 8050) out of any follower slot
    # and have him join the fight against the attacker.
    if (triggerer.type == obj_t_pc and game.global_vars[750] == 0):
        raimol = find_npc_near( attachee, 8050)
        if (raimol != OBJ_HANDLE_NULL):
            attachee.float_line(380,triggerer)
            leader = raimol.leader_get()
            if (leader != OBJ_HANDLE_NULL):
                leader.follower_remove(raimol)
            raimol.attack(triggerer)
    return RUN_DEFAULT
def san_start_combat( attachee, triggerer ):
    # Combat-round hook: advances the "traders flee" mini-state machine
    # (global_vars[750]), offers surrender dialog, and finally handles the
    # engine's break-free workaround.  Indentation reconstructed from a
    # whitespace-stripped source — verify nesting against the Co8 original.
    if (game.global_vars[750] == 1 and attachee.map == 5010 and critter_is_unconscious(attachee) != 1 and not attachee.d20_query(Q_Prone)):
        # Stage 1 -> 2: drop the item (proto 8010) and keep fighting.
        game.global_vars[750] = 2
        create_item_in_inventory( 8010, attachee )
        return RUN_DEFAULT
    if (game.global_vars[750] == 2 and attachee.map == 5010 and critter_is_unconscious(attachee) != 1 and not attachee.d20_query(Q_Prone)):
        # Stage 2 -> 3: brand the party and run off.
        game.global_vars[750] = 3
        if (game.party[0].reputation_has(23) == 0):
            game.party[0].reputation_add(23)
        attachee.runoff(attachee.location-3)
        return SKIP_DEFAULT
    if ( game.global_vars[751] == 0 and attachee.stat_level_get(stat_hp_current) >= 0 and game.global_flags[815] == 1 and attachee.map == 5010) and (game.global_vars[450] & 2**0 == 0) and (game.global_vars[450] & 2**10 == 0):
        # Clear mutual hostility lists, then open surrender dialog 1100 with
        # the first conscious PC.
        found_pc = OBJ_HANDLE_NULL
        gremag = find_npc_near(attachee,8049)
        raimol = find_npc_near(attachee,8050)
        for pc in game.party[0].group_list():
            if pc.type == obj_t_pc and pc.is_unconscious() == 0:
                found_pc = pc
                attachee.ai_shitlist_remove( pc )
                gremag.ai_shitlist_remove( pc )
                raimol.ai_shitlist_remove( pc )
            else:
                attachee.ai_shitlist_remove( pc )
                gremag.ai_shitlist_remove( pc )
                raimol.ai_shitlist_remove( pc )
            pc.ai_shitlist_remove( attachee )
            pc.ai_shitlist_remove( gremag )
            pc.ai_shitlist_remove( raimol )
        if (found_pc != OBJ_HANDLE_NULL):
            found_pc.begin_dialog( attachee, 1100 )
            return SKIP_DEFAULT
    if ( obj_percent_hp(attachee) < 95 and game.global_vars[750] == 0 and attachee.stat_level_get(stat_hp_current) >= 0 and attachee.map == 5010) and (game.global_vars[450] & 2**0 == 0) and (game.global_vars[450] & 2**10 == 0):
        # Wounded but alive: same hostility cleanup, then dialog 1000 while
        # Gremag still stands, 1100 otherwise.
        found_pc = OBJ_HANDLE_NULL
        gremag = find_npc_near(attachee,8049)
        raimol = find_npc_near(attachee,8050)
        for pc in game.party[0].group_list():
            if pc.type == obj_t_pc and pc.is_unconscious() == 0:
                found_pc = pc
                attachee.ai_shitlist_remove( pc )
                gremag.ai_shitlist_remove( pc )
                raimol.ai_shitlist_remove( pc )
            else:
                attachee.ai_shitlist_remove( pc )
                gremag.ai_shitlist_remove( pc )
                raimol.ai_shitlist_remove( pc )
            pc.ai_shitlist_remove( attachee )
            pc.ai_shitlist_remove( gremag )
            pc.ai_shitlist_remove( raimol )
        if (found_pc != OBJ_HANDLE_NULL):
            if (game.global_flags[815] == 0 and gremag.stat_level_get(stat_hp_current) >= 0):
                found_pc.begin_dialog( attachee, 1000 )
                return SKIP_DEFAULT
            if (game.global_flags[815] == 1 or gremag.stat_level_get(stat_hp_current) <= -1):
                found_pc.begin_dialog( attachee, 1100 )
                return SKIP_DEFAULT
    ## THIS IS USED FOR BREAK FREE
    for obj in game.party[0].group_list():
        if (obj.distance_to(attachee) <= 3 and obj.stat_level_get(stat_hp_current) >= -9):
            return RUN_DEFAULT
    # Purge any leftover break-free helper items (proto 8903).
    while(attachee.item_find(8903) != OBJ_HANDLE_NULL):
        attachee.item_find(8903).destroy()
    #if (attachee.d20_query(Q_Is_BreakFree_Possible)): # workaround no longer necessary!
    # create_item_in_inventory( 8903, attachee )
    ## attachee.d20_send_signal(S_BreakFree)
    return RUN_DEFAULT
def san_resurrect( attachee, triggerer ):
    """Undo the death bookkeeping from san_dying: clear flag 814 and swap
    the party's reputation 9 back to 23."""
    game.global_flags[814] = 0
    if game.party[0].reputation_has(9) != 1:
        return RUN_DEFAULT
    for pc in game.party:
        if pc.reputation_has(23) == 0:
            pc.reputation_add(23)
    game.party[0].reputation_remove(9)
    return RUN_DEFAULT
def san_heartbeat( attachee, triggerer ):
    # Housekeeping: while the flee-state machine is idle (var 750 == 0) on the
    # shop map, remove any stray proto-8010 item and detach this heartbeat
    # script (new_sid = 0).
    itemA = attachee.item_find(8010)
    if (itemA != OBJ_HANDLE_NULL and game.global_vars[750] == 0 and attachee.map == 5010):
        itemA.destroy()
        # create_item_in_inventory( 8021, attachee )
        game.new_sid = 0
    return RUN_DEFAULT
def switch_to_gremag( rannos, pc ):
    """Hand the conversation over from Rannos to Gremag (dialog node 1010),
    flagging the flee-state machine (vars 750/751) and facing the pair."""
    trader = find_npc_near(rannos, 8049)
    game.global_vars[750] = 1
    game.global_vars[751] = 1
    pc.begin_dialog(trader, 1010)
    rannos.turn_towards(trader)
    trader.turn_towards(rannos)
    return SKIP_DEFAULT
def respawn(attachee):
    """Refill the nearby loot container (id 1004) and reschedule this
    respawn to fire again in 24 hours."""
    chest = find_container_near(attachee, 1004)
    RespawnInventory(chest)
    game.timevent_add(respawn, (attachee), 86400000)  # 24h in milliseconds
    return
def buff_npc( attachee, triggerer ):
    """Pre-buff nearby unled guard NPCs: 14607 gets stoneskin, 14609 gets
    shield of faith (each cast on itself)."""
    self_buffs = {14607: spell_stoneskin, 14609: spell_shield_of_faith}
    for npc in game.obj_list_vicinity(attachee.location, OLC_NPC):
        spell = self_buffs.get(npc.name)
        if spell is not None and npc.leader_get() == OBJ_HANDLE_NULL:
            npc.cast_spell(spell, npc)
    return RUN_DEFAULT
def buff_npc_two( attachee, triggerer ):
    """Second pre-buff wave: 14607 casts improved invisibility on itself,
    14609 casts owl's wisdom on itself."""
    self_buffs = {14607: spell_improved_invisibility, 14609: spell_owls_wisdom}
    for npc in game.obj_list_vicinity(attachee.location, OLC_NPC):
        spell = self_buffs.get(npc.name)
        if spell is not None and npc.leader_get() == OBJ_HANDLE_NULL:
            npc.cast_spell(spell, npc)
    return RUN_DEFAULT
def buff_npc_three( attachee, triggerer ):
    """Endurance wave: 14607 buffs the attachee, 14609 buffs Gremag (8049)."""
    gremag = find_npc_near(attachee, 8049)
    for npc in game.obj_list_vicinity(attachee.location, OLC_NPC):
        if npc.leader_get() != OBJ_HANDLE_NULL:
            continue
        if npc.name == 14607:
            npc.cast_spell(spell_endurance, attachee)
        elif npc.name == 14609:
            npc.cast_spell(spell_endurance, gremag)
    return RUN_DEFAULT
def buff_npc_four( attachee, triggerer ):
    """Endurance wave: 14607 buffs the attachee, 14609 buffs the nearby
    proto-14606 NPC."""
    ally = find_npc_near(attachee, 14606)
    for npc in game.obj_list_vicinity(attachee.location, OLC_NPC):
        if npc.leader_get() != OBJ_HANDLE_NULL:
            continue
        if npc.name == 14607:
            npc.cast_spell(spell_endurance, attachee)
        elif npc.name == 14609:
            npc.cast_spell(spell_endurance, ally)
    return RUN_DEFAULT
def burne_quest():
    """Complete Burne's quest (#64) if it is currently mentioned/accepted."""
    if game.quests[64].state in (qs_accepted, qs_mentioned):
        game.quests[64].state = qs_completed
    return
def rngfighttime_set():
    """Latch global flag 426 once, time-stamping its first occurrence."""
    if game.global_flags[426] != 0:
        return
    record_time_stamp(426)
    game.global_flags[426] = 1
    return
def q16():
    """Complete quest #16 (if active) and time-stamp the completion."""
    if game.quests[16].state in (qs_accepted, qs_mentioned):
        game.quests[16].state = qs_completed
        record_time_stamp(431)
    return
def f41():
    """Latch global flag 41 once, time-stamping the first occurrence."""
    if game.global_flags[41] != 0:
        return
    game.global_flags[41] = 1
    record_time_stamp(432)
    return
4902439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from .graph import DAG
from .utils import kernel_mode, enter_build_mode
class GraphBuilder(object):
    """Base class for incrementally building a dependency DAG of operand
    output nodes.  Subclasses implement build()."""

    def __init__(self, graph=None, graph_cls=DAG, node_processor=None,
                 inputs_selector=None):
        # graph: reuse an existing graph instance; otherwise construct a
        # fresh graph_cls().
        # node_processor: optional callable applied to every node before it
        # is inserted (e.g. for copying/normalizing).
        # inputs_selector: optional filter over a node's inputs; defaults to
        # the identity.
        self._graph_cls = graph_cls
        if graph is not None:
            self._graph = graph
        else:
            self._graph = graph_cls()
        self._node_processor = node_processor
        if inputs_selector is None:
            inputs_selector = lambda x: x
        self._inputs_selector = inputs_selector

    def _add_nodes(self, nodes, visited):
        # Depth-first expansion: `nodes` is consumed as a stack, and for each
        # node we add edges from its (selected) inputs, then push the inputs'
        # sibling outputs so whole multi-output ops get included.
        # NOTE: mutates both `nodes` and `visited` in place.
        graph = self._graph
        visited.update(nodes)
        while len(nodes) > 0:
            node = nodes.pop()
            if self._node_processor:
                # if node processor registered, process the node first
                node = self._node_processor(node)
            visited.add(node)
            if not graph.contains(node):
                graph.add_node(node)
            children = self._inputs_selector(node.inputs or [])
            for c in children:
                if self._node_processor:
                    c = self._node_processor(c)
                if not graph.contains(c):
                    graph.add_node(c)
                if not graph.has_successor(c, node):
                    # Edge direction: input -> consumer.
                    graph.add_edge(c, node)
                for n in c.op.outputs:
                    if n not in visited:
                        nodes.append(n)

    def build(self, tileables, tileable_graph=None):
        """Build and return the graph for `tileables`; subclass hook."""
        raise NotImplementedError
class TileableGraphBuilder(GraphBuilder):
    """Graph builder operating at the tileable (coarse-grained) level."""

    def __init__(self, graph=None, graph_cls=DAG, node_processor=None,
                 inputs_selector=None):
        super().__init__(graph=graph, graph_cls=graph_cls,
                         node_processor=node_processor,
                         inputs_selector=inputs_selector)

    @kernel_mode
    @enter_build_mode
    def build(self, tileables, tileable_graph=None):
        """Expand `tileables` (and their op siblings/inputs) into the graph.

        If a prebuilt `tileable_graph` is supplied it is returned untouched.
        """
        if tileable_graph is not None:  # pragma: no cover
            return tileable_graph
        seen = set()
        pending = [out for t in tileables for out in t.op.outputs]
        self._add_nodes(pending, seen)
        return self._graph
| StarcoderdataPython |
1634699 | '评论应用模型'
from django.db import models
from django.contrib.auth.models import User
# Import the article model from another app
from stocks.models import BKDetail
class BaseComment(models.Model):
    """Abstract base for all comment models: text, timestamp and author.

    The verbose_name strings are user-facing (Chinese) and kept as-is.
    """
    content = models.TextField('评论', max_length=500)
    time = models.DateTimeField('评论时间', auto_now_add=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='评论者')

    class Meta:
        abstract = True  # no table of its own; inherited by concrete comments
class ArticleComment(BaseComment):
    """Top-level comment attached to a BKDetail article."""
    article = models.ForeignKey(BKDetail, on_delete=models.CASCADE, verbose_name='评论')

    class Meta:
        ordering = ['-time']  # newest comments first
class ArticleCommentReply(BaseComment):
    """Second-level comment: a reply to an ArticleComment, optionally
    directed at another reply (self-referencing `reply` FK)."""
    comment = models.ForeignKey(ArticleComment, on_delete=models.CASCADE, related_name='replies', verbose_name='一级评论')
    reply = models.ForeignKey('self', null=True, blank=True, on_delete=models.CASCADE, verbose_name='回复对象')

    class Meta:
        ordering = ['time']  # oldest first: natural reading order for replies
3318322 | from root import MEASURE_RESULTS
import confmeasure
import measure_helpers as util
from measure_helpers import (
GUEST_JAVDEV,
GUEST_QEMUBLK,
GUEST_QEMU9P,
GUEST_JAVDEV_MOUNT,
GUEST_QEMUBLK_MOUNT,
HOST_SSD,
run,
)
from qemu import QemuVm
from typing import List, Any, Optional, Callable, Tuple
import re
import json
# overwrite the number of samples to take to a minimum
QUICK = True
def lsblk(vm: QemuVm) -> None:
    """Print the guest's block-device listing (best effort, no error check)."""
    listing = vm.ssh_cmd(["lsblk"], check=False)
    print(listing.stdout)
def hdparm(vm: QemuVm, device: str) -> Optional[float]:
    """Run `hdparm -t device` in the guest and return the measured
    throughput in MB/sec, or None if hdparm failed.

    NOTE(review): the parse below picks whitespace-separated tokens 5 and 8
    from hdparm's human-readable output — fragile against hdparm formatting
    changes; verify against the hdparm version in the guest image.
    """
    term = vm.ssh_cmd(["hdparm", "-t", device], check=False)
    if term.returncode != 0:
        return None
    out = term.stdout
    print(out)
    out = re.sub(" +", " ", out).split(" ")
    mb = float(out[5])
    sec = float(out[8])
    return mb / sec
def fio(
    vm: Optional[QemuVm],
    device: str,
    random: bool = False,
    readonly: bool = True,
    iops: bool = False,
    file: bool = False,
) -> Tuple[float, float, float, float]:
    """
    inspired by https://docs.oracle.com/en-us/iaas/Content/Block/References/samplefiocommandslinux.htm
    @param random: random vs sequential
    @param readonly: readonly vs read+write
    @param iops: return iops vs bandwidth
    @param file: target is file vs blockdevice
    @return (read_mean, stddev, write_mean, stdev) in kiB/s

    Runs fio either on the host (vm is None) or inside the guest via ssh,
    and parses fio's JSON output.
    """
    runtime = 120
    size = 100 # filesize in GB
    if QUICK:
        runtime = 10
        size = 10
    # Assemble the fio command line piece by piece.
    cmd = ["fio"]
    if file:
        # File workload: fio creates/uses a file of `size` GB on the mount.
        cmd += [f"--filename={device}/file", f"--size={size}GB"]
    else:
        # Raw block device: bypass the page cache with O_DIRECT.
        cmd += [f"--filename={device}", "--direct=1"]
    if readonly and random:
        cmd += ["--rw=randread"]
    elif not readonly and random:
        # fio/examples adds rwmixread=60 and rwmixwrite=40 here
        cmd += ["--rw=randrw"]
    elif readonly and not random:
        cmd += ["--rw=read"]
    elif not readonly and not random:
        cmd += ["--rw=readwrite"]
    if iops:
        # fio/examples uses 16 here as well
        cmd += ["--bs=4k", "--ioengine=libaio", "--iodepth=64"]
    else:
        # Bandwidth measurement: larger blocks, shallower queue.
        cmd += ["--bs=64k", "--ioengine=libaio", "--iodepth=16"]
    cmd += [
        f"--runtime={runtime}",
        "--numjobs=1",
        "--time_based",
        "--group_reporting",
        "--name=generic_name",
        "--eta-newline=1",
    ]
    if readonly:
        cmd += ["--readonly"]
    cmd += ["--output-format=json"]
    print(cmd)
    if vm is None:
        term = run(cmd, check=True)
    else:
        term = vm.ssh_cmd(cmd, check=True)
    out = term.stdout
    # print(out)
    j = json.loads(out)
    read = j["jobs"][0]["read"]
    write = j["jobs"][0]["write"]
    if iops:
        print(
            "IOPS: read",
            read["iops_mean"],
            read["iops_stddev"],
            "write",
            write["iops_mean"],
            write["iops_stddev"],
        )
        return (
            read["iops_mean"],
            read["iops_stddev"],
            write["iops_mean"],
            write["iops_stddev"],
        )
    else:
        # bw_mean is reported in KiB/s; convert for the console summary only.
        print("Bandwidth read", float(read["bw_mean"]) / 1024 / 1024, "GB/s")
        print("Bandwidth write", float(write["bw_mean"]) / 1024 / 1024, "GB/s")
        return (read["bw_mean"], read["bw_dev"], write["bw_mean"], write["bw_dev"])
# Default sample() parameters; QUICK mode shrinks the sample count.
SIZE = 16
WARMUP = 0
if QUICK:
    WARMUP = 0
    SIZE = 2
def sample(
    f: Callable[[], Optional[float]], size: int = SIZE, warmup: int = WARMUP
) -> List[float]:
    """Call `f` `warmup` times (results discarded), then collect `size`
    measurements.  If any measured call yields None the whole run is
    considered failed and [] is returned."""
    for _ in range(warmup):
        f()
    results: List[float] = []
    for _ in range(size):
        value = f()
        if value is None:
            return []
        results.append(value)
    return results
# QUICK: ? else: ~5min
def fio_suite(
    vm: Optional[QemuVm], measurements: Any, device: str, name: str, file: bool = True
) -> Any:
    """Run the two headline fio benchmarks against `device` and record the
    (mean, dev) tuples under measurements["fio"]:
      - "<name>-best-case-bw":  sequential read+write bandwidth
      - "<name>-worst-case-iops": random read+write IOPS
    """
    measurements["fio"][f"{name}-best-case-bw"] = list(
        fio(
            vm,
            device,
            random=False,
            readonly=False,
            iops=False,
            file=file,
        )
    )
    measurements["fio"][f"{name}-worst-case-iops"] = list(
        fio(
            vm,
            device,
            random=True,
            readonly=False,
            iops=True,
            file=file,
        )
    )
    pass  # vestigial; function implicitly returns None
# not quick: 5 * fio_suite(5min) + 2 * sample(5min) = 35min
if __name__ == "__main__":
    # Benchmark driver: runs the fio suite against the host SSD and against
    # the various guest attachment paths (qemu-blk, 9p, vmsh ws/ioregionfd),
    # re-imaging the SSD between destructive runs.
    # NOTE(review): indentation of the with-blocks was reconstructed from a
    # whitespace-stripped source — verify nesting against the original repo.
    util.check_ssd()
    util.check_system()
    helpers = confmeasure.Helpers()
    measurements = util.read_stats(f"{MEASURE_RESULTS}/stats.json")
    measurements["fio"] = {}
    measurements["hdparm"] = {}
    # fresh ssd, unmount and fio_suite(HOST_SSD)
    with util.fresh_ssd():
        pass
    fio_suite(None, measurements, HOST_SSD, "direct_host", file=False)
    with util.fresh_ssd():
        with util.testbench(helpers, with_vmsh=False, ioregionfd=False) as vm:
            fio_suite(
                vm, measurements, GUEST_QEMUBLK, "direct_detached_qemublk", file=False
            )
    # testbench wants to mount again -> restore fs via fresh_ssd()
    with util.fresh_ssd():
        with util.testbench(helpers, with_vmsh=True, ioregionfd=False) as vm:
            fio_suite(vm, measurements, GUEST_QEMUBLK, "direct_ws_qemublk", file=False)
            fio_suite(vm, measurements, GUEST_JAVDEV, "direct_ws_javdev", file=False)
    with util.fresh_ssd():
        with util.testbench(helpers, with_vmsh=True, ioregionfd=True) as vm:
            fio_suite(
                vm, measurements, GUEST_QEMUBLK, "direct_iorefd_qemublk", file=False
            )
            fio_suite(
                vm, measurements, GUEST_JAVDEV, "direct_iorefd_javdev", file=False
            )
    # we just wrote randomly to the disk -> fresh_ssd() required
    with util.fresh_ssd():
        with util.testbench(helpers, with_vmsh=False, ioregionfd=False) as vm:
            lsblk(vm)
            fio_suite(vm, measurements, GUEST_QEMUBLK_MOUNT, "detached_qemublk")
            fio_suite(vm, measurements, GUEST_QEMU9P, "detached_qemu9p")
            measurements["hdparm"]["detached_qemublk"] = sample(
                lambda: hdparm(vm, GUEST_QEMUBLK)
            )
        with util.testbench(helpers, with_vmsh=True, ioregionfd=False) as vm:
            fio_suite(vm, measurements, GUEST_JAVDEV_MOUNT, "attached_ws_javdev")
        with util.testbench(helpers, with_vmsh=True, ioregionfd=True) as vm:
            fio_suite(vm, measurements, GUEST_JAVDEV_MOUNT, "attached_iorefd_javdev")
            measurements["hdparm"]["attached_iorefd_javdev"] = sample(
                lambda: hdparm(vm, GUEST_JAVDEV)
            )
    # Export plots and persist the collected statistics.
    util.export_lineplot("hdparm_warmup", measurements["hdparm"])
    util.export_barplot("hdparm_warmup_barplot", measurements["hdparm"])
    util.export_fio("fio", measurements["fio"])
    util.write_stats(f"{MEASURE_RESULTS}/stats.json", measurements)
"""
# Compare block devices:
- qemu virtio blk
- qemu virtio 9p
- vmsh virtio blk ws
- vmsh virtio blk ioregionfd
for each:
- best case bw read
- best case bw write
- worst case iops
# Compare guest performance under vmsh
- native
- detached
- vmsh ws
- vmsh ioregionfd
- run via vmsh (in container in vm)
- run via ssh (no container in vm)
for each:
- blkdev
- shell latency
- phoronix
"""
| StarcoderdataPython |
362201 | <reponame>yelircaasi/manim
import click
from cloup import option, option_group
# CLI option group (cloup) bundling manim's ease-of-access flags; attached to
# render commands as a decorator.
ease_of_access_options = option_group(
    "Ease of access options",
    option(
        "--progress_bar",
        default="display",
        show_default=True,
        type=click.Choice(
            ["display", "leave", "none"],
            case_sensitive=False,
        ),
        help="Display progress bars and/or keep them displayed.",
    ),
    option(
        "-p",
        "--preview",
        is_flag=True,
        help="Preview the Scene's animation. OpenGL does a live preview in a "
        "popup window. Cairo opens the rendered video file in the system "
        "default media player.",
    ),
    option(
        "-f",
        "--show_in_file_browser",
        is_flag=True,
        help="Show the output file in the file browser.",
    ),
    option("--jupyter", is_flag=True, help="Using jupyter notebook magic."),
)
| StarcoderdataPython |
4978838 | #! /usr/bin/env python3
#
import math
from numpy import *
from numpy import bitwise_xor
import datetime
def i4_bit_hi1(n):
    """Return the 1-based position of the highest 1 bit of N (base 2).

    Examples: i4_bit_hi1(1) == 1, i4_bit_hi1(1023) == 10,
    i4_bit_hi1(1024) == 11.  Nonpositive N yields 0.

    Licensing: distributed under the GNU LGPL license.
    Original author: John Burkardt.
    """
    position = 0
    remaining = n
    while 0 < remaining:
        position = position + 1
        remaining = remaining // 2
    return position
def i4_bit_hi1_test():
    """Demonstrate I4_BIT_HI1 on ten pseudorandom integers in [0, 100].

    Licensing: distributed under the GNU LGPL license.
    Original author: John Burkardt.
    """
    import platform

    seed = 123456789
    print('')
    print('I4_BIT_HI1_TEST')
    print(' Python version: %s' % (platform.python_version()))
    print(' I4_BIT_HI1 returns the location of the high 1 bit.')
    print('')
    print(' I I4_BIT_HI1(I)')
    print('')
    for _ in range(10):
        i, seed = i4_uniform_ab(0, 100, seed)
        print(' %8d %8d' % (i, i4_bit_hi1(i)))
    # Terminate.
    print('')
    print('I4_BIT_HI1_TEST')
    print(' Normal end of execution.')
    return
def i4_bit_lo0(n):
    """Return the 1-based position of the lowest 0 bit of N.

    For a nonnegative integer this is one more than the number of
    trailing 1 bits: i4_bit_lo0(0) == 1, i4_bit_lo0(7) == 4,
    i4_bit_lo0(1024) == 1.
    """
    position = 0
    value = n
    while True:
        position = position + 1
        half = value // 2
        if value == 2 * half:
            # value is even, so this bit position holds a 0.
            return position
        value = half
def i4_bit_lo0_test():
    """Smoke-test i4_bit_lo0 on ten random integers in [0, 100]."""
    import platform

    seed = 123456789
    print('')
    print('I4_BIT_LO0_TEST')
    print(f' Python version: {platform.python_version()}')
    print(' I4_BIT_LO0 returns the location of the low 0 bit.')
    print('')
    print(' I I4_BIT_LO0(I)')
    print('')
    for _ in range(10):
        value, seed = i4_uniform_ab(0, 100, seed)
        print(' %8d %8d' % (value, i4_bit_lo0(value)))
    print('')
    print('I4_BIT_LO0_TEST')
    print(' Normal end of execution.')
    return
def i4_sobol_generate(m, n, skip):
    """Generate N points of an M-dimensional Sobol sequence.

    Parameters:
      m    -- spatial dimension (1 <= m <= 40, see i4_sobol)
      n    -- number of points to generate
      skip -- number of initial sequence elements to skip

    Returns a real (m, n) array; column j holds sequence element
    skip + j - 1.
    """
    r = zeros((m, n))
    for col in range(n):
        seed = skip + col - 1
        r[0:m, col], seed = i4_sobol(m, seed)
    return r
def i4_sobol(dim_num, seed):
    # *****************************************************************************80
    #
    ## I4_SOBOL generates a new quasirandom Sobol vector with each call.
    #
    # Discussion:
    #
    # The routine adapts the ideas of Antonov and Saleev.
    #
    # Licensing:
    #
    # This code is distributed under the MIT license.
    #
    # Modified:
    #
    # 22 February 2011
    #
    # Author:
    #
    # Original FORTRAN77 version by <NAME>.
    # MATLAB version by <NAME>.
    # PYTHON version by <NAME>
    #
    # Reference:
    #
    # <NAME>,
    # USSR Computational Mathematics and Mathematical Physics,
    # Volume 19, 1980, pages 252 - 256.
    #
    # <NAME>, <NAME>,
    # Algorithm 659:
    # Implementing Sobol's Quasirandom Sequence Generator,
    # ACM Transactions on Mathematical Software,
    # Volume 14, Number 1, pages 88-100, 1988.
    #
    # <NAME>,
    # Algorithm 647:
    # Implementation and Relative Efficiency of Quasirandom
    # Sequence Generators,
    # ACM Transactions on Mathematical Software,
    # Volume 12, Number 4, pages 362-376, 1986.
    #
    # <NAME>,
    # USSR Computational Mathematics and Mathematical Physics,
    # Volume 16, pages 236-242, 1977.
    #
    # <NAME>, Levitan,
    # The Production of Points Uniformly Distributed in a Multidimensional
    # Cube (in Russian),
    # Preprint IPM Akad. Nauk SSSR,
    # Number 40, Moscow 1976.
    #
    # Parameters:
    #
    # Input, integer DIM_NUM, the number of spatial dimensions.
    # DIM_NUM must satisfy 1 <= DIM_NUM <= 40.
    #
    # Input/output, integer SEED, the "seed" for the sequence.
    # This is essentially the index in the sequence of the quasirandom
    # value to be generated. On output, SEED has been set to the
    # appropriate next value, usually simply SEED+1.
    # If SEED is less than 0 on input, it is treated as though it were 0.
    # An input value of 0 requests the first (0-th) element of the sequence.
    #
    # Output, real QUASI(DIM_NUM), the next quasirandom vector.
    #
    # Persistent generator state is kept in module globals so that
    # consecutive calls continue the same sequence (direction numbers V,
    # primitive polynomials POLY, the Gray-code accumulator LASTQ, and the
    # last seed served).  NOTE: this makes the function non-reentrant.
    global atmost
    global dim_max
    global dim_num_save
    global initialized
    global lastq
    global log_max
    global maxcol
    global poly
    global recipd
    global seed_save
    global v
    if (not 'initialized' in globals().keys()):
        initialized = 0
        dim_num_save = -1
    if (not initialized or dim_num != dim_num_save):
        # First call (or dimension change): (re)build the tables.
        initialized = 1
        dim_max = 40
        dim_num_save = -1
        log_max = 30
        seed_save = -1
#
# Initialize (part of) V.
#
        # V holds the (unscaled) direction numbers from Algorithm 659;
        # row i serves dimension i+1, column j serves bit j+1.
        v = zeros((dim_max, log_max))
        v[0:40, 0] = transpose([ \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
        v[2:40, 1] = transpose([ \
            1, 3, 1, 3, 1, 3, 3, 1, \
            3, 1, 3, 1, 3, 1, 1, 3, 1, 3, \
            1, 3, 1, 3, 3, 1, 3, 1, 3, 1, \
            3, 1, 1, 3, 1, 3, 1, 3, 1, 3])
        v[3:40, 2] = transpose([ \
            7, 5, 1, 3, 3, 7, 5, \
            5, 7, 7, 1, 3, 3, 7, 5, 1, 1, \
            5, 3, 3, 1, 7, 5, 1, 3, 3, 7, \
            5, 1, 1, 5, 7, 7, 5, 1, 3, 3])
        v[5:40, 3] = transpose([ \
            1, 7, 9, 13, 11, \
            1, 3, 7, 9, 5, 13, 13, 11, 3, 15, \
            5, 3, 15, 7, 9, 13, 9, 1, 11, 7, \
            5, 15, 1, 15, 11, 5, 3, 1, 7, 9])
        v[7:40, 4] = transpose([ \
            9, 3, 27, \
            15, 29, 21, 23, 19, 11, 25, 7, 13, 17, \
            1, 25, 29, 3, 31, 11, 5, 23, 27, 19, \
            21, 5, 1, 17, 13, 7, 15, 9, 31, 9])
        v[13:40, 5] = transpose([ \
            37, 33, 7, 5, 11, 39, 63, \
            27, 17, 15, 23, 29, 3, 21, 13, 31, 25, \
            9, 49, 33, 19, 29, 11, 19, 27, 15, 25])
        v[19:40, 6] = transpose([ \
            13, \
            33, 115, 41, 79, 17, 29, 119, 75, 73, 105, \
            7, 59, 65, 21, 3, 113, 61, 89, 45, 107])
        v[37:40, 7] = transpose([ \
            7, 23, 39])
#
# Set POLY.
#
        # Primitive polynomials modulo 2, encoded as integers; the bits of
        # poly[i] are the coefficients for dimension i+1.
        poly = [ \
            1, 3, 7, 11, 13, 19, 25, 37, 59, 47, \
            61, 55, 41, 67, 97, 91, 109, 103, 115, 131, \
            193, 137, 145, 143, 241, 157, 185, 167, 229, 171, \
            213, 191, 253, 203, 211, 239, 247, 285, 369, 299]
        atmost = 2 ** log_max - 1
#
# Find the number of bits in ATMOST.
#
        maxcol = i4_bit_hi1(atmost)
#
# Initialize row 1 of V.
#
        v[0, 0:maxcol] = 1
#
# Things to do only if the dimension changed.
#
    if (dim_num != dim_num_save):
#
# Check parameters.
#
        if (dim_num < 1 or dim_max < dim_num):
            print('I4_SOBOL - Fatal error!')
            print(' The spatial dimension DIM_NUM should satisfy:')
            print(' 1 <= DIM_NUM <= %d' % dim_max)
            print(' But this input value is DIM_NUM = %d' % dim_num)
            return
        dim_num_save = dim_num
#
# Initialize the remaining rows of V.
#
        for i in range(2, dim_num + 1):
#
# The bits of the integer POLY(I) gives the form of polynomial I.
#
# Find the degree of polynomial I from binary encoding.
#
            j = poly[i - 1]
            m = 0
            while (1):
                j = math.floor(j / 2.)
                if (j <= 0):
                    break
                m = m + 1
#
# Expand this bit pattern to separate components of the logical array INCLUD.
#
            j = poly[i - 1]
            includ = zeros(m)
            for k in range(m, 0, -1):
                j2 = math.floor(j / 2.)
                includ[k - 1] = (j != 2 * j2)
                j = j2
#
# Calculate the remaining elements of row I as explained
# in Bratley and Fox, section 2.
#
            # Each new direction number is the XOR recurrence driven by the
            # polynomial's coefficient bits (INCLUD).
            for j in range(m + 1, maxcol + 1):
                newv = v[i - 1, j - m - 1]
                l = 1
                for k in range(1, m + 1):
                    l = 2 * l
                    if (includ[k - 1]):
                        newv = bitwise_xor(int(newv), int(l * v[i - 1, j - k - 1]))
                v[i - 1, j - 1] = newv
#
# Multiply columns of V by appropriate power of 2.
#
        l = 1
        for j in range(maxcol - 1, 0, -1):
            l = 2 * l
            v[0:dim_num, j - 1] = v[0:dim_num, j - 1] * l
#
# RECIPD is 1/(common denominator of the elements in V).
#
        recipd = 1.0 / (2 * l)
        lastq = zeros(dim_num)
    seed = int(math.floor(seed))
    if (seed < 0):
        seed = 0
    # Position the Gray-code accumulator LASTQ for the requested SEED.
    # The common fast path is seed == seed_save + 1; jumping elsewhere
    # replays the recurrence from the nearest known state.
    if (seed == 0):
        l = 1
        lastq = zeros(dim_num)
    elif (seed == seed_save + 1):
#
# Find the position of the right-hand zero in SEED.
#
        l = i4_bit_lo0(seed)
    elif (seed <= seed_save):
        # Jumping backwards: restart from element 0 and fast-forward.
        seed_save = 0
        l = 1
        lastq = zeros(dim_num)
        for seed_temp in range(int(seed_save), int(seed)):
            l = i4_bit_lo0(seed_temp)
            for i in range(1, dim_num + 1):
                lastq[i - 1] = bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1]))
        l = i4_bit_lo0(seed)
    elif (seed_save + 1 < seed):
        # Jumping forwards: advance the accumulator step by step.
        for seed_temp in range(int(seed_save + 1), int(seed)):
            l = i4_bit_lo0(seed_temp)
            for i in range(1, dim_num + 1):
                lastq[i - 1] = bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1]))
        l = i4_bit_lo0(seed)
#
# Check that the user is not calling too many times!
#
    if (maxcol < l):
        print('I4_SOBOL - Fatal error!')
        print(' Too many calls!')
        print(' MAXCOL = %d\n' % maxcol)
        print(' L = %d\n' % l)
        return
#
# Calculate the new components of QUASI.
#
    quasi = zeros(dim_num)
    for i in range(1, dim_num + 1):
        quasi[i - 1] = lastq[i - 1] * recipd
        lastq[i - 1] = bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1]))
    seed_save = seed
    seed = seed + 1
    return [quasi, seed]
def i4_uniform_ab(a, b, seed):
    """Return a pseudorandom integer uniformly distributed in [A, B].

    Uses the Lehmer / Park-Miller multiplicative congruential generator
        seed <- 16807 * seed mod (2^31 - 1)
    and scales the resulting uniform value onto [A, B] by rounding.

    Parameters:
      a, b -- minimum and maximum acceptable values (either order)
      seed -- generator state; must not reduce to 0 mod 2^31 - 1

    Returns (value, seed): the sampled integer and the updated seed.
    Raises SystemExit if the (reduced) seed is 0.
    """
    huge = 2147483647
    seed = int(seed) % huge
    if seed < 0:
        seed = seed + huge
    if seed == 0:
        print('')
        print('I4_UNIFORM_AB - Fatal error!')
        print(' Input SEED = 0!')
        raise SystemExit('I4_UNIFORM_AB - Fatal error!')
    # One Lehmer step, done in exact integer arithmetic (Schrage-style split).
    quotient = seed // 127773
    seed = 16807 * (seed - quotient * 127773) - quotient * 2836
    if seed < 0:
        seed = seed + huge
    r = seed * 4.656612875E-10
    # Scale r onto (lo - 0.5, hi + 0.5), then round into [lo, hi].
    lo = min(round(a), round(b))
    hi = max(round(a), round(b))
    r = (1.0 - r) * (lo - 0.5) + r * (hi + 0.5)
    value = int(round(r))
    value = max(value, lo)
    value = min(value, hi)
    return value, seed
def i4_uniform_ab_test():
    """Print twenty samples from i4_uniform_ab on the interval [-100, 200]."""
    import platform

    a, b, seed = -100, 200, 123456789
    print('')
    print('I4_UNIFORM_AB_TEST')
    print(f' Python version: {platform.python_version()}')
    print(' I4_UNIFORM_AB computes pseudorandom values')
    print(' in an interval [A,B].')
    print('')
    print(' The lower endpoint A = %d' % (a))
    print(' The upper endpoint B = %d' % (b))
    print(' The initial seed is %d' % (seed))
    print('')
    for index in range(1, 21):
        value, seed = i4_uniform_ab(a, b, seed)
        print(' %8d %8d' % (index, value))
    print('')
    print('I4_UNIFORM_AB_TEST:')
    print(' Normal end of execution.')
    return
def prime_ge(n):
    """Return the smallest prime that is greater than or equal to N.

    Examples: prime_ge(-10) == 2, prime_ge(4) == 5, prime_ge(10) == 11.
    The search starts at max(ceil(n), 2) and walks upward.
    """
    candidate = max(math.ceil(n), 2)
    while not isprime(candidate):
        candidate = candidate + 1
    return candidate
def isprime(n):
    # *****************************************************************************80
    #
    ## IS_PRIME returns True if N is a prime number, False otherwise.
    #
    # Discussion:
    #
    # Trial division by odd numbers up to sqrt(N), O(sqrt N) instead of
    # the previous O(N) scan. Float inputs are accepted when they are
    # exact integers (e.g. 7.0), matching the original behavior.
    #
    # Parameters:
    #
    # Input, integer N, the number to be checked.
    #
    # Output, boolean value, True or False
    #
    # Non-integers and anything below 2 are not prime. (The previous
    # version wrongly reported 1 as prime.)
    if n != int(n) or n < 2:
        return False
    n = int(n)
    if n < 4:
        return True  # 2 and 3 are prime.
    if n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking.
    p = 3
    while p * p <= n:
        if n % p == 0:
            return False
        p = p + 2
    return True
def r4_uniform_01(seed):
    """Return a pseudorandom value in (0, 1) and the updated seed.

    Implements the Lehmer / Park-Miller recursion
        seed <- 16807 * seed mod (2^31 - 1)
        r    <- seed / (2^31 - 1)
    The first steps from seed 12345 are 207482415 (r ~ 0.096616) and
    1790989824 (r ~ 0.833995).

    Raises SystemExit if the input seed is 0.
    """
    huge = 2147483647
    if seed == 0:
        print('')
        print('R4_UNIFORM_01 - Fatal error!')
        print(' Input SEED = 0!')
        raise SystemExit('R4_UNIFORM_01 - Fatal error!')
    seed = seed % huge
    if seed < 0:
        seed = seed + huge
    # One Lehmer step in exact integer arithmetic (Schrage-style split).
    quotient = seed // 127773
    seed = 16807 * (seed - quotient * 127773) - quotient * 2836
    if seed < 0:
        seed = seed + huge
    return seed * 4.656612875E-10, seed
def r4_uniform_01_test():
    """Show a sequence from r4_uniform_01 and verify it restarts identically."""
    import platform

    print('')
    print('R4_UNIFORM_01_TEST')
    print(f' Python version: {platform.python_version()}')
    print(' R4_UNIFORM_01 produces a sequence of random values.')
    seed = 123456789
    print('')
    print(' Using random seed %d' % (seed))
    print('')
    print(' SEED R4_UNIFORM_01(SEED)')
    print('')
    for _ in range(10):
        x, seed = r4_uniform_01(seed)
        print(' %12d %14f' % (seed, x))
    print('')
    print(' Verify that the sequence can be restarted.')
    print(' Set the seed back to its original value, and see that')
    print(' we generate the same sequence.')
    seed = 123456789
    print('')
    print(' SEED R4_UNIFORM_01(SEED)')
    print('')
    for _ in range(10):
        x, seed = r4_uniform_01(seed)
        print(' %12d %14f' % (seed, x))
    print('')
    print('R4_UNIFORM_01_TEST')
    print(' Normal end of execution.')
    return
def r8mat_write(filename, m, n, a):
    """Write the M-by-N matrix A to FILENAME, one row per line.

    Each entry is rendered with the ' %g' format, so a row of
    [1.5, 2.0] becomes the line ' 1.5 2'.
    """
    with open(filename, 'w') as stream:
        for row in range(m):
            line = ''.join(' %g' % (a[row, col]) for col in range(n))
            stream.write(line + '\n')
    return
def r8mat_write_test():
    """Write a small 5x3 matrix to disk via r8mat_write."""
    import numpy as np
    import platform

    print('')
    print('R8MAT_WRITE_TEST:')
    print(f' Python version: {platform.python_version()}')
    print(' Test R8MAT_WRITE, which writes an R8MAT to a file.')
    filename = 'r8mat_write_test.txt'
    matrix = np.array([[1.1, 1.2, 1.3],
                       [2.1, 2.2, 2.3],
                       [3.1, 3.2, 3.3],
                       [4.1, 4.2, 4.3],
                       [5.1, 5.2, 5.3]])
    r8mat_write(filename, 5, 3, matrix)
    print('')
    print(' Created file "%s".' % (filename))
    print('')
    print('R8MAT_WRITE_TEST:')
    print(' Normal end of execution.')
    return
def tau_sobol(dim_num):
    # *****************************************************************************80
    #
    ## TAU_SOBOL defines favorable starting seeds for Sobol sequences.
    #
    # Discussion:
    #
    # For spatial dimensions 1 through 13, this routine returns
    # a "favorable" value TAU by which an appropriate starting point
    # in the Sobol sequence can be determined.
    #
    # These starting points have the form N = 2**K, where
    # for integration problems, it is desirable that
    # TAU + DIM_NUM - 1 <= K
    # while for optimization problems, it is desirable that
    # TAU < K.
    #
    # Reference:
    #
    # Bratley, Fox,
    # Algorithm 659: Implementing Sobol's Quasirandom Sequence Generator,
    # ACM Transactions on Mathematical Software,
    # Volume 14, Number 1, pages 88-100, 1988.
    #
    # Parameters:
    #
    # Input, integer DIM_NUM, the spatial dimension. Only values
    # of 1 through 13 will result in useful responses.
    #
    # Output, integer TAU, the value TAU, or -1 if DIM_NUM is out of range.
    #
    dim_max = 13
    # tau_table[k] is the favorable TAU for dimension k + 1 (dims 1..13).
    tau_table = [0, 0, 1, 3, 5, \
                 8, 11, 15, 19, 23, \
                 27, 31, 35]
    if (1 <= dim_num and dim_num <= dim_max):
        # BUG FIX: DIM_NUM is 1-based while the Python list is 0-based; the
        # previous tau_table[dim_num] looked up the wrong dimension and
        # raised IndexError for DIM_NUM = 13.
        tau = tau_table[dim_num - 1]
    else:
        tau = - 1
    return tau
def sobol_test01():
    """Show numpy's bitwise_xor on ten pairs of random integers."""
    print('')
    print('SOBOL_TEST01')
    print(' BITWISE_XOR returns the bitwise exclusive OR of two integers.')
    print('')
    print(' I J BITXOR(I,J)')
    print('')
    seed = 123456789
    for _ in range(10):
        lhs, seed = i4_uniform_ab(0, 100, seed)
        rhs, seed = i4_uniform_ab(0, 100, seed)
        print(' %6d %6d %6d' % (lhs, rhs, bitwise_xor(lhs, rhs)))
    return
def sobol_test02():
    """Spot-check i4_bit_hi1 on ten random integers."""
    print('')
    print('SOBOL_TEST02')
    print(' I4_BIT_HI1 returns the location of the high 1 bit.')
    print('')
    print(' I I4_BIT_HI1(I)')
    print('')
    seed = 123456789
    for _ in range(10):
        value, seed = i4_uniform_ab(0, 100, seed)
        print('%6d %6d' % (value, i4_bit_hi1(value)))
    return
def sobol_test03():
    """Spot-check i4_bit_lo0 on ten random integers."""
    print('')
    print('SOBOL_TEST03')
    print(' I4_BIT_LO0 returns the location of the low 0 bit.')
    print('')
    print(' I I4_BIT_LO0(I)')
    print('')
    seed = 123456789
    for _ in range(10):
        value, seed = i4_uniform_ab(0, 100, seed)
        print('%6d %6d' % (value, i4_bit_lo0(value)))
    return
def sobol_test04():
    """Call I4_SOBOL repeatedly for dimensions 2..4, printing the first
    dozen and the last sixteen of 111 sequence elements each time."""
    print('\nSOBOL_TEST04')
    print(' I4_SOBOL returns the next element')
    print(' of a Sobol sequence.')
    print('\n In this test, we call I4_SOBOL repeatedly.\n')
    for dim_num in range(2, 5):
        seed = 0
        qs = prime_ge(dim_num)  # kept for parity with the original test
        print('\n Using dimension DIM_NUM = %d' % dim_num)
        print('\n Seed Seed I4_SOBOL')
        print(' In Out\n')
        for step in range(111):
            r, seed_out = i4_sobol(dim_num, seed)
            if step <= 11 or 95 <= step:
                row = '%6d %6d ' % (seed, seed_out)
                row += ''.join('%10f ' % r[j] for j in range(dim_num))
                print(row)
            elif step == 12:
                print('......................')
            seed = seed_out
    return
def sobol_test05():
    """Demonstrate that I4_SOBOL's SEED can be moved forward or backward
    and still reproduce the same stretch of the sequence."""
    def show_walk(seed, steps):
        # Print `steps` consecutive Sobol vectors starting at `seed`.
        print('')
        print(' Seed Seed I4_SOBOL')
        print(' In Out')
        print('')
        for _ in range(steps):
            r, seed_out = i4_sobol(dim_num, seed)
            row = '%6d %6d ' % (seed, seed_out)
            row += ''.join('%10f ' % value for value in r[0:dim_num])
            print(row)
            seed = seed_out

    print('')
    print('SOBOL_TEST05')
    print(' I4_SOBOL computes the next element of a Sobol sequence.')
    print('')
    print(' In this test, we demonstrate how the SEED can be')
    print(' manipulated to skip ahead in the sequence, or')
    print(' to come back to any part of the sequence.')
    print('')
    dim_num = 3
    print('')
    print(' Using dimension DIM_NUM = %d\n' % dim_num)
    show_walk(0, 11)
    print('')
    print(' Jump ahead by increasing SEED:')
    print('')
    show_walk(100, 5)
    print('')
    print(' Jump back by decreasing SEED:')
    print('')
    show_walk(3, 11)
    print('')
    print(' Jump back by decreasing SEED:')
    print('')
    show_walk(98, 5)
    return
def sobol_test(argv=None):
    """Run every SOBOL self-test, bracketed by timestamps.

    The ``argv`` parameter is accepted for command-line compatibility but
    is not used.
    """
    import platform  # retained from the original; not otherwise used

    print(datetime.datetime.today().strftime("%d-%b-%Y %H:%M:%S"))
    print('')
    print('SOBOL_TEST')
    print(' Test the SOBOL routines.')
    for test_fn in (sobol_test01, sobol_test02, sobol_test03,
                    sobol_test04, sobol_test05):
        test_fn()
    print('')
    print('SOBOL_TEST')
    print(' Normal end of execution.')
    print(datetime.datetime.today().strftime("%d-%b-%Y %H:%M:%S"))
# Run the full SOBOL self-test suite when executed as a script.
if __name__ == "__main__":
    sobol_test()
| StarcoderdataPython |
6496636 | # Author: <NAME> (<EMAIL>)
import argparse
import dynet as dy
import matplotlib.pyplot as plt
import numpy as np
import random
from core.information_theory import InformationTheory
def main(args):
    """Interpolate linearly between two joint distributions and plot the
    mutual information (MI) along the path, searching for a pair whose MI
    curve changes direction ``args.turn`` times.

    NOTE: this file is Python 2 (print statements, xrange); the trailing
    commas on prints suppress the newline.
    """
    random.seed(args.seed)
    np.random.seed(args.seed)
    info = InformationTheory()
    num_points_except_end = args.num_points - 1
    stepsize = 1.0 / num_points_except_end
    found = False
    epsilon = 1e-6  # tolerance for deciding the curve really changed direction
    while not found:
        # NOTE(review): these random draws are dead code -- both Omegas are
        # immediately overwritten by the hard-coded tensors below, so the
        # outer while loop examines the same pair forever unless `found`
        # is reached on the first pass.
        Omega1 = info.rand_joint(args.zsize, args.zsize)
        Omega2 = info.rand_joint(args.zsize, args.zsize)
        #Omega1 = dy.inputTensor([[1.0, 0.0],
        # [0.0, 0.1]]) # NOT doubly stochastic!
        #Omega2 = dy.inputTensor([[0.0, 0.1],
        # [0.1, 0.0]])
        Omega1 = dy.inputTensor([[0.4940, 0.3006],
                                 [0.1383, 0.0671]])
        Omega2 = dy.inputTensor([[0.1513, 0.2415],
                                 [0.2545, 0.3527]])
        print
        print "Going from: "
        print Omega1.value()
        print "to"
        print Omega2.value()
        print
        alpha = 0
        point_indices = []
        mi_values = []
        increasing = False
        decreasing = False
        num_turns = 0
        for point_index in xrange(args.num_points):
            # Convex combination of the two joints; MI evaluated at each step.
            Omega = (1.0 - alpha) * Omega1 + alpha * Omega2
            mi_value = info.mi_zero(Omega).value()
            point_indices.append(point_index + 1)
            mi_values.append(mi_value)
            alpha += stepsize
            if point_index == 1:
                # First comparison: mi_value_before was set at point_index 0.
                print "point {0}, MI: {1} -> {2}".format(point_index + 1,
                                                         mi_value_before,
                                                         mi_value),
                if mi_value > mi_value_before:
                    increasing = True
                    decreasing = False
                if mi_value < mi_value_before:
                    increasing = False
                    decreasing = True
                if increasing:
                    print "increasing"
                if decreasing:
                    print "decreasing"
            elif point_index > 1:
                if increasing:
                    print "point {0} increasing, now MI: {1} -> {2}".format(
                        point_index + 1, mi_value_before, mi_value),
                    if mi_value < mi_value_before - epsilon:
                        # Direction flip: count a turn; stop when we hit the target.
                        increasing = False
                        decreasing = True
                        print "inc->dec",
                        num_turns += 1
                        print "TURNED {0} times".format(num_turns),
                        if num_turns == args.turn and not found:
                            print " ------ FOUND",
                            found = True
                    print
                if decreasing:
                    print "point {0} decreasing, now MI: {1} -> {2}".format(
                        point_index + 1, mi_value_before, mi_value),
                    if mi_value > mi_value_before + epsilon:
                        increasing = True
                        decreasing = False
                        print "dec->inc",
                        num_turns += 1
                        print "TURNED {0} times".format(num_turns),
                        if num_turns == args.turn and not found:
                            print " ------ FOUND",
                            found = True
                    print
            mi_value_before = mi_value
            #break
        assert len(point_indices) == args.num_points
        assert len(mi_values) == args.num_points
        plt.plot(point_indices, mi_values)
        plt.show()
# Command-line entry point: configure the experiment's parameters
# (distribution size, interpolation resolution, target turn count, seed)
# and run it.
if __name__ == "__main__":
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--zsize", type=int, default=2,
                           help="number of variables: %(default)d")
    argparser.add_argument("--num-points", type=int, default=100,
                           help="number of interpolated points: %(default)d")
    argparser.add_argument("--turn", type=int, default=3,
                           help="number of turns: %(default)d")
    argparser.add_argument("--seed", type=int, default=1024,
                           help="random seed: %(default)d")
    parsed_args = argparser.parse_args()
    main(parsed_args)
| StarcoderdataPython |
3318693 | <reponame>barni2000/Ion<filename>ionlib/launch.py<gh_stars>1-10
""" Launch a game from Steam """
from . import wine
from . import setup
from .logger import log
from .cmdline import args
from .game import Game
def get_path():
    """ Run ``winepath`` on ``args.path`` (Windows form in compat mode).

    Note: despite the original docstring, nothing is returned here --
    ``wine.run`` is invoked for its effect (presumably winepath prints
    the translated path; confirm against ``wine.run``).
    """
    command = ['winepath', args.path]
    if 'compat' in args.mode:
        # '-w' asks winepath for the Windows-style path.
        command.insert(1, '-w')
    wine.run(command)
def run(wait=False):
    """ Run game fix """
    # Game() presumably gathers its configuration from CLI/env elsewhere;
    # it takes no arguments here (TODO confirm against the Game class).
    game = Game()
    setup.game_setup(game)
    log.info(game)
    if wait:
        # Block until the wineserver has finished before launching,
        # so a previous session does not interfere with this one.
        wine.server('wait', env=game.env)
    wine.run(game.cmd, env=game.env)
| StarcoderdataPython |
3532684 |
class Command:
    """A province's order for one turn: hold ('H'), move ('-') or support ('S').

    Attributes:
        cmd    -- order type; defaults to hold
        target -- destination province for a move order
        atk    -- provinces attacking this province
        sup    -- provinces supporting this order
    """

    def __init__(self):
        self.cmd = 'H'
        self.target = None
        self.atk = []
        self.sup = []

    def __repr__(self):
        return f'{self.cmd} ({self.atk} vs {self.sup})'
# Diplomacy power IDs mapped to country names; move() iterates these keys
# to collect each power's submitted orders.
countries = {1: "Russia",
             2: "England",
             3: "Germany",
             4: "France",
             5: "Austria",
             6: "Italy",
             7: "Turkey"}
def move(map_,orders_):
    # Resolve one movement phase.
    #
    # Builds a Command for every province that currently holds a unit,
    # annotates it from the submitted orders (moves, supports, holds),
    # resolves which orders succeed, applies the successful moves to the
    # map, and returns the list of dislodged units that must retreat.
    #
    # Order tuples are indexed positionally: element [2] is the order type
    # ('-' move, 'S' support, 'H' hold).
    orders = { n: Command() for n, p in map_.provinces.items() if p.unit != None }
    ordrs = []
    for i in countries:
        ordrs = ordrs + orders_[i]
    for command in ordrs:
        if command[2] == '-':
            # Move order: (_, attacker, '-', target).
            _, attacker, _, target = command
            try:
                orders[attacker].cmd = '-'
                orders[attacker].target = target
                if target in orders:
                    orders[target].atk.append(attacker)
            except KeyError: continue
        elif command[2] == 'S':
            # Support order: a 6-tuple supports a move, a 5-tuple a hold.
            # NOTE(review): in the 5-tuple case ``target`` keeps its value
            # from a previous iteration (or is unbound on the first one),
            # so the atk-append below may hit the wrong province.
            try:
                _, _, supporter, attacker, _, target = command
            except ValueError:
                _, _, supporter, attacker, _ = command
            try:
                orders[supporter].cmd = 'S'
                orders[target].atk.append(supporter)
                orders[attacker].sup.append(supporter)
            except KeyError: continue
        elif command[2] == 'H':
            pass
    success = []
    fails = []
    retreats = []
    for p in orders.keys():
        if succeeds(p,orders):
            success.append(p)
            if(orders[p].cmd == '-'):
                try:
                    map_.moveUnit(p,orders[p].target)
                except AssertionError:
                    # Destination occupied: dislodge the defender, record
                    # it for the retreat phase, then complete the move.
                    retreats.append((map_.getUnitByProvince(orders[p].target),orders[p].target)) #unit,prev location
                    map_.deleteUnit(orders[p].target)
                    map_.moveUnit(p,orders[p].target)
            else:
                pass
        else:
            fails.append(p)
    return retreats
def active(p,q):
if p not in q:
return False
c = q[p]
if c.cmd == 'S':
if any([x for x in c.atk if active(x,q)]):
return False
return True
def support(p,q):
return sum([1 for x in q[p].sup if active(x,q)])
def succeeds(p,q):
c = q[p]
if c.cmd == 'H':
if not c.atk:
return True
return max([support(x,q) for x in c.atk]) <= support(p,q)
if c.cmd == '-':
if not active(c.target,q) or (q[c.target].cmd == '-' and succeeds(c.target,q)):
return True
return support(p,q) > support(c.target,q)
if c.cmd == 'S':
return not active(p,q)
def retreat(orders_,retreats):
    # Process retreat orders for units dislodged during move resolution.
    #
    # orders_  -- mapping whose values are order tuples; index [1] is read
    #             as the origin province and [3] as the retreat destination
    # retreats -- list of (unit, previous_location) pairs from move()
    #
    # NOTE(review): this function references ``map_``, which is neither a
    # parameter nor defined at module level in this file -- it raises
    # NameError unless a global ``map_`` exists at call time.
    # NOTE(review): the blanket ``except AssertionError: pass`` silently
    # drops any retreat whose destination turns out to be invalid.
    for u,loc in retreats:
        try:
            newLoc = None
            for i in orders_.values():
                if(i[1] == loc):
                    newLoc = i[3]
            if(newLoc):
                if(map_.isValidRetreat(u.type, loc, newLoc)):
                    map_.placeUnit(u.type, u.controllerID, newLoc)
        except AssertionError: pass
def build(players, map_):
    """Recompute province control and return each country's build allowance.

    For every player's country: mark the provinces its units occupy as
    controlled, then compute owned supply depots minus unit count.
    Returns {country_id: signed build count} for country IDs 1..7.
    """
    unitsToBuild = {country: 0 for country in range(1, 8)}
    for player in players:
        ctry = players[player][1]
        units = map_.getUnitsByCountry(ctry)
        for loc, _unit in units:
            map_.changeController(loc, ctry)
        depots = len(map_.getOwnedSupplyDepots(ctry))
        unitsToBuild[ctry] = depots - len(units)
    return unitsToBuild
def resolveWinterOrders(players, map_, orders, unitsToBuild):
    """Apply winter-adjustment orders: build or disband units per country.

    players      -- mapping of player id -> tuple whose [1] entry is a country id
    map_         -- game map (placeUnit / deleteUnit / getUnitsByCountry)
    orders       -- country id -> list of orders; order[0] is a unit type and
                    order[1] a location (as the build branch uses them)
    unitsToBuild -- country id -> signed adjustment count from build()
    """
    for i in players:
        ctry = players[i][1]
        delta = unitsToBuild[ctry]
        if delta > 0:
            # Build units at the ordered locations, up to the allowance.
            unitsBuilt = 0
            for order in orders[ctry]:
                if unitsBuilt < delta:
                    map_.placeUnit(order[0], ctry, order[1])
                    unitsBuilt += 1
        elif delta < 0:
            # BUG FIX: the original compared the removal counter against the
            # *negative* delta (never true, so nothing was ever disbanded)
            # and compared a list of orders directly against an int
            # (TypeError on Python 3).  Disband -delta units: honor explicit
            # removal orders first, then fall back to arbitrary units.
            toRemove = -delta
            unitsRemoved = 0
            for order in orders[ctry]:
                if unitsRemoved < toRemove:
                    map_.deleteUnit(order[1])
                    unitsRemoved += 1
            if unitsRemoved < toRemove:
                # Entries from getUnitsByCountry are (location, unit) pairs
                # (see build()), so delete by location, not the unit object.
                for loc, _unit in map_.getUnitsByCountry(ctry):
                    if unitsRemoved < toRemove:
                        map_.deleteUnit(loc)
                        unitsRemoved += 1
        # delta == 0: no adjustment needed for this country.
| StarcoderdataPython |
3422091 | from scgapi.Scg import Scg
import scgapi.MessageRequest
import scgapi.Contact
import scgapi.ContactGroup
import argparse
def send_sms(api, config, bob_mdn, alice_mdn, senderid):
    """Demo flow: create two contacts and a group, send one SMS to the group,
    then delete the group and both contacts again.

    :param api: URL of the API server (None selects the library default).
    :param config: path to a JSON file holding the authentication data.
    :param bob_mdn: GSM number for the first demo contact.
    :param alice_mdn: GSM number for the second demo contact.
    :param senderid: sender-id the message request is sent from.
    """
    # Authentication data is loaded from the JSON file given on the CLI.
    auth = scgapi.AuthInfo(config=config)
    # Create the API client object...
    scg = Scg()
    # ...and open an authenticated session against the server.
    session = scg.connect(auth, api)
    # Create some contacts
    contacts_res = scgapi.Contact.Resource(session)
    bob = contacts_res.create(first_name="Bob", primary_mdn=bob_mdn)
    alice = contacts_res.create(first_name="Alice", primary_mdn=alice_mdn)
    # Create a group
    grp_res = scgapi.ContactGroup.Resource(session)
    friends = grp_res.get(grp_res.create(name="Our Friends"))
    # add our new friends to the group
    friends.add_contact([bob, alice])
    # Get a MessageRequest resource
    mrq_res = scgapi.MessageRequest.Resource(session)
    # Send the message to every member of the group.
    # Note that from_ has a padding underscore due to
    # syntax constraints in Python.
    reqid = mrq_res.create(
        from_=senderid,
        to=["group:" + friends.id],
        body="Hello, this is a SMS message to our friends.")
    print('Sent message request {} to group {}'.format(reqid, friends.id))
    # Clean up: remove the demo group and contacts we just created.
    friends.delete()
    contacts_res.delete(bob)
    contacts_res.delete(alice)
if __name__ == '__main__':
    # Command-line entry point: positional sender/recipients, optional
    # API endpoint and auth-file overrides.
    cli = argparse.ArgumentParser()
    cli.add_argument('senderid', help='Sender-id to send from')
    cli.add_argument('bob', help='GSM number to send to')
    cli.add_argument('alice', help='GSM number to send to')
    cli.add_argument('--api', help='URL to the API server', default=None)
    cli.add_argument('--auth', help='Location of json auth file', default='auth.json')
    opts = cli.parse_args()
    send_sms(api=opts.api, config=opts.auth, bob_mdn=opts.bob,
             alice_mdn=opts.alice, senderid=opts.senderid)
| StarcoderdataPython |
178820 | import PandasPatch
from iago import *
import cp2k
from Analyser import Analyser | StarcoderdataPython |
3254320 | <filename>tensorflow_datasets/text/unifiedqa/unifiedqa_test.py
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UnifiedQA dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text.unifiedqa import unifiedqa
# Every fake-data config uses the same example counts per split.
_SPLIT_SIZES = {'train': 3, 'validation': 1, 'test': 1}
# File inside each config's fake-data directory that backs a split.
_SPLIT_FILES = {'train': 'train.tsv', 'validation': 'dev.tsv', 'test': 'test.tsv'}

_TVT = ('train', 'validation', 'test')
_TV = ('train', 'validation')

# (module-level test class name, builder config name, splits in the fake data).
_TEST_CASES = [
    ('UnifiedQAAI2ScienceElementaryTest', 'ai2_science_elementary', _TVT),
    ('UnifiedQAAI2ScienceMiddleTest', 'ai2_science_middle', _TVT),
    ('UnifiedQAAmbigQATest', 'ambigqa', _TV),
    ('UnifiedQAARCEasyTest', 'arc_easy', _TVT),
    ('UnifiedQAARCEasyDevTest', 'arc_easy_dev', _TVT),
    ('UnifiedQAARCEasyIRTest', 'arc_easy_with_ir', _TVT),
    ('UnifiedQAARCEasyIRDevTest', 'arc_easy_with_ir_dev', _TVT),
    ('UnifiedQAARCHardTest', 'arc_hard', _TVT),
    ('UnifiedQAARCHardDevTest', 'arc_hard_dev', _TVT),
    ('UnifiedQAARCHardIRTest', 'arc_hard_with_ir', _TVT),
    ('UnifiedQAARCHardIRDevTest', 'arc_hard_with_ir_dev', _TVT),
    ('UnifiedQABoolQTest', 'boolq', _TV),
    ('UnifiedQABoolQNPTest', 'boolq_np', _TV),
    ('UnifiedQACQATest', 'commonsenseqa', _TVT),
    ('UnifiedQACQATestTest', 'commonsenseqa_test', _TVT),
    ('UnifiedQAContrastSetsBoolQTest', 'contrast_sets_boolq', _TV),
    ('UnifiedQAContrastSetsDROPTest', 'contrast_sets_drop', _TV),
    ('UnifiedQAContrastSetsQuorefTest', 'contrast_sets_quoref', _TV),
    ('UnifiedQAContrastSetsROPESTest', 'contrast_sets_ropes', _TV),
    ('UnifiedQADROPTest', 'drop', _TV),
    ('UnifiedQAMCTestTest', 'mctest', _TV),
    ('UnifiedQAMCTestCorrectedTheSeparatorTest', 'mctest_corrected_the_separator', _TV),
    ('UnifiedQAMultiRCTest', 'multirc', _TV),
    ('UnifiedQANarrativeQATest', 'narrativeqa', _TVT),
    ('UnifiedQANarrativeQADevTest', 'narrativeqa_dev', _TVT),
    ('UnifiedQANatQATest', 'natural_questions', _TV),
    ('UnifiedQANatQADirectAnsTest', 'natural_questions_direct_ans', _TVT),
    ('UnifiedQANatQADirectAnsTestTest', 'natural_questions_direct_ans_test', _TVT),
    ('UnifiedQANatQADPRParaTest', 'natural_questions_with_dpr_para', _TV),
    ('UnifiedQANatQADPRParaTestTest', 'natural_questions_with_dpr_para_test',
     ('train', 'test')),
    ('UnifiedQANewsQATest', 'newsqa', _TV),
    ('UnifiedQAOBQATest', 'openbookqa', _TVT),
    ('UnifiedQAOBQADevTest', 'openbookqa_dev', _TVT),
    ('UnifiedQAOBQAIRTest', 'openbookqa_with_ir', _TVT),
    ('UnifiedQAOBQAIRDevTest', 'openbookqa_with_ir_dev', _TVT),
    ('UnifiedQAPIQATest', 'physical_iqa', _TV),
    ('UnifiedQAQASCTest', 'qasc', _TVT),
    ('UnifiedQAQASCTestTest', 'qasc_test', _TVT),
    ('UnifiedQAQASCIRTest', 'qasc_with_ir', _TVT),
    ('UnifiedQAQASCIRTestTest', 'qasc_with_ir_test', _TVT),
    ('UnifiedQAQuorefTest', 'quoref', _TV),
    ('UnifiedQARACEStringDevTest', 'race_string_dev', _TVT),
    ('UnifiedQARACEStringTest', 'race_string', _TVT),
    ('UnifiedQAROPESTest', 'ropes', _TV),
    ('UnifiedQASIQATest', 'social_iqa', _TV),
    ('UnifiedQASQuAD11Test', 'squad1_1', _TV),
    ('UnifiedQASQuAD20Test', 'squad2', _TV),
    ('UnifiedQAWGLTest', 'winogrande_l', _TV),
    ('UnifiedQAWGMTest', 'winogrande_m', _TV),
    ('UnifiedQAWGSTest', 'winogrande_s', _TVT),
]


def _make_test_case(class_name, config_name, splits):
  """Create one DatasetBuilderTestCase subclass for a UnifiedQA builder config.

  The original file spelled out ~50 structurally identical classes; they are
  generated here from `_TEST_CASES` instead. Each generated class carries
  exactly the same attributes as its hand-written predecessor.

  Args:
    class_name: module-level name the class will be published under.
    config_name: the builder config this class tests.
    splits: split names present in the config's fake data.

  Returns:
    A new subclass of `tfds.testing.DatasetBuilderTestCase`.
  """
  attrs = {
      '__doc__': 'Tests for {}.'.format(config_name),
      'BUILDER_CONFIG_NAMES_TO_TEST': [config_name],
      'DATASET_CLASS': unifiedqa.UnifiedQA,
      'SPLITS': {split: _SPLIT_SIZES[split] for split in splits},
      'DL_EXTRACT_RESULT': {
          split: '{}/{}'.format(config_name, _SPLIT_FILES[split])
          for split in splits
      },
  }
  # Build through the base class's metaclass so any metaclass-level
  # registration side effects still run exactly as for a `class` statement.
  metaclass = type(tfds.testing.DatasetBuilderTestCase)
  return metaclass(class_name, (tfds.testing.DatasetBuilderTestCase,), attrs)


# Publish every generated class at module level so unittest discovery
# (tfds.testing.test_main) finds them under their original names.
for _name, _config, _splits in _TEST_CASES:
  globals()[_name] = _make_test_case(_name, _config, _splits)
del _name, _config, _splits
if __name__ == '__main__':
  # Run every DatasetBuilderTestCase in this module via the TFDS test runner.
  tfds.testing.test_main()
| StarcoderdataPython |
6419676 | <reponame>igg-bioinfo/CKG
############################################
# REFLECT ontologies - DO, BTO, STITCH, GO #
############################################
def parser(files, filters, qtype = None):
    """
    Parses and extracts relevant data from REFLECT ontologies: Disease
    Ontology, Tissues, STITCH and Gene Ontology databases.

    :param list files: ontology files; the file *name* selects the section
        parsed ("entities", "names", "groups", "texts"). The entities file
        must come first, since it provides the serial -> identifier mapping
        used by every other section.
    :param list filters: ontology identifiers to be ignored.
        NOTE(review): currently unused by this implementation.
    :param int qtype: ontology type code; None accepts every type.
    :return: (terms, relationships, definitions)
        - terms: dict of ontology id -> list of names/synonyms, preferred
          name first.
        - relationships: set of (child id, parent id, "HAS_PARENT") tuples.
        - definitions: dict of ontology id -> definition text.
    """
    # No module-level import of defaultdict is visible in this file; import
    # locally so the function is self-contained.
    from collections import defaultdict

    entity = {}                  # internal serial -> ontology identifier
    terms = defaultdict(list)
    relationships = set()
    definitions = defaultdict()  # no factory: behaves like a plain dict
    for f in files:
        with open(f, 'r') as fh:
            if "entities" in f:
                for line in fh:
                    data = line.rstrip("\r\n").split("\t")
                    if data[1] == str(qtype) or qtype is None:
                        entity[data[0]] = data[2]
            if "names" in f:
                for line in fh:
                    data = line.rstrip("\r\n").split("\t")
                    if data[0] in entity:
                        code = entity[data[0]]
                        term = data[1]
                        # Flag column: 1 (or absent) = preferred name, goes
                        # first; 2 = skipped; anything else = synonym.
                        if len(data) <= 2 or int(data[2]) == 1:
                            terms[code].insert(0, term)
                        elif int(data[2]) != 2:
                            terms[code].append(term)
                            definitions[code] = term
            if "groups" in f:
                for line in fh:
                    data = line.rstrip("\r\n").split("\t")
                    if data[0] in entity and data[1] in entity:
                        sourceID = entity[data[0]]       # child
                        destinationID = entity[data[1]]  # parent
                        relationships.add((sourceID, destinationID, "HAS_PARENT"))
            if "texts" in f:
                for line in fh:
                    data = line.rstrip("\r\n").split("\t")
                    if data[0] in entity:
                        code = entity[data[0]]
                        definition = data[1]
                        # Normalise whitespace/quoting artifacts in the text.
                        definitions[code] = definition.replace('\n', ' ').replace('"', '').replace('\\', '')
    return terms, relationships, definitions
| StarcoderdataPython |
3464205 | #############################################################################
# Copyright (c) 2018 <NAME>. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
#############################################################################
import pkgutil
def get_text_resource(path):
    """
    Fetch a resource text file.

    Args:
        path (str): The path of the resource relative to this package.

    Returns:
        str: The resource decoded as UTF-8; undecodable bytes are ignored.
    """
    raw = get_binary_resource(path)
    return raw.decode('UTF-8', 'ignore')
def get_binary_resource(path):
    """
    Fetch a resource binary file.

    Args:
        path (str): The path of the resource relative to this package.

    Returns:
        bytes: The raw resource contents.
    """
    data = pkgutil.get_data(__name__, path)
    return data
| StarcoderdataPython |
1839149 | # ======================================================================================================================
# KIV auxiliary functions: based on matlab codes of the authors
# https://github.com/r4hu1-5in9h/KIV
# ======================================================================================================================
import numpy as np
import os
from scipy import optimize
def make_psd(A, eps=1e-10):
    """Symmetrise a square matrix and add a small diagonal ridge for
    numerical stability.

    The ridge magnitude was a hard-coded constant; it is now an optional
    parameter with the same default, so existing callers are unaffected.

    :param A: square (n, n) matrix
    :param eps: ridge added to the diagonal (default 1e-10)
    :return: (A + A.T) / 2 + eps * I
    :raises TypeError: if A is not square
    """
    if A.shape[0] != A.shape[1]:
        raise TypeError('input matrix should be a square matrix')
    N = A.shape[0]
    return (A + A.T) / 2 + eps * np.eye(N)
def data_split(X, Y, Z, frac):
    """Split (X, Y, Z) into a first part of round(frac * N) rows and the rest.

    1-D X and Z are promoted to single-column matrices first. N is the length
    of Y; rows beyond N are dropped from the second halves.

    :return: dict with keys X1/X2, Z1/Z2, Y1/Y2.
    """
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    if Z.ndim == 1:
        Z = Z.reshape(-1, 1)
    N = len(Y)
    n = int(np.round(frac * N))
    parts = {
        'X1': X[:n, :], 'X2': X[n:N, :],
        'Z1': Z[:n, :], 'Z2': Z[n:N, :],
        'Y1': Y[:n], 'Y2': Y[n:N],
    }
    return parts
def med_inter(X):
    """Median interpoint distance of a 1-D sample (bandwidth heuristic).

    Includes the zero self-distances on the diagonal, matching the original
    implementation.

    :param X: 1-D input vector
    :return: median of |X[i] - X[j]| over all (i, j) pairs
    """
    col = X.reshape(len(X), 1)
    pairwise = np.abs(col - col.T)
    return np.median(pairwise)
def get_Kmat(X, Y, v):
    """RBF (Gaussian) kernel matrix between 1-D inputs X and Y.

    K[i, j] = exp(-(X[i] - Y[j])^2 / (2 v^2)).

    Replaces the original O(n_x * n_y) Python double loop with a single
    broadcasted numpy expression; results are identical for the 1-D vectors
    this function is called with (columns from get_Kmat_mult).

    :param X, Y: 1-D vectors of length n_x and n_y
    :param v: bandwidth
    :return: (n_x, n_y) kernel matrix
    """
    diff = np.asarray(X)[:, None] - np.asarray(Y)[None, :]
    return np.exp(-(diff ** 2) / (2 * (v ** 2)))
def get_Kmat_mult(X, Y, v_vec):
    """Product RBF kernel: elementwise product of per-column scalar kernels.

    1-D inputs are promoted to single-column matrices. X and Y must have the
    same number of columns.

    :param X, Y: input matrices (or vectors)
    :param v_vec: one bandwidth per column
    :return: (n_x, n_y) kernel matrix
    :raises TypeError: on mismatched column counts
    """
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    if Y.ndim == 1:
        Y = Y.reshape(-1, 1)
    if X.shape[1] != Y.shape[1]:
        raise TypeError('number of columns of input matrices must coincide')
    K = np.ones((X.shape[0], Y.shape[0]))
    for col in range(X.shape[1]):
        K = K * get_Kmat(X[:, col], Y[:, col], v_vec[col])
    return K
def get_K(X, Z, Y, X_test):
    """Precompute every kernel matrix needed by the two KIV stages.

    Bandwidths are set per column by the median-interpoint-distance heuristic;
    the sample is then split 50/50 and all cross-kernels between the halves
    (and against X_test) are evaluated.

    :param X: endogenous regressors
    :param Z: instruments
    :param Y: response variable
    :param X_test: evaluation inputs (a 1-D input is treated as one point)
    :return: dict of kernel matrices plus the split responses Y1/Y2
    """
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    if Z.ndim == 1:
        Z = Z.reshape(-1, 1)
    if X_test.ndim == 1:
        X_test = X_test.reshape(1, -1)
    # Median-heuristic bandwidth for each column of X and Z.
    v_x = np.array([med_inter(X[:, j]) for j in range(X.shape[1])])
    v_z = np.array([med_inter(Z[:, j]) for j in range(Z.shape[1])])
    halves = data_split(X, Y, Z, frac=0.5)
    return {
        'K_XX': get_Kmat_mult(halves['X1'], halves['X1'], v_x),
        'K_xx': get_Kmat_mult(halves['X2'], halves['X2'], v_x),
        'K_xX': get_Kmat_mult(halves['X2'], halves['X1'], v_x),
        'K_Xtest': get_Kmat_mult(halves['X1'], X_test, v_x),
        'K_ZZ': get_Kmat_mult(halves['Z1'], halves['Z1'], v_z),
        'K_Zz': get_Kmat_mult(halves['Z1'], halves['Z2'], v_z),
        'Y1': halves['Y1'],
        'Y2': halves['Y2'],
    }
def KIV_pred(df, hyp, stage):
    """
    Predictive mean for Kernel Instrumental Variables.

    :param df: data frame produced by get_K
    :param hyp: hyperparameters [lam, xi]
    :param stage: 2 for in-sample (2nd stage) prediction, 3 for the test sample
    :return: predictive mean for KIV
    :raises ValueError: if stage is neither 2 nor 3
    """
    n = len(df['Y1'])
    m = len(df['Y2'])
    lam = hyp[0]
    xi = hyp[1]
    # 1st stage: ridge-regularised solve in instrument (Z) kernel space.
    brac = make_psd(df['K_ZZ']) + lam * np.eye(n) * n
    W = df['K_XX'] @ np.linalg.inv(brac) @ df['K_Zz']
    # 2nd stage: ridge solve for the dual coefficients alpha.
    brac2 = make_psd(W @ W.T) + m * xi * make_psd(df['K_XX'])
    alpha = np.linalg.inv(brac2) @ W @ df['Y2']
    if stage == 2:
        K_Xtest = df['K_XX']
    elif stage == 3:
        K_Xtest = df['K_Xtest']
    else:
        # BUG FIX: the original called os.exit(...), which does not exist
        # (os only has os._exit) and would raise AttributeError. Raise a
        # proper ValueError for an invalid stage instead.
        raise ValueError('stage should be equal to either 2 or 3')
    y_pred = (alpha.T @ K_Xtest).flatten()
    return y_pred
def KIV1_loss(df, lam):
    """
    First-stage (held-out) error of KIV.

    :param df: data frame produced by get_K
    :param lam: 1st stage ridge hyperparameter
    :return: 1st stage error
    """
    n, m = len(df['Y1']), len(df['Y2'])
    # Ridge-regularised stage-1 solve in instrument space.
    regularised = make_psd(df['K_ZZ']) + lam * np.eye(n) * n
    gamma = np.linalg.inv(regularised) @ df['K_Zz']
    # Mean trace discrepancy between the held-out kernel and its reconstruction.
    residual = df['K_xx'] - 2 * df['K_xX'] @ gamma + gamma.T @ df['K_XX'] @ gamma
    return np.trace(residual) / m
def KIV2_loss(df, hyp):
    """
    Second-stage error of KIV.

    :param df: data frame produced by get_K
    :param hyp: hyperparameters [lam, xi]
    :return: 2nd stage mean squared error on the stage-1 sample
    """
    n = len(df['Y1'])
    predicted = KIV_pred(df, hyp, 2)
    return np.sum((df['Y1'] - predicted) ** 2) / n
def get_KIV(data, X_test):
    """
    Estimate the model with Kernel Instrumental Variables and predict
    out of sample.

    :param data: dictionary holding the tuple (X, Y, Z)
    :param X_test: out-of-sample data
    :return: out-of-sample estimates
    """
    X, Y, Z = data['X'], data['Y'], data['Z']
    # 1. precompute every kernel matrix
    df = get_K(X, Z, Y, X_test)
    # 2. starting points (log scale) for the hyperparameter search
    lam_0 = np.log(0.05)
    xi_0 = np.log(0.05)

    # 3. tune the 1st-stage ridge parameter
    def stage1_objective(lam):
        return KIV1_loss(df, np.exp(lam))

    lam_star = optimize.fmin(stage1_objective, lam_0)

    # 4. tune the 2nd-stage ridge parameter given lam_star
    def stage2_objective(xi):
        return KIV2_loss(df, [np.exp(lam_star), np.exp(xi)])

    xi_star = optimize.fmin(stage2_objective, xi_0)
    # 5. predict out of sample with the tuned hyperparameters
    return KIV_pred(df, [np.exp(lam_star), np.exp(xi_star)], stage=3)
| StarcoderdataPython |
12810699 | from typing import Callable, Optional, Type, TypeVar
import punq
T = TypeVar("T")
class Container:
    """
    Implements a configurable DI container.

    Thin wrapper around :class:`punq.Container`: the real container is only
    created when :meth:`bootstrap` is called, at which point the supplied
    ``configure`` callback is invoked with this wrapper so it can register
    bindings.
    """

    def __init__(
        self, configure: Callable[["Container"], None] = lambda _: None
    ) -> None:
        # Underlying punq container; created lazily by bootstrap().
        self._impl: Optional[punq.Container] = None
        # Hook that registers bindings once the container exists.
        self._configure = configure

    def bootstrap(self) -> None:
        # Create the backing container and apply the configuration hook.
        self._impl = punq.Container()
        self._configure(self)

    def register_instance(self, type_: Type[T], instance: T) -> None:
        """
        Register a singleton instance.
        """
        assert self._impl is not None
        self._impl.register(type_, instance=instance)

    def resolve(self, type_: Type[T]) -> T:
        # Translate punq's errors into RuntimeErrors so callers only need to
        # know about this wrapper, not about punq.
        if self._impl is None:
            raise RuntimeError("Container not ready. Please call bootstrap()")
        try:
            return self._impl.resolve(type_)
        except punq.MissingDependencyError:
            raise RuntimeError(f"Failed to resolve implementation for {type_}")
| StarcoderdataPython |
1883804 | from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db import transaction
import datetime as dt
import simplejson as json
from django.http import JsonResponse
import requests
from django.conf import settings
# from .models import LaminateFlooring
from django.http import HttpResponse, Http404, HttpResponseRedirect
from .models import Resume
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
# ////////////////////////////////////////////////////////////////////////////
def view_github_repos(request):
    """Render a paginated list of the configured GitHub user's repositories.

    Fetches the user profile and repository list from the GitHub REST API and
    paginates the repositories 10 per page via the ``?page=`` query parameter.
    """
    # NOTE(review): passing access_token as a URL query parameter is
    # deprecated by GitHub; confirm the API still accepts it (the modern way
    # is an Authorization header).
    user = requests.get('https://api.github.com/users/lucasLB7?access_token={}'.format(settings.GITHUB_API))
    user_content = json.loads(user.content)
    repos = requests.get('https://api.github.com/users/lucasLB7/repos?sort=asc&access_token={}'.format(settings.GITHUB_API))
    repo_content = json.loads(repos.content)
    # Paginate 10 repositories per page; fall back to the first page on a
    # non-integer page number and to the last page when out of range.
    page = request.GET.get('page', 1)
    paginator = Paginator(repo_content, 10)
    try:
        content = paginator.page(page)
    except PageNotAnInteger:
        content = paginator.page(1)
    except EmptyPage:
        content = paginator.page(paginator.num_pages)
    return render(request, 'repositories/view-repos.html' , {"repos_details":content , "user_details":user_content})
# ////////////////////////////////////////////////////////////////////////////////
def homepage(request):
    """Render the landing page."""
    template = 'home/main.html'
    return render(request, template)
# ////////////////////////////////////////////////////////////////////////////////
def view_python(request):
    """Render the Python portfolio page with every Resume entry."""
    entries = Resume.objects.all()
    return render(request, 'python/main.html', {"python": entries})
# ////////////////////////////////////////////////////////////////////////////////
def view_html(request):
    """Render the HTML portfolio page with every Resume entry."""
    entries = Resume.objects.all()
    return render(request, 'html/main.html', {"HTML": entries})
def view_blender(request):
    """Render the Blender portfolio page with every Resume entry."""
    entries = Resume.objects.all()
    return render(request, 'blender/main.html', {"blender": entries})
def view_all(request):
    """Render the combined portfolio page with every Resume entry.

    Note: the template context key is historically named "blender".
    """
    entries = Resume.objects.all()
    return render(request, 'view_all/main.html', {"blender": entries})
def about_me(request):
    """Render the about-me page."""
    template = 'about_me/main.html'
    return render(request, template)
5129128 | #coding:utf-8
# 向量搜索 暴力算法
import numpy as np
import time
from scipy.spatial.distance import cosine
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import pairwise_distances
# 把字向量转化为句向量,简单相加
def seg_vector(txt, dict_vector, emb_size=768):
    """Convert a text to a sentence vector by summing its character vectors.

    :param txt: iterable of characters (a string)
    :param dict_vector: mapping from character to its embedding vector
    :param emb_size: embedding dimensionality of the zero-initialised result
    :return: numpy array of shape (emb_size,); characters missing from
        dict_vector are simply skipped
    """
    seg_v = np.zeros(emb_size)
    for w in txt:
        # Single lookup via .get() instead of the original
        # "w in dict_vector.keys()" membership test + second lookup.
        v = dict_vector.get(w)
        if v is not None:
            seg_v += v
    return seg_v
# 余弦相似度各种算法: CosSim_dot最快
def CosSim(a, b):
    """Cosine similarity of a and b (scipy's cosine() is the *distance*)."""
    return 1 - cosine(a, b)
def CosSim_sk(a, b):
    """Cosine similarity via scikit-learn's pairwise similarity matrix."""
    sim_matrix = cosine_similarity([a, b])
    return sim_matrix[0, 1]
def CosSim_dot(a, b):
    """Cosine similarity via dot product and vector norms.

    Converted from an assigned lambda to a def (PEP 8 E731): same callable
    name and signature, but with a proper __name__ and docstring.
    """
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def CosSim_np(a, b):
    """Cosine *distance* (1 - cosine similarity) computed with plain numpy.

    BUG FIX: the original wrapped the inputs in np.mat and computed
    float(a.T * b); for the usual row-vector/list inputs a.T * b is an
    (n x n) outer-product matrix, so float() raised TypeError. We now
    flatten both inputs and take an ordinary dot product.

    NOTE(review): despite the "Sim" name this returns 1 - cos (a distance),
    matching the active line of the original; the similarity variants are
    CosSim / CosSim_dot.
    """
    a = np.asarray(a, dtype=float).ravel()
    b = np.asarray(b, dtype=float).ravel()
    num = float(np.dot(a, b))
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    cos = num / denom
    sim = 1 - cos
    return sim
'''
def cosine(q,a):
pooled_len_1 = tf.sqrt(tf.reduce_sum(q * q, 0))#.to(device)
pooled_len_2 = tf.sqrt(tf.reduce_sum(a * a, 0))#.to(device)
pooled_mul_12 = tf.reduce_sum(q * a, 0)#.to(device)
score = tf.div(pooled_mul_12, pooled_len_1 * pooled_len_2 +1e-8, name="scores")#.to(device)
with tf.Session() as sess:
cos = sess.run(score)#.to(device)
return cos
# 在TF上计算余弦相似度
def get_cos_distance(X1, X2):
# calculate cos distance between two sets
# more similar more big
(k,) = X1.shape
(m,) = X2.shape
# 求模
X1_norm = tf.sqrt(tf.reduce_sum(tf.square(X1), axis=1))
X2_norm = tf.sqrt(tf.reduce_sum(tf.square(X2), axis=1))
# 内积
X1_X2 = tf.matmul(X1, tf.transpose(X2))
X1_X2_norm = tf.matmul(tf.reshape(X1_norm,[k,1]),tf.reshape(X2_norm,[1,m]))
# 计算余弦距离
cos = X1_X2/X1_X2_norm
return cos
'''
# 向量搜索类
class VecSearch:
    """Brute-force vector search: exact top-k retrieval by cosine similarity."""

    def __init__(self):
        # key -> vector store
        self.dicts = {}

    def curr_items(self):
        """Return how many vectors are currently stored.

        BUG FIX: 'self' was missing from the signature, so any call raised
        TypeError (and self.dicts would have been an undefined name).
        """
        return len(self.dicts)

    def add_doc(self, key, vector):
        """Add (or replace) the document vector stored under *key*."""
        self.dicts[key] = vector

    def search(self, query, top=5):
        """Exact search for the *top* most similar stored vectors.

        :param query: query vector
        :param top: number of results to return
        :return: (similarities, keys) as parallel arrays, best match first
        """
        # Result rows are [similarity, key]; seeded with zeros so only
        # candidates beating the current floor get inserted.
        ret = np.zeros((top, 2))
        for key, value in self.dicts.items():
            # Cosine similarity (same formula as the module-level CosSim_dot,
            # inlined so the class is self-contained).
            sim = np.dot(query, value) / (np.linalg.norm(query) * np.linalg.norm(value))
            if sim > ret[top - 1][0]:
                row = np.array([[sim, key]]).astype('float32')
                ret = np.insert(ret, 0, values=row, axis=0)
        # Sort by similarity descending and keep the best *top* rows.
        order = np.lexsort([-1 * ret[:, 0]])
        ret = ret[order, :]
        ret = ret[:top, ]
        return ret[:, 0], ret[:, 1].astype('int')
#-----------------------------------------
# 测试
def test ():
    """Benchmark the brute-force searcher on 100k random 768-d vectors.

    Prints timing for index build, a single query and a batch of queries.
    NOTE(review): all user-facing strings are Chinese and left untouched;
    only comments were translated.
    """
    np.random.seed(1234)  # make reproducible
    print('大批量向量余弦相似度计算-[暴力版]'.center(40,'='), flush=True)
    # Randomly generate 100k vectors.
    total = 100000
    dim = 768
    print('随机生成%d个向量,维度:%d' % (total, dim), flush=True)
    #rng = np.random.RandomState(0)
    #X = rng.random_sample((total, dim))
    X = np.random.random((total, dim))
    X[:, 0] += np.arange(total) / 1000.
    #print('前10个向量为:')
    #print(X[:10])
    print('正在创建搜索器...')
    start = time.time()
    # Build the searcher.
    vs = VecSearch()
    # Add every vector to the searcher.
    for i in range(total):
        vs.add_doc(i, X[i])
    end = time.time()
    total_time = end - start
    print('添加用时:%4f秒' % total_time)
    # Report the current process memory usage.
    # NOTE(review): psutil is a third-party dependency imported only here.
    import os,psutil
    process = psutil.Process(os.getpid())
    print('Used Memory:',process.memory_info().rss / 1024 / 1024,'MB')
    # Run the query tests.
    print('单条查询测试'.center(40,'-'))
    test_times = 100
    #Q = rng.random_sample((test_times, dim))
    Q = np.random.random((test_times, dim))
    Q[:, 0] += np.arange(test_times) / 1000.
    q = Q[0]
    D, I = vs.search(q)
    #print('索引号:%d, 余弦相似度:%f' % r)
    print('搜索结果:', D, I)
    # Show detailed results for the single query.
    def showdetail (X,q,D,I):
        # Print each returned (index, score) pair.
        print('显示查询结果,并验证余弦相似度...')
        for i, v in enumerate(I):
            #np.squeeze(X[v])
            #c = CosSim_dot(Q[0], X[v])
            r = (v, D[i]) # CosSim_dot(Q[0], X[v]), #
            print('索引号:%5d, 距离:%f' % r ) #, 余弦相似度:%f
            #rv = X[v][:10]
            #print('\n查询结果(超长只显示前10维:%s' % rv)
    showdetail (X,q,D,I)
    print('批量查询测试'.center(40,'-'))
    start = time.time()
    print('批量测试次数:%d 次,请稍候...' % test_times )
    for i in range(test_times):
        r = vs.search(Q[i])
    end = time.time()
    #print((end-start), (end-start)/test_times)
    total_time = end - start
    print('总用时:%d 秒, 平均用时:%4f 毫秒' % (total_time, total_time*1000/test_times) )
    return
    # Manual interactive test.
    # NOTE(review): DEAD CODE — unreachable after the return above, and `rng`
    # is undefined here (its definition is commented out near the top).
    while 1:
        print('-'*40)
        txt = input("回车开始测试(Q退出):").strip()
        if txt.upper()=='Q': break
        # Randomly generate one query vector.
        print('随机生成一个查询向量...')
        q = rng.random_sample(dim)
        print("query:%s..." % q[:10])
        # Query.
        start = time.time()
        r = vs.search(q)
        print('查询结果:')
        print('索引号:%d,相似度:%f' % r) # , X[r]
        end = time.time()
        total_time = end - start
        print('总用时:%d 秒, 平均用时:%4f 毫秒' % (total_time, total_time*1000) )
# Script entry point: run the brute-force search benchmark.
if __name__ == '__main__':
    test()
1852399 | <reponame>LorbusChris/packit
"""
This is the official python interface for source-git. This is used exclusively in the CLI.
"""
import logging
import os
from functools import lru_cache
from typing import Any, Dict
import requests
from packit.fed_mes_consume import Consumerino
from packit.sync import Synchronizer
from packit.watcher import SourceGitCheckHelper
logger = logging.getLogger(__name__)
class SourceGitAPI:
    """High-level interface for source-git workflows: syncing upstream pull
    requests into dist-git and forwarding dist-git CI results back upstream.

    Credentials are read lazily from environment variables (see the
    properties at the bottom).
    """

    def __init__(self):
        # TODO: the url template should be configurable
        self.datagrepper_url = (
            "https://apps.fedoraproject.org/datagrepper/id?id={msg_id}&is_raw=true"
        )
        # Fedora-messaging consumer used by the keep_* watch loops below.
        self.consumerino = Consumerino()

    def fetch_fedmsg_dict(self, msg_id: str) -> Dict[str, Any]:
        """
        Fetch selected message from datagrepper

        :param msg_id: str
        :return: dict, the fedmsg
        """
        logger.debug(f"Proccessing message: {msg_id}")
        url = self.datagrepper_url.format(msg_id=msg_id)
        response = requests.get(url)
        msg_dict = response.json()
        return msg_dict

    def sync_upstream_pr_to_distgit(self, fedmsg_dict: Dict[str, Any]) -> None:
        """
        Take the input fedmsg (github push or pr create) and sync the content into dist-git

        :param fedmsg_dict: dict, code change on github
        """
        logger.info("syncing the upstream code to downstream")
        # Synchronizer is a context manager so credentials/clones get cleaned up.
        with Synchronizer(
            self.github_token,
            self.pagure_user_token,
            self.pagure_package_token,
            self.pagure_fork_token,
        ) as sync:
            sync.sync_using_fedmsg_dict(fedmsg_dict)

    def keep_syncing_upstream_pulls(self) -> None:
        """
        Watch Fedora messages and keep syncing upstream PRs downstream. This runs forever.
        """
        with Synchronizer(
            self.github_token,
            self.pagure_user_token,
            self.pagure_package_token,
            self.pagure_fork_token,
        ) as sync:
            for topic, action, msg in self.consumerino.iterate_gh_pulls():
                # TODO:
                #   handle edited (what's that?)
                #   handle closed (merged & not merged)
                # Only sync actions that change PR content.
                if action in ["opened", "synchronize", "reopened"]:
                    sync.sync_using_fedmsg_dict(msg)

    def process_ci_result(self, fedmsg_dict: Dict[str, Any]) -> None:
        """
        Take the CI result, figure out if it's related to source-git and if it is, report back to upstream

        :param fedmsg_dict: dict, flag added in pagure
        """
        sg = SourceGitCheckHelper(self.github_token, self.pagure_user_token)
        sg.process_new_dg_flag(fedmsg_dict)

    def keep_fwding_ci_results(self) -> None:
        """
        Watch Fedora messages and keep reporting CI results back to upstream PRs. This runs forever.
        """
        for topic, msg in self.consumerino.iterate_dg_pr_flags():
            self.process_ci_result(msg)

    # NOTE(review): stacking @property over @lru_cache caches per-instance
    # (self is part of the cache key) and keeps every instance alive for the
    # process lifetime (flake8-bugbear B019); functools.cached_property is
    # the modern equivalent without that caveat — confirm before changing.
    @property
    @lru_cache()
    def github_token(self) -> str:
        # Raises KeyError if the variable is unset.
        return os.environ["GITHUB_TOKEN"]

    @property
    @lru_cache()
    def pagure_user_token(self) -> str:
        return os.environ["PAGURE_USER_TOKEN"]

    @property
    @lru_cache()
    def pagure_package_token(self) -> str:
        """ this token is used to comment on pull requests """
        # FIXME: make this more easier to be used -- no need for a dedicated token
        return os.environ["PAGURE_PACKAGE_TOKEN"]

    @property
    @lru_cache()
    def pagure_fork_token(self) -> str:
        """ this is needed to create pull requests """
        # FIXME: make this more easier to be used -- no need for a dedicated token
        return os.environ["PAGURE_FORK_TOKEN"]
| StarcoderdataPython |
4988332 | <gh_stars>0
import cv2
from talking_color.camera.camera import Camera, CAMERA_WIDTH, CAMERA_HEIGHT
class Webcam(Camera):
    """
    Camera implementation backed by a local webcam via OpenCV.

    Adapted from OpenCV-Python Tutorials
    https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
    """

    def __init__(self, window_name="Webcam", algorithm=None):
        super().__init__(window_name, algorithm)
        # define webcam input (device index 0 = system default camera)
        self.capture = cv2.VideoCapture(0)
        # define window size
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)

    def process_video(self):
        """Run the algorithm on the live stream until 'q' is pressed."""
        # begin frame loop
        while True:
            _, frame = self.capture.read()
            # annotate frame with the configured algorithm
            new_frame = self.algorithm.run(frame).labelled_frame
            # draw output of webcam
            cv2.imshow(self.window_name, new_frame)
            # allow exit when 'q' is pressed
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    def process_image(self):
        """Capture a single frame, annotate it, and report via text/audio."""
        # capture image and get np array
        _, frame = self.capture.read()
        # process image
        frame = self.algorithm.run(frame).labelled_frame
        # display the image on screen
        cv2.imshow("Image", frame)
        # also output with text and audio
        self.output_sound(frame)
        # allow exit when any key is pressed
        print("Press any key to exit.")
        cv2.waitKey(0)

    def destroy(self):
        """Release the camera device handle."""
        # release the capture
        self.capture.release()
| StarcoderdataPython |
1748160 | import json
import os
import random
import string
from pathlib import Path
from eth_typing import ChecksumAddress
from eth_utils import to_checksum_address
PATH_CONFIG = Path("/opt/synapse/config/synapse.yaml")
PATH_CONFIG_TEMPLATE = Path("/opt/synapse/config/synapse.template.yaml")
PATH_MACAROON_KEY = Path("/opt/synapse/data/keys/macaroon.key")
PATH_ADMIN_USER_CREDENTIALS = Path("/opt/synapse/config/admin_user_cred.json")
PATH_KNOWN_FEDERATION_SERVERS = Path("/opt/synapse/data/known_federation_servers.yaml")
PATH_WELL_KNOWN_FILE = Path("/opt/synapse/data_well_known/server")
def get_macaroon_key() -> str:
    """Return the synapse macaroon secret, generating and persisting it on
    first use.

    The key is stored at PATH_MACAROON_KEY so subsequent runs reuse the
    same secret.
    """
    # SECURITY FIX: the macaroon key is a long-lived secret, so draw it from
    # the cryptographically secure `secrets` module instead of `random`.
    import secrets

    if not PATH_MACAROON_KEY.exists():
        alphabet = string.digits + string.ascii_letters + "!@#$%^&*()_-=+{}[]"
        macaroon = "".join(secrets.choice(alphabet) for _ in range(30))
        PATH_MACAROON_KEY.write_text(macaroon)
    else:
        macaroon = PATH_MACAROON_KEY.read_text()
    return macaroon
def render_synapse_config(
    server_name: str,
    eth_rpc_url: str,
    service_registry_address: ChecksumAddress,
) -> None:
    """Fill in the synapse config template and write the result to disk."""
    template = string.Template(PATH_CONFIG_TEMPLATE.read_text())
    rendered = template.substitute(
        MACAROON_KEY=get_macaroon_key(),
        SERVER_NAME=server_name,
        ETH_RPC=eth_rpc_url,
        SERVICE_REGISTRY=service_registry_address,
    )
    PATH_CONFIG.write_text(rendered)
def render_well_known_file(server_name: str) -> None:
    """Write the .well-known federation delegation file for *server_name*."""
    payload = {"m.server": f"{server_name}:443"}
    PATH_WELL_KNOWN_FILE.write_text(json.dumps(payload, indent=2))
def generate_admin_user_credentials():
    """
    Generate the username "admin-{server-name}" and a random password combination
    that will be used by various tools in the
    package to authenticate as an admin user via the ``AdminUserAuthProvider``.

    Idempotent: does nothing if the credentials file already exists.
    """
    # SECURITY FIX: admin passwords must come from the cryptographically
    # secure `secrets` module, not the predictable `random` PRNG.
    import secrets

    if PATH_ADMIN_USER_CREDENTIALS.exists():
        return
    username = f"admin-{os.environ['SERVER_NAME']}"
    password = "".join(
        secrets.choice(string.digits + string.ascii_lowercase) for _ in range(30)
    )
    PATH_ADMIN_USER_CREDENTIALS.write_text(
        json.dumps({"username": username, "password": password})
    )
def main() -> None:
    """Read deployment settings from the environment and render all config."""
    server_name = os.environ["SERVER_NAME"]
    rpc_url = os.environ["ETH_RPC"]
    registry = to_checksum_address(os.environ["SERVICE_REGISTRY"])
    render_synapse_config(
        server_name=server_name,
        eth_rpc_url=rpc_url,
        service_registry_address=registry,
    )
    render_well_known_file(server_name=server_name)
    generate_admin_user_credentials()
generate_admin_user_credentials()
# Script entry point: render synapse configuration from the environment.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1934407 | """
QSS style sheets.
"""
| StarcoderdataPython |
3215891 | <reponame>mohnbroetchen2/cykel_jenarad
# Generated by Django 2.2.4 on 2020-03-17 19:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (do not edit by hand).

    Adds ``LocationTracker.tracker_status`` (choices Active/Inactive/
    Missing/Decommissioned, default Inactive) and relaxes ``Location.bike``
    to a nullable, blankable protected foreign key.
    """

    dependencies = [
        ("bikesharing", "0018_auto_20200204_2147"),
    ]

    operations = [
        migrations.AddField(
            model_name="locationtracker",
            name="tracker_status",
            field=models.CharField(
                choices=[
                    ("AC", "Active"),
                    ("IN", "Inactive"),
                    ("MI", "Missing"),
                    ("DE", "Decommissioned"),
                ],
                default="IN",
                max_length=2,
            ),
        ),
        migrations.AlterField(
            model_name="location",
            name="bike",
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                to="bikesharing.Bike",
            ),
        ),
    ]
| StarcoderdataPython |
8114294 | import random
import cv2
import numpy as np
from augraphy.base.augmentation import Augmentation
class PageBorder(Augmentation):
    """Add border effect to sides of input image.

    :param side: One of the four sides of page i:e top,right,left,bottom.
        By default it is "left"
    :type side: string , optional
    :param width_range: Pair of ints determining the range from of the page border
    :type width_range: tuple, optional
    :param pages: An integer determining the number of page shadows in the border
    :type pages: int , optional
    :param noise_intensity_range: A pair of floats determining the intensity of
        noise being applied around the borders.
    :type noise_intensity_range: tuple , optional
    :param p: The probability this Augmentation will be applied.
    :type p: float, optional
    """

    def __init__(
        self,
        side="random",
        width_range=(5, 30),
        pages=None,
        noise_intensity_range=(0.2, 0.5),
        p=1,
    ):
        """Constructor method"""
        super().__init__(p=p)
        self.side = side
        self.width_range = width_range
        self.pages = pages
        self.noise_intensity_range = noise_intensity_range

    def __repr__(self):
        # NOTE(review): `side` is intentionally (?) omitted from the repr.
        return f"PageBorder(width_range={self.width_range}, pages={self.pages}, noise_intensity_range={self.noise_intensity_range}, p={self.p})"

    def add_corner_noise(self, border, intensity=0.2):
        """Scatter black pixels around the detected page edges of *border*.

        Edges are found with a blur + Canny pass; for each edge pixel a few
        nearby pixels (left and right within 1-5 columns) are blackened with
        probability *intensity*.
        """
        ksize = (5, 5)
        blur = cv2.blur(border, ksize)
        edge = cv2.Canny(blur, 100, 200)
        Y, X = edge.shape
        # Coordinates of every edge pixel (rows, cols).
        idx_list = np.where(edge == 255)
        for i in range(len(idx_list[0])):
            x = idx_list[0][i]
            y = idx_list[1][i]
            reps = random.randint(1, 3)
            # NOTE(review): the inner loop reuses the name `i`, shadowing the
            # outer index; harmless here since the outer loop reassigns it,
            # but worth renaming.
            for i in range(reps):
                if random.random() < intensity:
                    # Blacken a pixel a few columns to the right (clamped)...
                    d = int(random.uniform(1, 5))
                    border[x, min(X - 1, y + d)] = 0
                    # ...and another a few columns to the left (clamped).
                    d = int(random.uniform(1, 5))
                    border[x, max(0, y - d)] = 0
        return border

    def create_border(
        self,
        channel,
        border_width,
        border_height,
        num_pages=None,
        noise_intensity=0.2,
    ):
        """Draw a white strip with `num_pages` dark page-edge lines, then add
        corner noise and a final blur.

        :param channel: number of image channels (<=2 means single-channel)
        :param border_width: strip width in pixels
        :param border_height: strip height in pixels
        :param num_pages: page-shadow line count (random 2-7 when None)
        :param noise_intensity: probability used by add_corner_noise
        """
        pad = 0
        # Start from an all-white strip, with or without a channel axis.
        if channel > 2:
            border = np.ones((border_height, border_width + pad, channel))
        else:
            border = np.ones((border_height, border_width + pad))
        border = border * 255
        color = (0, 0, 0)
        if num_pages is None:
            num_pages = random.randint(2, 7)
        # Fan the page edges out from the outer edge (x=border_width) to 0.
        for x in np.linspace(border_width, 0, num_pages):
            x = int(x)
            # The outermost line stays straight; inner lines get a random slant.
            e = (
                border_width
                if x == border_width
                else np.random.randint(
                    int(border_width - (border_width / 2)),
                    border_width,
                )
            )
            start_point = (x, 0)
            end_point = (e, border_height)
            thickness = np.random.choice([2, 3, 4])
            border = cv2.line(border, start_point, end_point, color, thickness)
        border = self.add_corner_noise(np.uint8(border), noise_intensity)
        # Soften the noise with a small blur.
        border = cv2.blur(border, (3, 3))
        return border

    def __call__(self, image, layer=None, force=False):
        """Apply the page-border augmentation; returns a new, larger image
        with the border strip stacked onto the chosen side.
        """
        if force or self.should_run():
            image = image.copy()
            noise_intensity = random.uniform(
                self.noise_intensity_range[0],
                self.noise_intensity_range[1],
            )
            border_width = random.randint(self.width_range[0], self.width_range[1])
            height, width = image.shape[:2]
            if len(image.shape) > 2:
                channel = image.shape[2]
            else:
                channel = 1
            # Resolve "random" into a concrete side per call.
            if self.side == "random":
                side = random.choice(["left", "right", "top", "bottom"])
            else:
                side = self.side
            if side == "left":
                border = self.create_border(
                    channel,
                    border_width,
                    height,
                    self.pages,
                    noise_intensity,
                )
                # print(image.shape,border.shape)
                image = np.hstack((border, image))
            elif side == "right":
                # Mirror the strip so the page edges face outward.
                border = self.create_border(
                    channel,
                    border_width,
                    height,
                    self.pages,
                    noise_intensity,
                )
                image = np.hstack((image, np.fliplr(border)))
            elif side == "top":
                # Build a vertical strip of the image's width, then rotate it.
                border = self.create_border(
                    channel,
                    border_width,
                    width,
                    self.pages,
                    noise_intensity,
                )
                image = np.vstack((cv2.rotate(border, cv2.ROTATE_90_CLOCKWISE), image))
            elif side == "bottom":
                border = self.create_border(
                    channel,
                    border_width,
                    width,
                    self.pages,
                    noise_intensity,
                )
                image = np.vstack(
                    (image, (cv2.rotate(border, cv2.ROTATE_90_COUNTERCLOCKWISE))),
                )
            return image
| StarcoderdataPython |
11211034 | from mesa import Model
from mesa.space import ContinuousSpace
from mesa.time import BaseScheduler
from mesa.datacollection import DataCollector
import random
import numpy as np
import Midge
import Target
import Trap
import Deer
import Egg
import BiomeCell
import csv
class WorldModel(Model):
    """Mesa model simulating midge/deer/BTV dynamics on a continuous space."""

    def __init__(self, NumMidges, NumDeer, width=100, height=100, mapfile=None, trapfile=None, dps=0.5):
        """Build the world: configure midge survival, load the biome map, and
        seed midges, eggs, deer (plus 10 BTV-positive deer) at random positions.

        :param NumMidges: starting number of midges
        :param NumDeer: starting number of (healthy) deer
        :param mapfile: CSV biome map rows of (x, y, biome) — required
        :param trapfile: CSV trap locations (currently unused; see commented block)
        :param dps: daily probability of survival for all midges
        """
        Midge.Midge.setdps(dps)
        Midge.Midge.createdpsarray(dps)
        self.NumMidges = NumMidges  # Starting number of midges
        self.NumDeer = NumDeer  # Starting number of deer
        self.idcounter = 0  # Global counter for id of agents, increments with every new agent creation
        self.mapfile = mapfile  # Argument passed to decide on which map data to use (must be in CSV format)
        self.trapfile = trapfile  # File used to decide where the traps will be placed, used to replicate data in paper
        self.running = True  # Used for running model in batches
        self.midges = []  # List of all current midges, can set to update every step
        self.traps = []  # List of all trap agents
        self.deer = []  # List of all deer agents
        self.dps = dps  # Daily probability of survival for all midges
        # Time (days) since simulation has begun
        self.day = 0
        # Activates the step function for midges sequentially (no order needed)
        self.schedule = BaseScheduler(self)
        # Create a grid model with potential for multiple agents on one cell
        self.grid = ContinuousSpace(width, height, torus=False)
        # Open the map file and create a csvreader object to be used by every midge
        # NOTE(review): this handle is never closed, and the `with` block below
        # re-opens the same file — confirm self.csvreader is actually consumed.
        self.mapdata = open(self.mapfile, 'r')
        self.csvreader = csv.reader(self.mapdata, delimiter=',')
        # Create a blank map array filled with the generic 'lightgreen' value
        self.maparray = np.full(shape=(width, height), fill_value=BiomeCell.BiomeCell('lightgreen'),
                                dtype=BiomeCell.BiomeCell)
        # Add all biome types from the mapfile to the map array
        with open(self.mapfile, 'r', encoding='utf-8-sig') as csvfile:
            csvreader = csv.reader(csvfile, delimiter=',')
            for l in csvreader:
                x, y = int(l[0]), int(l[1])
                self.maparray[x][y] = BiomeCell.BiomeCell(l[2])
        # # Adds midges to random location in grid and to queue in scheduler
        for i in range(self.NumMidges):
            x = self.random.random() * self.grid.width
            y = self.random.random() * self.grid.height
            a = Midge.Midge(self.getid(), self)
            # Gives midges random probabilities of having fed and are in stages of gonotrophic cycle
            a.fed = True
            a.timesincefed = np.random.randint(0, Midge.Midge.gtrclength)
            self.schedule.add(a)
            self.grid.place_agent(a, (x, y))
        # Adds some eggs to the simulation as well, just to even out the population variability
        for i in range(10000):
            x = self.random.random() * (self.grid.width)
            y = self.random.random() * (self.grid.height)
            a = Egg.Egg(self.getid(), self)
            # Gives eggs random age
            a.age = random.randint(-Egg.Egg.growthtime, Egg.Egg.growthtime)
            self.schedule.add(a)
            self.grid.place_agent(a, (x, y))
        # # Adds traps to random locations in the grid from the trap location file and to the queue in scheduler
        # with open(self.trapfile, 'r', encoding='utf-8-sig') as csvfile:
        #     csvreader = csv.reader(csvfile, delimiter=',')
        #     for l in csvreader:
        #         x, y = int(l[0]), int(l[1])
        #         a = Trap.Trap(self.getid(), self)
        #         self.schedule.add(a)
        #         self.grid.place_agent(a, (x, y))
        # Adds deer to random locations in the grid and to queue in scheduler
        for i in range(self.NumDeer):
            x = self.random.random() * (self.grid.width)
            y = self.random.random() * (self.grid.height)
            a = Deer.Deer(self.getid(), self)
            self.schedule.add(a)
            self.grid.place_agent(a, (x, y))
        # Add 10 deer with BTV as a case study
        for i in range(10):
            a = Deer.Deer(self.getid(), self)
            a.hasbtv = True
            self.schedule.add(a)
            self.grid.place_agent(a, (self.random.random() * self.grid.width, self.random.random() * self.grid.height))
        self.traps = [i for i in self.schedule.agents if type(i) == Trap.Trap]  # Fills list of traps (only once)
        self.deer = [i for i in self.schedule.agents if type(i) == Deer.Deer]  # Fills list of deer (only once)
        self.targets = np.array(combinelists(self.traps, self.deer), dtype=Target.Target)  # Combines all subclasses of the target class into one list
        self.deerbites = 0  # Global counter of the total number of deer bites
        # TODO: Implement datacollector that can collect data from different agent types (gonna be a pain in my ass)
        self.datacollector = DataCollector(model_reporters={"Day": "day",
                                                            "MidgePop": "NumMidges",
                                                            "DeerBites": "deerbites",
                                                            "DeerwBTV": "DeerwBTV",
                                                            "NumVectors": "NumVectors",
                                                            "DPS": "DPS",
                                                            "MidgeAges": "MidgeAges",
                                                            "AvgAge": "AvgAge"})

    def getid(self):
        """Return a fresh unique agent id (increments the global counter)."""
        self.idcounter += 1
        return self.idcounter

    def step(self):
        """Advance the simulation by one day, then refresh aggregate stats."""
        # Update the Midge DPS to reflect time
        # Possible to create hard limit on midge population
        # if self.NumMidges >= 10000:
        #     Midge.Midge.dps = max(Midge.Midge.dps * 0.75, 0.3)
        # else:
        #     Midge.Midge.updatedps(self.day)
        Egg.Egg.updategrowthtime(self.day)
        # Collect the data once per tick (day)
        self.datacollector.collect(self)
        # World step function. Steps each agent as well as increments the day clock
        self.schedule.step()
        # Global counter to count number of days in the simulation
        self.day += 1
        # Could optimize this in the future if performance becomes an issue, still it is only run once per day
        self.midges = np.array([i for i in self.schedule.agents if type(i) == Midge.Midge])  # List containing all midges
        self.MidgeAges = np.array([i.age for i in self.midges])  # Captures age of every single midge in days
        self.AvgAge = self.MidgeAges.mean()
        self.NumVectors = len([i for i in self.midges if i.hasbtv])  # Number of midges carrying BTV and capable of spreading
        self.NumMidges = self.midges.size
        self.DeerwBTV = np.sum([d.hasbtv for d in self.deer])  # Counts the number of deer currently with BTV
        self.deerbites = np.sum([d.numbites for d in self.deer])  # Counts the cumulative number of deer bites (linear?)
        self.DPS = Midge.Midge.dps
# Combines two lists, returns one
def combinelists(list1=(), list2=()) -> np.ndarray:
    """Concatenate two iterables and return the result as a numpy array.

    FIXES: the original used mutable default arguments (list1=[], list2=[]),
    a classic Python pitfall, and was annotated ``-> list`` although it
    actually returns an ndarray.

    :param list1: first iterable of elements
    :param list2: second iterable of elements
    :return: numpy array containing list1's elements followed by list2's
    """
    combined = []
    combined.extend(list1)
    combined.extend(list2)
    return np.array(combined)
| StarcoderdataPython |
1773423 | <gh_stars>0
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import re
from collections import namedtuple
from packageurl import PackageURL
logger = logging.getLogger(__name__)
# This code has been vendored from scancode.
# https://github.com/nexB/scancode-toolkit/blob/16ae20a343c5332114edac34c7b6fcf2fb6bca74/src/packagedcode/rpm.py#L91
class EVR(namedtuple("EVR", "epoch version release")):
    """
    The RPM Epoch, Version, Release tuple.
    """

    def __new__(cls, version, release=None, epoch=None):
        """
        Validate the fields and build the tuple.

        Note: the named tuple's field order (epoch, version, release) is the
        sort order, but this constructor takes the rarely used epoch last,
        defaulting to None. Invalid input is logged and yields None instead
        of an instance.
        """
        if not isinstance(epoch, int):
            # A non-empty, non-numeric epoch string is rejected.
            if epoch and epoch.strip():
                logger.error("Invalid epoch: must be a number or empty.")
                return None
        if not version:
            logger.error("Version is required: {}".format(repr(version)))
            return None
        return super().__new__(EVR, epoch, version, release)

    def __str__(self, *args, **kwargs):
        return self.to_string()

    def to_string(self):
        """Render as [epoch:]version[-release]."""
        text = f"{self.version}-{self.release}" if self.release else self.version
        if self.epoch:
            text = ":".join([str(self.epoch), text])
        return text
# https://github.com/nexB/scancode-toolkit/blob/16ae20a343c5332114edac34c7b6fcf2fb6bca74/src/packagedcode/nevra.py#L36
def from_name(rpm_string):
    """
    Return an (E, N, V, R, A) tuple given a file name, by splitting
    [e:]name-version-release.arch into the four possible subcomponents.
    Default epoch, version, release and arch to None if not specified.
    Accepts RPM names with and without extensions
    """
    # name - version - release . arch, anchored to the whole string.
    nevra_re = re.compile(r"^(.*)-([^-]*)-([^-]*)\.([^.]*)$")
    match = nevra_re.match(rpm_string)
    if not match:
        return None
    name, version, release, arch = match.groups()
    # An epoch, when present, is embedded in the version as "e:version".
    if ":" not in version:
        return None, name, version, release, arch
    epoch, version = version.split(":", 1)
    if epoch.isdigit():
        epoch = int(epoch)
    return (epoch, name, version, release, arch)
def rpm_to_purl(rpm_string, namespace):
    """
    Build a PackageURL of type "rpm" from an RPM file name, or return None
    (logging an error) when the name cannot be parsed.
    """
    # FIXME: there is code in scancode to handle RPM conversion AND this should
    # be all be part of the packageurl library
    # The epoch, when present, travels inside the version string (e.g. "0:1.2-3").
    envra = from_name(rpm_string)
    if not envra:
        logger.error(f"Invalid RPM name can't get envra: {rpm_string}")
        return None
    epoch, name, version, release, arch = envra
    evr = EVR(version, release, epoch)
    if not evr:
        logger.error(f"Invalid RPM name can't get evr: {rpm_string}")
        return None
    qualifiers = {"arch": arch} if arch else {}
    return PackageURL(
        type="rpm",
        namespace=namespace,
        name=name,
        version=evr.to_string(),
        qualifiers=qualifiers,
    )
| StarcoderdataPython |
4931520 | <filename>locomotion_analysis/src/angle_analysis.py
import math
import numpy as np
class ProjectionVector(object):
    """
    Extends an input segment past its endpoint to form a projection vector.

    Input must be in the form: ((x0, y0), (x1, y1)). Attributes computed on
    construction: ``vector`` (the projected segment), plus the ``slope`` and
    ``intercept`` of the line through it.
    """

    def __init__(self, lineA):
        self.lineA = lineA
        self.vector = self.find_projection()
        self.slope = self.find_slope()
        self.intercept = self.find_intercept()

    def find_projection(self):
        """Reflect the segment's start point about its end point."""
        (x0, y0), (x1, y1) = self.lineA
        return ((x1, y1), (2 * x1 - x0, 2 * y1 - y0))

    def find_slope(self):
        """Slope of the projected segment.

        NOTE: for a vertical segment this falls back to returning the raw
        rise (dy) rather than raising a ZeroDivisionError.
        """
        (px0, py0), (px1, py1) = self.vector
        rise = py1 - py0
        if px1 != px0:  # protect from zero division error
            return rise / (px1 - px0)
        return rise

    def find_intercept(self):
        """y-intercept of the line y = slope*x + intercept through vector[1]."""
        end_x, end_y = self.vector[1]
        return end_y - self.slope * end_x
def find_angle(lineB, VectorP):
    """
    Signed angle (degrees) between segment lineB and the projection vector.

    Line A (not present in the function) is the starting segment; lineB is
    the following segment; VectorP is a ProjectionVector derived from line A.
    The sign encodes which side of VectorP's line the end of lineB falls on.
    """
    # Direction vectors, both pointing backwards from the shared joint.
    b_dir = np.array(lineB[0]) - np.array(lineB[1])
    p_dir = np.array(VectorP.vector[0]) - np.array(VectorP.vector[1])
    # Unsigned angle from the cosine formula.
    cos_theta = np.dot(b_dir, p_dir) / (np.linalg.norm(b_dir) * np.linalg.norm(p_dir))
    angle_deg = math.degrees(np.arccos(cos_theta)) % 360
    # Flip the sign when lineB's endpoint lies below VectorP's line.
    if lineB[1][1] < VectorP.slope * lineB[1][0] + VectorP.intercept:
        angle_deg = -angle_deg
    return -1 * angle_deg
# Manual smoke test: build a projection from lineA and measure the angle to lineB.
if __name__ == '__main__':
    # input lines
    lineA = ((1, 1),(1, 2))
    # lineA = np.array([[-2, 2],[-4, 1.5]])
    lineB = ((1, 2),(2, 1))
    # projected vector
    vectorP = ProjectionVector(lineA)
    a = find_angle(lineB,vectorP)
    # for i in range(0,len(a), 12):
    #     vP = ProjectionVector([a[i], a[i+1]])
    #     find_angle([a[i+1], a[i+2]],vP)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.