blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
004e7855b08c1a4c1e62764c43bc7d5181587346 | 22aa2c87f0bfc2f381c4249ad41c5fac464819ae | /source_code/analytics/analytics.py | 1ab88a05effbe0d54b59c97a27d70b9efa33c2ff | [
"MIT"
] | permissive | sohailhabib/SecurityMetrics | a25e2bc21f1ae35a5953b0b0c9d78d2c679c5a3b | 7de3f462e89d97592e0c28a623bd6f7112b9a3b1 | refs/heads/master | 2023-08-02T08:32:43.910254 | 2021-10-01T01:56:31 | 2021-10-01T01:56:31 | 401,510,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | """
MIT License
Copyright (c) 2021, Sohail Habib
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------------------------------------------------
Analytics Base Class
=====================
This is the abstract base class for analytics.
"""
import abc
class Analytics(object, metaclass=abc.ABCMeta):
    """Abstract base class for all analytics.

    Subclasses must implement ``__init__`` and ``get_analytics``.

    NOTE: the original code assigned ``__metaclass__ = abc.ABCMeta`` inside the
    class body, which is Python 2 syntax and has *no effect* under Python 3 —
    the abstract methods were never actually enforced.  Declaring the
    metaclass in the class header restores the intended behavior: trying to
    instantiate ``Analytics`` directly now raises ``TypeError``.
    """

    @abc.abstractmethod
    def __init__(self):
        """Initialize the analytics object.

        Subclasses must override this.
        """
        return

    @abc.abstractmethod
    def get_analytics(self, data):
        """Return a list containing the names of all features.

        Parameters
        ----------
        data : object
            The input data to derive the features from.

        Returns
        -------
        list of str
            The list of feature names.
        """
        return
| [
"sohailyamin@gmail.com"
] | sohailyamin@gmail.com |
a043d06cf760add938e1077f6f8fbe72858f2ca7 | b5937131741010ec042d04a51cb51dd20157c637 | /Lists-upto-exception handling/Functions, File Handling, Modules and Exception Handling/primefactors.py | 9bc3c7663efa81f4a184303874ea2967b34d19ed | [] | no_license | Shakirsadiq6/Basic-Python-Programs | 5314961b5eb472b12dcec045e1f5516d9f2ee3c2 | 2aea264d61acf0704b0bb66cf0ba936925d30cb9 | refs/heads/master | 2023-07-07T04:37:44.664604 | 2021-08-03T09:47:03 | 2021-08-03T09:47:03 | 345,914,173 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | '''Write a function to obtain the prime factors of this number'''
__author__ = "Shakir Sadiq"
def primefactors(number):
    """Print the prime factorization of ``number``, one factor per line.

    Parameters
    ----------
    number : int
        The integer to factor.  Values below 2 produce no output (the
        original looped forever on 0).
    """
    # Guard: 0 made the original spin forever (0 % 2 == 0 indefinitely),
    # and numbers < 2 have no prime factors.
    if number < 2:
        return
    # Strip out all factors of 2 first so the main loop only needs to try
    # odd candidates.  Floor division keeps the value an int (the original
    # used "/", silently converting to float).
    while number % 2 == 0:
        print(2)
        number //= 2
    # Trial division only needs candidates up to sqrt(number): the original
    # used number**2 (the square, not the square root), making the loop
    # accidentally O(n) instead of O(sqrt(n)) per remaining value.
    for i in range(3, int(number ** 0.5) + 1, 2):
        while number % i == 0:
            print(i)
            number //= i
    # Whatever remains greater than 2 is itself a prime factor.
    if number > 2:
        print(number)
# Script entry point: read an integer from the user and print its prime
# factors; a non-integer input is reported instead of crashing.
try:
    number = int(input("Enter any number:"))
    primefactors(number)  # function call
except ValueError:
    # int() raised because the input was not a valid integer literal.
    print("Enter a valid number.")
| [
"shakirsadiq24@gmail.com"
] | shakirsadiq24@gmail.com |
caa61f239dd804cfd346a5bfbdd0c96f9db3019c | aa853a9094fff4b6e9b0ddc7469be29ad5f0f811 | /poi_account_discount/__init__.py | fbcb99378cb4816bc628da5b14f4c85a93bfbda9 | [] | no_license | blue-connect/illuminati | 40a13e1ebeaceee39f17caa360f79e8deeaebf58 | 6682e60630064641474ddb2d8cbc520e30f64832 | refs/heads/master | 2022-01-06T00:55:58.465611 | 2018-11-24T04:30:03 | 2018-11-24T04:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Poiesis Consulting (<http://www.poiesisconsulting.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import account
import order
import wizard | [
"yori.quisbert@poiesisconsulting.com"
] | yori.quisbert@poiesisconsulting.com |
a43d8200644ab55109d29dd296181d6035385a15 | c3293f55218021c8bc3856e25829fe0b4c140dd0 | /code/GUI.py | 47296cdbb867420fb2b27b703bb27e87399884e1 | [] | no_license | BoGuo86/induction-motor-design-tool | f228298c8a1ec062cd9eff1f85def81a0334a1a4 | b8489f4613dfa431a486ed0ca2f6c1f5999972cb | refs/heads/master | 2022-12-23T01:24:33.592488 | 2020-09-29T21:42:26 | 2020-09-29T21:42:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,787 | py | # Importing modules
import threading
from tkinter import *
from tkinter import ttk
from tkinter.ttk import *
from mttkinter import mtTkinter
from PIL import ImageTk, Image
from Variable_Declaration import Variable
import guli
import time
class Thread_main(threading.Thread):
    """Worker thread that runs the design calculation (``Execute``) so the
    Tk event loop stays responsive while the computation runs."""

    def __init__(self, name):
        super().__init__()
        # Expose the caller-supplied label as the thread's name.
        self.name = name

    def run(self):
        # Delegates all work to the module-level Execute() function.
        Execute()
# Default Values for the tunable calculation parameters (editable later via
# the Options dialog).  NOTE(review): "Toruqe" is a typo for "Torque", but the
# names are used consistently throughout the module, so they are kept as-is.
MagnetizingCurrent_Max = 30
MagnetizingCurrent_Min = 15
StartingToruqe_Precision = 90
MaxToruqe_Precision = 90
Calculation_Combinations = 5

# Lists of selectable options for the drop-down menus.
Material_Elements = ["M-15", "M-19", "M-22", "M-27", "M-36", "M-43", "M-45"]
StatorSlot_Elements = ["Rectangular", "Trapezoidal", "Rounded", "Circular"]
RotorSlot_Elements = ["Rectangular", "Trapezoidal", "Rounded", "Circular"]

# Initializing Tk: the single top-level window for the whole application.
root = Tk()
root.title('Induction Motor Design Tool')

# Labeled frames that partition the main window grid into sections.
frame_1 = LabelFrame(root, text="Nominal Data")
frame_1.grid(row=0, column=0, rowspan=3, sticky="snew", padx=4)
frame_2 = LabelFrame(root, text="Ratios")
frame_2.grid(row=2, column=1, sticky="snew", padx=4)
frame_3 = LabelFrame(root, text="Materials")
frame_3.grid(row=0, column=2, rowspan=2, sticky="nsew", padx=4, pady=4)
frame_4 = LabelFrame(root, text="Conditions")
frame_4.grid(row=2, column=2, sticky="nsew", padx=4)
frame_5 = LabelFrame(root, text="Power [W]/Torque [Nm]")
frame_5.grid(row=1, column=1, sticky="snew", padx=4, pady=4)
frame_6 = LabelFrame(root, text="Slot Type")
frame_6.grid(row=0, column=3, rowspan=3, sticky="snew", padx=4, pady=4)
frame_7 = LabelFrame(root, text="Write in File")
frame_7.grid(row=0, column=1, sticky="snew", padx=4, pady=4)
# Fields
# Nominal Data Frame (frame_1): labels and entry boxes for the motor's
# nameplate data.
Voltage = Label(frame_1, text="Voltage [V]")
Voltage_Entry = Entry(frame_1, width=10)
Frequency = Label(frame_1, text="Frequency [f]")
Frequency_Entry = Entry(frame_1, width=10)
Num_Poles = Label(frame_1, text="Num. of Poles")
Num_Poles_Entry = Entry(frame_1, width=10)
Eta = Label(frame_1, text="Efficiency")
Eta_Entry = Entry(frame_1, width=10)
PowerFactor = Label(frame_1, text="Power Factor")
PowerFactor_Entry = Entry(frame_1, width=10)
Winding = Label(frame_1, text="Winding")
Winding_Clicked = StringVar()
Winding_Clicked.set("Delta")
Winding_Menu = OptionMenu(frame_1, Winding_Clicked, "Delta", "Star")

# Conductivity input (frame_3), pre-filled with 57 Sm/mm^2 (copper).
# NOTE(review): the Label objects below are assigned the result of .pack(),
# which is None — the widget references are lost, but they are never used.
Conductivity_Label1 = Label(frame_3, text="Conductivity").pack(pady=2)
Conductivity_Label2 = Label(frame_3, text="[Sm/mm^2]").pack(pady=2)
Conductivity_Entry = Entry(frame_3, width=10)
Conductivity_Entry.pack(pady=2)
Conductivity_Entry.insert(0, "57")

# Core-material drop-down (frame_3).
Material = Label(frame_3, text="Core Material")
Material_Clicked = StringVar()
Material_Clicked.set(Material_Elements[0])
Material_Menu = OptionMenu(frame_3, Material_Clicked, *Material_Elements)
Material.pack(pady=2)
Material_Menu.pack(pady=2)

# Ratio entries (frame_2): starting current / starting torque / max torque.
Kp = Label(frame_2, text="Starting Current Ratio")
Kp_Entry = Entry(frame_2, width=10)
K_Mp = Label(frame_2, text="Starting Torque Ratio")
K_Mp_Entry = Entry(frame_2, width=10)
K_Mm = Label(frame_2, text="Maximum Torque Ratio")
K_Mm_Entry = Entry(frame_2, width=10)

# Lay out the nominal-data widgets with pack().
Voltage.pack(pady=2, padx=2)
Voltage_Entry.pack(pady=2)
Frequency.pack(pady=2, padx=2)
Frequency_Entry.pack(pady=2)
Num_Poles.pack(pady=2, padx=2)
Num_Poles_Entry.pack(pady=2)
Eta.pack(pady=2, padx=2)
Eta_Entry.pack(pady=2)
PowerFactor.pack(pady=2, padx=2)
PowerFactor_Entry.pack(pady=2)
Winding.pack(pady=2, padx=2)
Winding_Menu.pack(pady=2)
"""
Voltage.grid(row=0, column=0)
Voltage_Entry.grid(row=0, column=1)
Frequency.grid(row=1, column=0)
Frequency_Entry.grid(row=1, column=1)
Num_Poles.grid(row=2, column=0)
Num_Poles_Entry.grid(row=2, column=1)
Eta.grid(row=3, column=0)
Eta_Entry.grid(row=3, column=1)
PowerFactor.grid(row=4, column=0)
PowerFactor_Entry.grid(row=4, column=1)
Winding.grid(row=5, column=0)
Winding_Menu.grid(row=5, column=1)
"""
# Lay out the ratio widgets.
Kp.pack(pady=2, padx=2)
Kp_Entry.pack(pady=2)
K_Mp.pack(pady=2, padx=2)
K_Mp_Entry.pack(pady=2)
K_Mm.pack(pady=2, padx=2)
K_Mm_Entry.pack(pady=2)

# Check Boxes
# Conditions frame (frame_4): which constraints the calculation must honor.
Eta_Box_Var = IntVar()
PowerFactor_Box_Var = IntVar()
Kp_Box_Var = IntVar()
K_Mp_Box_Var = IntVar()
K_Mm_Box_Var = IntVar()
Eta_Box = Checkbutton(frame_4, text="Efficiency", variable=Eta_Box_Var)
PowerFactor_Box = Checkbutton(frame_4, text="Power Factor", variable=PowerFactor_Box_Var)
Kp_Box = Checkbutton(frame_4, text="Kp", variable=Kp_Box_Var)
K_Mp_Box = Checkbutton(frame_4, text="kMp", variable=K_Mp_Box_Var)
K_Mm_Box = Checkbutton(frame_4, text="kMm", variable=K_Mm_Box_Var)
PowerFactor_Box.pack(padx=4, pady=2, anchor="w")
Eta_Box.pack(padx=4, pady=2, anchor="w")
Kp_Box.pack(padx=4, pady=2, anchor="w")
K_Mp_Box.pack(padx=4, pady=2, anchor="w")
K_Mm_Box.pack(padx=4, pady=2, anchor="w")

# Radio Buttons: r selects Power (1) vs Torque (2) input mode.
# NOTE(review): "a" appears unused at module level; r.set("1") passes a
# string to an IntVar, which Tk converts — works, but r.set(1) would be
# clearer.  Kept byte-identical.
a=0
r = IntVar()
r.set("1")
def Execute():
    """Collect every GUI field and hand the whole parameter set to the
    calculation entry point (``Variable`` from Variable_Declaration).

    Reads Tk variables/entries at call time, so this must run after the
    widgets above exist.  Called from the worker thread (Thread_main.run).
    """
    Variable(Pn_Mn.get(),
             Voltage_Entry.get(),
             Frequency_Entry.get(),
             Kp_Entry.get(),
             K_Mm_Entry.get(),
             K_Mp_Entry.get(),
             Num_Poles_Entry.get(),
             Eta_Entry.get(),
             PowerFactor_Entry.get(),
             Winding_Clicked.get(),
             Material_Clicked.get(),
             Conductivity_Entry.get(),
             Eta_Box_Var.get(),
             PowerFactor_Box_Var.get(),
             Kp_Box_Var.get(),
             K_Mp_Box_Var.get(),
             K_Mm_Box_Var.get(),
             r.get(),
             StatorSlot_Clicked.get(),
             RotorSlot_Clicked.get(),
             MagnetizingCurrent_Max,
             MagnetizingCurrent_Min,
             StartingToruqe_Precision,
             MaxToruqe_Precision,
             Calculation_Combinations
             )
def Start():
    """Launch the calculation on a background thread (bound to the OK
    button) so the Tk main loop is not blocked."""
    a = Thread_main("GUI")
    a.start()

# NOTE(review): "flag" is never read anywhere in this module — presumably
# leftover state; verify before removing.
flag = 1
def Options():
    """Open the Options dialog where the calculation tuning parameters
    (magnetizing-current bounds, torque accuracies, combination count)
    can be edited.  The values are committed by Options_Destroy when the
    dialog's OK button is pressed.
    """
    global MagnetizingCurrent_Max
    global MagnetizingCurrent_Min
    global StartingToruqe_Precision
    global MaxToruqe_Precision
    global Calculation_Combinations
    OptionsWindow = Toplevel()
    OptionsWindow.title('Proektiranje na Asinhron Motor - Options')
    MagnetizingCurrent_Max_Label = Label(OptionsWindow, text="Maximum Magnetizing Current [%]").grid(row=0, column=0, padx=4, pady=2, sticky="w")
    MagnetizingCurrent_Max_Entry = Entry(OptionsWindow, width=10)
    MagnetizingCurrent_Max_Entry.grid(row=0, column=1, pady=2, padx=4)
    MagnetizingCurrent_Max_Entry.insert(0, MagnetizingCurrent_Max)
    # NOTE(review): this immediately re-reads the entry just after inserting
    # the default — unlike the other fields below.  Looks redundant (the real
    # commit happens in Options_Destroy); confirm before changing.
    MagnetizingCurrent_Max = MagnetizingCurrent_Max_Entry.get()
    MagnetizingCurrent_Min_Label = Label(OptionsWindow, text="Minimum Magnetizing Current [%]").grid(row=1, column=0, padx=4, pady=2, sticky="w")
    MagnetizingCurrent_Min_Entry = Entry(OptionsWindow, width=10)
    MagnetizingCurrent_Min_Entry.grid(row=1, column=1, pady=2, padx=4)
    MagnetizingCurrent_Min_Entry.insert(0, MagnetizingCurrent_Min)
    StartingToruqe_Precision_Label = Label(OptionsWindow, text="Starting Toruqe Accuracy [%]").grid(row=2, column=0, padx=4, pady=2, sticky="w")
    StartingToruqe_Precision_Entry = Entry(OptionsWindow, width=10)
    StartingToruqe_Precision_Entry.grid(row=2, column=1, pady=2, padx=4)
    StartingToruqe_Precision_Entry.insert(0, StartingToruqe_Precision)
    MaxToruqe_Precision_Label = Label(OptionsWindow, text="Maximum Toruqe Accuracy [%]").grid(row=3, column=0, padx=4, pady=2, sticky="w")
    MaxToruqe_Precision_Entry = Entry(OptionsWindow, width=10)
    MaxToruqe_Precision_Entry.grid(row=3, column=1, pady=2, padx=4)
    MaxToruqe_Precision_Entry.insert(0, MaxToruqe_Precision)
    Calculation_Combinations_Label = Label(OptionsWindow, text="Calculation Combinations").grid(row=4, column=0, padx=4, pady=2, sticky="w")
    Calculation_Combinations_Entry = Entry(OptionsWindow, width=10)
    Calculation_Combinations_Entry.grid(row=4, column=1, pady=2, padx=4)
    Calculation_Combinations_Entry.insert(0, Calculation_Combinations)
    # OK button: capture the current entry texts and commit them to the
    # module-level settings via Options_Destroy.
    Okey_2 = Button(OptionsWindow, text="OK", command=lambda: Options_Destroy(MagnetizingCurrent_Max_Entry.get(), MagnetizingCurrent_Min_Entry.get(), StartingToruqe_Precision_Entry.get(), MaxToruqe_Precision_Entry.get(), Calculation_Combinations_Entry.get())).grid(row=5, column=1, padx=4, pady=4)
    """
    eta_Precision_Label = Label(OptionsWindow, text="Efficiency Deviation [%]").grid(row=4, column=0)
    eta_Precision_Entry = Entry(OptionsWindow, width=10)
    eta_Precision_Entry.grid(row=4, column=1)
    eta_Precision_Entry.insert(0, "90")
    PowerFactor_Precision_Label = Label(OptionsWindow, text="Power Factor Deviation [%]").grid(row=5, column=0)
    PowerFactor_Precision_Entry = Entry(OptionsWindow, width=10)
    PowerFactor_Precision_Entry.grid(row=0, column=1)
    PowerFactor_Precision_Entry.insert(5, "90")
    """
def Options_Destroy(MagnetizingCurrent_Max_Entry, MagnetizingCurrent_Min_Entry, StartingToruqe_Precision_Entry, MaxToruqe_Precision_Entry, Calculation_Combinations_Entry):
    """Commit the Options-dialog values to the module-level settings.

    Despite the name, this does not destroy the dialog window — it only
    copies the entry values (still strings at this point) into the globals
    consumed by Execute().
    """
    global MagnetizingCurrent_Max
    global MagnetizingCurrent_Min
    global StartingToruqe_Precision
    global MaxToruqe_Precision
    global Calculation_Combinations
    MagnetizingCurrent_Max = MagnetizingCurrent_Max_Entry
    MagnetizingCurrent_Min = MagnetizingCurrent_Min_Entry
    StartingToruqe_Precision = StartingToruqe_Precision_Entry
    MaxToruqe_Precision = MaxToruqe_Precision_Entry
    Calculation_Combinations = Calculation_Combinations_Entry
# Okey Buttons
Okey = Button(root, text="OK", command=Start).grid(row=3, column=3, padx=4, sticky="e")
# Frame Power/Toruqe (frame_5)
Pn_Mn = Entry(frame_5, width=7)
Pn_Mn.pack(side='left', padx=4)
Pn_Radio = Radiobutton(frame_5, text="Pn", variable=r, value=1).pack(side='left')
Mn_Radio = Radiobutton(frame_5, text="Mn", variable=r, value=2).pack(side='left')
# Option Button
OptionsButton = Button(root, text="Options", command=Options).grid(row=4, column=3, padx=4, pady=4, sticky="e")
# Help Button
HelpButton = Button(root, text="Help").grid(row=4, column=0, padx=4, pady=4, sticky="w")
# Progress Bar
global progress
progress = Progressbar(root, orient = HORIZONTAL,
length = 100, mode = 'determinate')
progress.grid(row=3, column = 0, columnspan = 3, sticky = 'nsew', padx=3, pady=4)
"""
timer = IntVar()
timer.set(0)
#global bar
#bar = guli.GuliVariable("bar").setValue(0)
def UpdateBar(bar):
progress['value'] = bar
def timer_callback(*args):
global progress
global bar
progress['value'] = bar
#time.time.trace("w", timer_callback)
"""
# Slot Type (frame_6)
# Stator Slot
StatorSlot_Label = Label(frame_6, text="Stator Slot").pack()
StatorSlot_Clicked = StringVar()
StatorSlot_Clicked.set(StatorSlot_Elements[0])
StatorSlot_Menu = OptionMenu(frame_6, StatorSlot_Clicked, *StatorSlot_Elements).pack(padx=4, pady=2)
## Rotor Slot
RotorSlot_Label = Label(frame_6, text="Rotor Slot").pack()
RotorSlot_Clicked = StringVar()
RotorSlot_Clicked.set(RotorSlot_Elements[0])
RotorSlot_Menu = OptionMenu(frame_6, RotorSlot_Clicked, *RotorSlot_Elements).pack(padx=4, pady=2)
Slot_Image = Image.open("images/Rectangular_Slot.png")
Slot_Image = Slot_Image.resize((130, 135), Image.ANTIALIAS)
Slot_Image=ImageTk.PhotoImage(Slot_Image)
Slot_Label = Label(frame_6, image=Slot_Image)
Slot_Label.pack(padx=4, pady=2)
def _show_slot_image(slot_name):
    """Replace the slot preview image in frame_6 with the picture for
    *slot_name* (loaded from ``images/<slot_name>_Slot.png``).

    Keeps module-level references to both the PhotoImage and the Label so
    Tk does not garbage-collect the image.
    """
    global Slot_Label
    global Slot_Image
    Slot_Label.pack_forget()
    Slot_Image = Image.open("images/{}".format(slot_name) + "_Slot.png")
    Slot_Image = Slot_Image.resize((130, 135), Image.ANTIALIAS)
    Slot_Image = ImageTk.PhotoImage(Slot_Image)
    Slot_Label = Label(frame_6, image=Slot_Image)
    Slot_Label.pack(padx=4, pady=2)

# The two trace callbacks were byte-for-byte duplicates except for which
# StringVar they read; both now delegate to the shared helper above.
def stator_callback(*args):
    """Trace callback: stator slot selection changed."""
    _show_slot_image(StatorSlot_Clicked.get())

def rotor_callback(*args):
    """Trace callback: rotor slot selection changed."""
    _show_slot_image(RotorSlot_Clicked.get())

StatorSlot_Clicked.trace("w", stator_callback)
RotorSlot_Clicked.trace("w", rotor_callback)
# Output File (frame_7): which report formats to write.
WriteCSV_Var = IntVar()
WritePDF_Var = IntVar()
WriteCSV = Checkbutton(frame_7, text="Write in .csv", variable=WriteCSV_Var)
WritePDF = Checkbutton(frame_7, text="Write in .pdf", variable=WritePDF_Var)
WriteCSV.pack(padx=4, pady=2, anchor="w")
WritePDF.pack(padx=4, pady=2, anchor="w")
#myButton = Button(root, text='Da', padx=100, command=Click)
#myButton.pack()

# Reset the cross-thread progress value shared via guli, then run a manual
# event loop (instead of root.mainloop()) so the progress bar can be polled.
# NOTE(review): this is a busy-wait loop pinning a CPU core; a root.after()
# callback would achieve the same polling without spinning.
guli.GuliVariable("bar").setValue(0)
print ("NESTO")
while 1:
    try:
        # The worker thread publishes its progress through "bar".
        progress['value'] = guli.GuliVariable("bar").get()
    except ValueError:
        # "bar" not parseable as a number yet; keep the previous value.
        pass
    root.update_idletasks()
    root.update()
| [
"andrejgrunesi@gmail.com"
] | andrejgrunesi@gmail.com |
69ce65da047bda6776179e27ce17ebcda32a87e1 | 040a6cc313a6200da1d176191707bfb896053db4 | /descarteslabs/catalog/catalog_base.py | 0729318b9da2c1d50cf45c9f6c684f13b12fdd4c | [
"Apache-2.0"
] | permissive | aashish24/descarteslabs-python | 77747984994609205887262bafeec5e9d38fcd0c | 00149115e8ef6cd1f48b0a6c689f5da07f69c306 | refs/heads/master | 2022-11-19T02:02:00.959896 | 2020-07-24T16:23:55 | 2020-07-24T16:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,013 | py | from six import add_metaclass, iteritems, ensure_str, wraps
from types import MethodType
import json
from descarteslabs.client.exceptions import NotFoundError
from .attributes import (
AttributeMeta,
AttributeValidationError,
AttributeEqualityMixin,
DocumentState,
Timestamp,
ListAttribute,
ExtraPropertiesAttribute,
TypedAttribute,
)
from .catalog_client import CatalogClient, HttpRequestMethod
class DeletedObjectError(Exception):
    """Indicates that an action cannot be performed.

    Raised when some action cannot be performed because the catalog object
    has been deleted from the Descartes Labs catalog using the delete method
    (e.g. :py:meth:`Product.delete`).
    """

    pass
class UnsavedObjectError(Exception):
    """Indicates that an action cannot be performed.

    Raised when trying to delete an object that hasn't been saved.
    """

    pass
def check_deleted(f):
    """Decorator for instance methods: refuse to operate on a deleted object.

    Raises `DeletedObjectError` up front when the instance is already marked
    deleted, and converts a `NotFoundError` raised by the wrapped call into a
    `DeletedObjectError`, marking the instance as deleted as a side effect.
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if self.state == DocumentState.DELETED:
            raise DeletedObjectError("This catalog object has been deleted.")
        try:
            return f(self, *args, **kwargs)
        except NotFoundError as e:
            # The service no longer knows this object — remember that locally
            # so subsequent calls fail fast without a round trip.
            self._deleted = True
            # Re-raise as DeletedObjectError, keeping the original traceback
            # for debugging but suppressing the NotFoundError chain
            # ("from None") so callers see a single clean exception.
            raise DeletedObjectError(
                "{} instance with id {} has been deleted".format(
                    self.__class__.__name__, self.id
                )
            ).with_traceback(e.__traceback__) from None

    return wrapper
def check_derived(f):
    """Decorator for instance methods that only make sense on a concrete
    derived class (one that defines a service URL via ``_url``)."""
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        # A derived class always sets _url; the abstract base leaves it None.
        if self._url is not None:
            return f(self, *args, **kwargs)
        raise TypeError(
            "This method is only available for a derived class of 'CatalogObject'"
        )

    return wrapper
def _new_abstract_class(cls, abstract_cls):
    """Helper for ``__new__`` implementations of abstract base classes.

    Allocates an instance when ``cls`` is a proper subclass of
    *abstract_cls*; raises `TypeError` when code tries to instantiate the
    abstract class itself.
    """
    if cls is abstract_cls:
        raise TypeError(
            "You can only instantiate a derived class of '{}'".format(
                abstract_cls.__name__
            )
        )

    # Delegate to the next __new__ in the MRO above abstract_cls.
    return super(abstract_cls, cls).__new__(cls)
class CatalogObjectMeta(AttributeMeta):
    """Metaclass for catalog model classes.

    Extends `AttributeMeta` to (1) register every concrete model class by
    its ``(_doc_type, _derived_type)`` pair so service documents can be
    mapped back to the right Python class, and (2) append the per-instance
    ``delete`` documentation to the class docstring.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super(CatalogObjectMeta, cls).__new__(cls, name, bases, attrs)

        if new_cls._doc_type:
            # Register for _get_model_class() lookups.
            new_cls._model_classes_by_type_and_derived_type[
                (new_cls._doc_type, new_cls._derived_type)
            ] = new_cls

        if new_cls.__doc__ is not None and new_cls._instance_delete.__doc__ is not None:
            # Careful with this; leading white space is very significant
            new_cls.__doc__ += (
                """

        Methods
        -------
        delete()
            """
                + new_cls._instance_delete.__doc__
            )
        return new_cls
@add_metaclass(CatalogObjectMeta)
class CatalogObjectBase(AttributeEqualityMixin):
    """A base class for all representations of top level objects in the Catalog API."""

    # The following can be overridden by subclasses to customize behavior:

    # JSONAPI type for this model (required)
    _doc_type = None
    # Path added to the base URL for a list request of this model (required)
    _url = None
    # List of related objects to include in read requests
    _default_includes = []
    # The derived type of this class
    _derived_type = None
    # Attribute to use to determine the derived type of an instance
    _derived_type_switch = None

    # Registry mapping (_doc_type, _derived_type) -> model class; populated
    # by CatalogObjectMeta for every concrete subclass.
    _model_classes_by_type_and_derived_type = {}

    id = TypedAttribute(
        str,
        mutable=False,
        serializable=False,
        doc="""str, immutable: A unique identifier for this object.

        Note that if you pass a string that does not begin with your Descartes Labs
        user organization ID, it will be prepended to your `id` with a ``:`` as
        separator.  If you are not part of an organization, your user ID is used.  Once
        set, it cannot be changed.
        """,
    )
    created = Timestamp(
        readonly=True,
        doc="""datetime, readonly: The point in time this object was created.

        *Filterable, sortable*.
        """,
    )
    modified = Timestamp(
        readonly=True,
        doc="""datetime, readonly: The point in time this object was last modified.

        *Filterable, sortable*.
        """,
    )
    def __new__(cls, *args, **kwargs):
        # Forbid instantiating the abstract base class directly.
        return _new_abstract_class(cls, CatalogObjectBase)
    def __init__(self, **kwargs):
        """Initialize a catalog object from keyword attributes.

        Control kwargs popped before attribute assignment: ``client``
        (a CatalogClient; defaults to the default client), ``id``,
        ``_saved``, ``_relationships`` and ``_related_objects`` (the last
        three are used when hydrating from a service response).  Every
        remaining kwarg is treated as an attribute value.
        """
        # Bind the instance-level delete method; presumably this shadows a
        # class-level delete on subclasses — confirm against subclasses.
        self.delete = self._instance_delete
        self._client = kwargs.pop("client", None) or CatalogClient.get_default_client()
        self._attributes = {}
        self._modified = set()

        self._initialize(
            id=kwargs.pop("id", None),
            saved=kwargs.pop("_saved", False),
            relationships=kwargs.pop("_relationships", None),
            related_objects=kwargs.pop("_related_objects", None),
            **kwargs
        )
    def __del__(self):
        # Let each attribute descriptor release its per-instance state;
        # validation is skipped since the object is being torn down.
        for attr_type in self._attribute_types.values():
            attr_type.__delete__(self, validate=False)
def _clear_attributes(self):
self._mapping_attribute_instances = {}
self._clear_modified_attributes()
# This only applies to top-level attributes
sticky_attributes = {}
for name, value in self._attributes.items():
attribute_type = self._attribute_types.get(name)
if attribute_type._sticky:
sticky_attributes[name] = value
self._attributes = sticky_attributes
    def _initialize(
        self,
        id=None,
        saved=False,
        relationships=None,
        related_objects=None,
        deleted=False,
        **kwargs
    ):
        """(Re)populate this instance's attribute state.

        Used both for fresh construction (``saved=False``: values are
        validated and tracked as modifications) and for hydrating from a
        service response (``saved=True``: validation and modification
        tracking are skipped, unknown attributes are silently ignored).
        """
        self._clear_attributes()
        self._saved = saved
        self._deleted = deleted

        # This is an immutable attribute; can only be set once
        if id:
            self.id = id

        for (name, val) in iteritems(kwargs):
            # Only silently ignore unknown attributes if data came from service
            attribute_definition = (
                self._attribute_types.get(name)
                if saved
                else self._get_attribute_type(name)
            )
            if attribute_definition is not None:
                attribute_definition.__set__(self, val, validate=not saved)

        # Wire up reference attributes (e.g. product for a band): the id
        # field and the object, when both given, must agree.
        for name, t in iteritems(self._reference_attribute_types):
            id_value = kwargs.get(t.id_field)
            if id_value is not None:
                object_value = kwargs.get(name)
                if object_value and object_value.id != id_value:
                    message = (
                        "Conflicting related object reference: '{}' was '{}' "
                        "but '{}' was '{}'"
                    ).format(t.id_field, id_value, name, object_value.id)
                    raise AttributeValidationError(message)

                if related_objects:
                    related_object = related_objects.get(
                        (t.reference_class._doc_type, id_value)
                    )
                    if related_object is not None:
                        t.__set__(self, related_object, validate=not saved)

        if saved:
            # The instance now mirrors the service; nothing is "modified".
            self._clear_modified_attributes()
    def __repr__(self):
        """Multi-line human-readable summary: type and name, id, related
        object ids, creation time, and a warning line for non-SAVED state."""
        name = ensure_str(self.name) if getattr(self, "name", None) is not None else ""
        sections = [
            # Document type and ID
            "{}: {}\n id: {}".format(self.__class__.__name__, name, self.id)
        ]
        # related objects and their ids
        for name in sorted(self._reference_attribute_types):
            t = self._reference_attribute_types[name]
            # as a temporary hack for image upload, handle missing image_id field
            sections.append(" {}: {}".format(name, getattr(self, t.id_field, None)))
        if self.created:
            sections.append(" created: {:%c}".format(self.created))

        if self.state == DocumentState.DELETED:
            sections.append("* Deleted from the Descartes Labs catalog.")
        elif self.state != DocumentState.SAVED:
            sections.append(
                "* Not up-to-date in the Descartes Labs catalog. Call `.save()` to save or update this record."
            )

        return "\n".join(sections)
def __eq__(self, other):
if (
not isinstance(other, self.__class__)
or self.id != other.id
or self.state != other.state
):
return False
return super(CatalogObjectBase, self).__eq__(other)
    def __setattr__(self, name, value):
        """Reject assignment to names that are not declared attributes
        (private names and bound methods are exempt)."""
        if not (name.startswith("_") or isinstance(value, MethodType)):
            # Make sure it's a proper attribute; raises AttributeError if not.
            self._get_attribute_type(name)
        super(CatalogObjectBase, self).__setattr__(name, value)
@property
def is_modified(self):
"""bool: Whether any attributes were changed (see `state`).
``True`` if any of the attribute values changed since the last time this
catalog object was retrieved or saved. ``False`` otherwise.
Note that assigning an identical value does not affect the state.
"""
return bool(self._modified)
@classmethod
def _get_attribute_type(cls, name):
try:
return cls._attribute_types[name]
except KeyError:
raise AttributeError("{} has no attribute {}".format(cls.__name__, name))
    @classmethod
    def _get_model_class(cls, serialized_object):
        """Map a serialized JSONAPI document to the registered model class.

        Looks up the document's ``type`` in the class registry; when that
        base class declares a ``_derived_type_switch``, the corresponding
        attribute value selects the concrete derived class instead.
        """
        class_type = serialized_object["type"]
        klass = cls._model_classes_by_type_and_derived_type.get((class_type, None))

        if klass._derived_type_switch:
            derived_type = serialized_object["attributes"][klass._derived_type_switch]
            klass = cls._model_classes_by_type_and_derived_type.get(
                (class_type, derived_type)
            )

        return klass
    @classmethod
    def _serialize_filter_attribute(cls, name, value):
        """Serialize a single value for a filter.

        Allow the given value to be serialized using the serialization logic
        of the given attribute.  This method should only be used to serialize
        a filter value.

        Parameters
        ----------
        name : str
            The name of the attribute used for serialization logic.
        value : object
            The value to be serialized.

        Raises
        ------
        AttributeValidationError
            If the attribute is not serializable.
        """
        attribute_type = cls._get_attribute_type(name)
        # Filters compare against individual items of a list attribute, so
        # use the item type's serialization, not the list's.
        if isinstance(attribute_type, ListAttribute):
            attribute_type = attribute_type._attribute_type
        return attribute_type.serialize(value)
    def _set_modified(self, attr_name, changed=True, validate=True):
        """Record that *attr_name* changed, enforcing mutability rules.

        When ``validate`` is set, readonly attributes are rejected outright
        and immutable attributes are rejected once they already hold a value.
        When ``changed`` is set, the name is added to the modified set that
        drives `is_modified` and partial serialization.
        """
        # Verify it is allowed to be set
        attr = self._get_attribute_type(attr_name)
        if validate:
            if attr._readonly:
                raise AttributeValidationError(
                    "Can't set '{}' because it is a readonly attribute".format(
                        attr_name
                    )
                )
            if not attr._mutable and attr_name in self._attributes:
                raise AttributeValidationError(
                    "Can't set '{}' because it is an immutable attribute".format(
                        attr_name
                    )
                )

        if changed:
            self._modified.add(attr_name)
def _serialize(self, attrs, jsonapi_format=False):
serialized = {}
for name in attrs:
value = self._attributes[name]
attribute_type = self._get_attribute_type(name)
if attribute_type._serializable:
serialized[name] = attribute_type.serialize(
value, jsonapi_format=jsonapi_format
)
return serialized
    @check_deleted
    def update(self, ignore_errors=False, **kwargs):
        """Update multiple attributes at once using the given keyword arguments.

        Parameters
        ----------
        ignore_errors : bool, optional
            ``False`` by default.  When set to ``True``, it will suppress
            `AttributeValidationError` and `AttributeError`.  Any given attribute that
            causes one of these two exceptions will be ignored, all other attributes
            will be set to the given values.

        Raises
        ------
        AttributeValidationError
            If one or more of the attributes being updated are immutable.
        AttributeError
            If one or more of the attributes are not part of this catalog object.
        DeletedObjectError
            If this catalog object was deleted.
        """
        # Snapshot current state so the whole update can be rolled back if
        # any single assignment fails (all-or-nothing unless ignore_errors).
        original_values = dict(self._attributes)
        original_modified = set(self._modified)

        for (name, val) in iteritems(kwargs):
            try:
                # A non-existent attribute will raise an AttributeError
                attribute_definition = self._get_attribute_type(name)

                # A bad value will raise an AttributeValidationError
                attribute_definition.__set__(self, val)
            except (AttributeError, AttributeValidationError):
                if ignore_errors:
                    pass
                else:
                    # Roll back everything set during this call, then re-raise.
                    self._attributes = original_values
                    self._modified = original_modified
                    raise
    def serialize(self, modified_only=False, jsonapi_format=False):
        """Serialize the catalog object into json.

        Parameters
        ----------
        modified_only : bool, optional
            Whether only modified attributes should be serialized.  ``False`` by
            default.  If set to ``True``, only those attributes that were modified since
            the last time the catalog object was retrieved or saved will be included.
        jsonapi_format : bool, optional
            Whether to use the ``data`` element for catalog objects.  ``False`` by
            default.  When set to ``False``, the serialized data will directly contain
            the attributes of the catalog object.  If set to ``True``, the serialized
            data will follow the exact JSONAPI with a top-level ``data`` element which
            contains ``id``, ``type``, and ``attributes``.  The latter will contain
            the attributes of the catalog object.
        """
        keys = self._modified if modified_only else self._attributes.keys()
        attributes = self._serialize(keys, jsonapi_format=jsonapi_format)

        if jsonapi_format:
            return self._client.jsonapi_document(self._doc_type, attributes, self.id)
        else:
            return attributes
    def _clear_modified_attributes(self):
        # Forget which attributes changed; the object now mirrors the service.
        self._modified = set()
@property
def state(self):
"""DocumentState: The state of this catalog object."""
if self._deleted:
return DocumentState.DELETED
if self._saved is False:
return DocumentState.UNSAVED
elif self.is_modified:
return DocumentState.MODIFIED
else:
return DocumentState.SAVED
    @classmethod
    def get(cls, id, client=None):
        """Get an existing object from the Descartes Labs catalog.

        If the Descartes Labs catalog object is found, it will be returned in the
        `~descarteslabs.catalog.DocumentState.SAVED` state.  Subsequent changes will
        put the instance in the `~descarteslabs.catalog.DocumentState.MODIFIED` state,
        and you can use :py:meth:`save` to commit those changes and update the Descartes
        Labs catalog object.  Also see the example for :py:meth:`save`.

        For bands, if you request a specific band type, for example
        :meth:`SpectralBand.get`, you will only receive that type.  Use :meth:`Band.get`
        to receive any type.

        Parameters
        ----------
        id : str
            The id of the object you are requesting.
        client : CatalogClient, optional
            A `CatalogClient` instance to use for requests to the Descartes Labs
            catalog.  The
            :py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
            be used if not set.

        Returns
        -------
        :py:class:`~descarteslabs.catalog.CatalogObject` or None
            The object you requested, or ``None`` if an object with the given `id`
            does not exist in the Descartes Labs catalog.

        Raises
        ------
        ClientError or ServerError
            :ref:`Spurious exception <network_exceptions>` that can occur during a
            network request.
        """
        try:
            data, related_objects = cls._send_data(
                method=HttpRequestMethod.GET, id=id, client=client
            )
        except NotFoundError:
            # Absence is part of the contract: signal with None, not an error.
            return None

        model_class = cls._get_model_class(data)
        if not issubclass(model_class, cls):
            # The id exists but belongs to a different (sibling) type than
            # the one requested — treat as not found for this class.
            return None

        return model_class(
            id=data["id"],
            client=client,
            _saved=True,
            _relationships=data.get("relationships"),
            _related_objects=related_objects,
            **data["attributes"]
        )
@classmethod
def get_or_create(cls, id, client=None, **kwargs):
"""Get an existing object from the Descartes Labs catalog or create a new object.
If the Descartes Labs catalog object is found, and the remainder of the
arguments do not differ from the values in the retrieved instance, it will be
returned in the `~descarteslabs.catalog.DocumentState.SAVED` state.
If the Descartes Labs catalog object is found, and the remainder of the
arguments update one or more values in the instance, it will be returned in
the `~descarteslabs.catalog.DocumentState.MODIFIED` state.
If the Descartes Labs catalog object is not found, it will be created and the
state will be `~descarteslabs.catalog.DocumentState.UNSAVED`. Also see the
example for :py:meth:`save`.
Parameters
----------
id : str
The id of the object you are requesting.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
kwargs : dict, optional
With the exception of readonly attributes (`created`, `modified`), any
attribute of a catalog object can be set as a keyword argument (Also see
`ATTRIBUTES`).
Returns
-------
:py:class:`~descarteslabs.catalog.CatalogObject`
The requested catalog object that was retrieved or created.
"""
obj = cls.get(id, client=client)
if obj is None:
obj = cls(id=id, client=client, **kwargs)
else:
obj.update(**kwargs)
return obj
@classmethod
def get_many(cls, ids, ignore_missing=False, client=None):
"""Get existing objects from the Descartes Labs catalog.
All returned Descartes Labs catalog objects will be in the
`~descarteslabs.catalog.DocumentState.SAVED` state. Also see :py:meth:`get`.
For bands, if you request a specific band type, for example
:meth:`SpectralBand.get_many`, you will only receive that type. Use
:meth:`Band.get_many` to receive any type.
Parameters
----------
ids : list(str)
A list of identifiers for the objects you are requesting.
ignore_missing : bool, optional
Whether to raise a `~descarteslabs.client.exceptions.NotFoundError`
exception if any of the requested objects are not found in the Descartes
Labs catalog. ``False`` by default which raises the exception.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
list(:py:class:`~descarteslabs.catalog.CatalogObject`)
List of the objects you requested in the same order.
Raises
------
NotFoundError
If any of the requested objects do not exist in the Descartes Labs catalog
and `ignore_missing` is ``False``.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
if not isinstance(ids, list) or any(not isinstance(id_, str) for id_ in ids):
raise TypeError("ids must be a list of strings")
id_filter = {"name": "id", "op": "eq", "val": ids}
raw_objects, related_objects = cls._send_data(
method=HttpRequestMethod.PUT,
client=client,
json={"filter": json.dumps([id_filter], separators=(",", ":"))},
)
if not ignore_missing:
received_ids = set(obj["id"] for obj in raw_objects)
missing_ids = set(ids) - received_ids
if len(missing_ids) > 0:
raise NotFoundError(
"Objects not found for ids: {}".format(", ".join(missing_ids))
)
objects = [
model_class(
id=obj["id"],
client=client,
_saved=True,
_relationships=obj.get("relationships"),
_related_objects=related_objects,
**obj["attributes"]
)
for obj in raw_objects
for model_class in (cls._get_model_class(obj),)
if issubclass(model_class, cls)
]
return objects
@classmethod
@check_derived
def exists(cls, id, client=None):
"""Checks if an object exists in the Descartes Labs catalog.
Parameters
----------
id : str
The id of the object.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
bool
Returns ``True`` if the given ``id`` represents an existing object in
the Descartes Labs catalog and ``False`` if not.
Raises
------
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
client = client or CatalogClient.get_default_client()
r = None
try:
r = client.session.head(cls._url + "/" + id)
except NotFoundError:
return False
return r and r.ok
@classmethod
@check_derived
def search(cls, client=None):
"""A search query for all objects of the type this class represents.
Parameters
----------
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
Search
An instance of the :py:class:`~descarteslabs.catalog.Search`
class.
Example
-------
>>> search = Product.search().limit(10)
>>> for result in search:
print(result.name)
"""
from .search import Search
return Search(cls, client=client)
    @check_deleted
    def save(self, extra_attributes=None):
        """Saves this object to the Descartes Labs catalog.

        If this instance was created using the constructor, it will be in the
        `~descarteslabs.catalog.DocumentState.UNSAVED` state and is considered a new
        Descartes Labs catalog object that must be created. If the catalog object
        already exists in this case, this method will raise a
        `~descarteslabs.client.exceptions.BadRequestError`.

        If this instance was retrieved using :py:meth:`get`, :py:meth:`get_or_create`
        or any other way (for example as part of a :py:meth:`search`), and any of its
        values were changed, it will be in the
        `~descarteslabs.catalog.DocumentState.MODIFIED` state and the existing catalog
        object will be updated.

        If this instance was retrieved using :py:meth:`get`, :py:meth:`get_or_create`
        or any other way (for example as part of a :py:meth:`search`), and none of its
        values were changed, it will be in the
        `~descarteslabs.catalog.DocumentState.SAVED` state, and if no `extra_attributes`
        parameter is given, nothing will happen.

        Parameters
        ----------
        extra_attributes : dict, optional
            A dictionary of attributes that should be sent to the catalog along with
            attributes already set on this object. Empty by default. If not empty,
            and the object is in the `~descarteslabs.catalog.DocumentState.SAVED`
            state, it is updated in the Descartes Labs catalog even though no attributes
            were modified.

        Raises
        ------
        ConflictError
            If you're trying to create a new object and the object with given ``id``
            already exists in the Descartes Labs catalog.
        BadRequestError
            If any of the attribute values are invalid.
        DeletedObjectError
            If this catalog object was deleted.
        ClientError or ServerError
            :ref:`Spurious exception <network_exceptions>` that can occur during a
            network request.

        Example
        -------
        >>> new_product = Product(
        ...     id="my-product",
        ...     name="My Product",
        ...     description="This is a test product"
        ... )
        >>> new_product.state
        <DocumentState.UNSAVED: 'unsaved'>
        >>> new_product.save()
        >>> # ids will be automatically prefixed by the Descartes Labs catalog
        >>> # with your organization id
        >>> new_product.id
        my_org_id:my-product
        >>> # Now you can retrieve the product and update it
        >>> existing_product = Product.get(new_product.id)
        >>> existing_product.state
        <DocumentState.SAVED: 'saved'>
        >>> existing_product.name = "My Updated Product"
        >>> existing_product.state
        <DocumentState.MODIFIED: 'modified'>
        >>> existing_product.save()
        >>> existing_product.state
        <DocumentState.SAVED: 'saved'>
        >>> # After you delete it...
        >>> existing_product.delete()
        True
        >>> existing_product.state
        <DocumentState.DELETED: 'deleted'>
        """
        if self.state == DocumentState.SAVED and not extra_attributes:
            # Noop, already saved in the catalog
            return

        # New objects are POSTed with all attributes; existing objects are
        # PATCHed with only the modified ones.
        if self.state == DocumentState.UNSAVED:
            method = HttpRequestMethod.POST
            json = self.serialize(modified_only=False, jsonapi_format=True)
        else:
            method = HttpRequestMethod.PATCH
            json = self.serialize(modified_only=True, jsonapi_format=True)
        # NOTE(review): the local name `json` shadows the stdlib json module
        # within this method.

        if extra_attributes:
            json["data"]["attributes"].update(extra_attributes)

        data, related_objects = self._send_data(
            method=method, id=self.id, json=json, client=self._client
        )
        # Re-initialize from the server response so server-side changes
        # (e.g. the org-prefixed id) are reflected locally.
        self._initialize(
            id=data["id"],
            saved=True,
            relationships=data.get("relationships"),
            related_objects=related_objects,
            **data["attributes"]
        )
    @check_deleted
    def reload(self):
        """Reload all attributes from the Descartes Labs catalog.

        Refresh the state of this catalog object from the object in the Descartes Labs
        catalog. This may be necessary if there are concurrent updates and the object
        in the Descartes Labs catalog was updated from another client. The instance
        state must be in the `~descarteslabs.catalog.DocumentState.SAVED` state.

        If you want to revert a modified object to its original one, you should use
        :py:meth:`get` on the object class with the object's `id`.

        Raises
        ------
        ValueError
            If the catalog object is not in the ``SAVED`` state.
        DeletedObjectError
            If this catalog object was deleted.
        ClientError or ServerError
            :ref:`Spurious exception <network_exceptions>` that can occur during a
            network request.

        Example
        -------
        >>> p = Product("my_org_id:my_product_id")
        >>> # Some time elapses and a concurrent change was made
        >>> p.state
        <DocumentState.SAVED: 'saved'>
        >>> p.reload()
        >>> # But once you make changes, you cannot use this method any more
        >>> p.name = "My name has changed"
        >>> p.reload()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "/usr/lib/python3/site-packages/descarteslabs/catalog/catalog_base.py", line 47, in wrapper
            return f(self, *args, **kwargs)
          File "/usr/lib/python3/site-packages/descarteslabs/catalog/catalog_base.py", line 879, in reload
            \"""Reload all attributes from the Descartes Labs catalog.
        ValueError: Product instance with id my_org_id:my_product_id has not been saved
        >>> # But you can revert
        >>> p = Product.get(p.id)
        >>> p.state
        <DocumentState.SAVED: 'saved'>
        """
        # Reloading with unsaved local modifications would silently discard
        # them, so only the SAVED state is allowed.
        if self.state != DocumentState.SAVED:
            raise ValueError(
                "{} instance with id {} has not been saved".format(
                    self.__class__.__name__, self.id
                )
            )

        data, related_objects = self._send_data(
            method=HttpRequestMethod.GET, id=self.id, client=self._client
        )

        # this will effectively wipe all current state & caching
        self._initialize(
            id=data["id"],
            saved=True,
            relationships=data.get("relationships"),
            related_objects=related_objects,
            **data["attributes"]
        )
@classmethod
@check_derived
def delete(cls, id, client=None):
"""Delete the catalog object with the given `id`.
Parameters
----------
id : str
The id of the object to be deleted.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
bool
``True`` if this object was successfully deleted. ``False`` if the
object was not found.
Raises
------
ConflictError
If the object has related objects (bands, images) that exist.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
Example
-------
>>> Image.delete('my-image-id')
"""
if client is None:
client = CatalogClient.get_default_client()
try:
client.session.delete(cls._url + "/" + id)
return True # non-200 will raise an exception
except NotFoundError:
return False
@check_deleted
def _instance_delete(self):
"""Delete this catalog object from the Descartes Labs catalog.
Once deleted, you cannot use the catalog object and should release any
references.
Raises
------
DeletedObjectError
If this catalog object was already deleted.
UnsavedObjectError
If this catalog object is being deleted without having been saved.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
if self.state == DocumentState.UNSAVED:
raise UnsavedObjectError("You cannot delete an unsaved object.")
self._client.session.delete(self._url + "/" + self.id)
self._deleted = True # non-200 will raise an exception
@classmethod
@check_derived
def _send_data(cls, method, id=None, json=None, client=None):
client = client or CatalogClient.get_default_client()
session_method = getattr(client.session, method.lower())
url = cls._url
if method not in (HttpRequestMethod.POST, HttpRequestMethod.PUT):
url += "/" + id
if cls._default_includes:
url += "?include=" + ",".join(cls._default_includes)
r = session_method(url, json=json).json()
data = r["data"]
related_objects = cls._load_related_objects(r, client)
return data, related_objects
@classmethod
def _load_related_objects(cls, response, client):
related_objects = {}
related_objects_serialized = response.get("included")
if related_objects_serialized:
for serialized in related_objects_serialized:
model_class = cls._get_model_class(serialized)
if model_class:
related = model_class(
id=serialized["id"],
client=client,
_saved=True,
**serialized["attributes"]
)
related_objects[(serialized["type"], serialized["id"])] = related
return related_objects
class CatalogObject(CatalogObjectBase):
    """A base class for all representations of objects in the Descartes Labs catalog.

    Adds the shared access-control (`owners`, `readers`, `writers`) and
    metadata (`extra_properties`, `tags`) attributes on top of
    `CatalogObjectBase`.
    """

    owners = ListAttribute(
        TypedAttribute(str),
        doc="""list(str), optional: User, group, or organization IDs that own this object.

        Defaults to [``user:current_user``, ``org:current_org``]. The owner can edit,
        delete, and change access to this object. :ref:`See this note <product_note>`.

        *Filterable*.
        """,
    )
    readers = ListAttribute(
        TypedAttribute(str),
        doc="""list(str), optional: User, group, or organization IDs that can read this object.

        Will be empty by default. This attribute is only available to the `owners`
        of a catalog object. :ref:`See this note <product_note>`.
        """,
    )
    writers = ListAttribute(
        TypedAttribute(str),
        doc="""list(str), optional: User, group, or organization IDs that can edit this object.

        Writers will also have read permission. Writers will be empty by default.
        See note below. This attribute is only available to the `owners` of a catalog
        object. :ref:`See this note <product_note>`.
        """,
    )
    extra_properties = ExtraPropertiesAttribute(
        # BUG FIX: corrected the user-facing typo "dictonary" -> "dictionary"
        # in the generated documentation string below.
        doc="""dict, optional: A dictionary of up to 50 key/value pairs.

        The keys of this dictionary must be strings, and the values of this dictionary
        can be strings or numbers. This allows for more structured custom metadata
        to be associated with objects.
        """
    )
    tags = ListAttribute(
        TypedAttribute(str),
        doc="""list, optional: A list of up to 20 tags.

        The tags may support the classification and custom filtering of objects.

        *Filterable*.
        """,
    )

    def __new__(cls, *args, **kwargs):
        # Prevent direct instantiation: this class is abstract and must be
        # subclassed (e.g. by Product, Image, Band).
        return _new_abstract_class(cls, CatalogObject)
| [
"support@descarteslabs.com"
] | support@descarteslabs.com |
9c19ad2a558a99b718e3e70fbe64d7679a09444d | cc89320c0c261eb31a971bd01399b173a014a380 | /basics/dbconnect_01.py | 6c49973e9fa4f4316b2e445cc204f33c425bf0c8 | [] | no_license | Nivedha221998/python | 41ecceb6f23b598ab749a2f0cf60cf7672893e88 | 82a4a94f5a0f5305a80f68177a49b7f66a553ccc | refs/heads/master | 2023-02-02T22:41:07.182187 | 2020-12-18T09:14:38 | 2020-12-18T09:14:38 | 322,485,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | import datetime;
import mysql.connector;
import json;
from environment import host;
def timestamp():
    """Return the current local date and time."""
    return datetime.datetime.now()
def timediff(a, b):
    """Return the elapsed amount from a to b (b - a)."""
    elapsed = b - a
    return elapsed
def converter(d):
    """JSON fallback serializer: render any non-serializable object via str()."""
    return str(d)
def connection():
    """Fetch all rows from placement.company, print them as pretty JSON,
    and print the wall-clock time taken for the whole round trip."""
    a = timestamp()  # start time
    print(a)
    # SECURITY(review): credentials are hard-coded in source; move them to
    # environment variables or a config file outside version control.
    cnx = mysql.connector.connect(user='nivedha', password='nivedha',
                                  host=host,
                                  port=3306,
                                  database='placement')
    # dictionary=True makes fetchall() return list-of-dicts (JSON-friendly).
    mycursor = cnx.cursor(dictionary=True)
    mycursor.execute("SELECT * FROM company")
    myresult = mycursor.fetchall()
    cnx.close()
    # default=converter stringifies values json can't encode (e.g. datetimes).
    json_object = json.dumps(myresult, indent=4, sort_keys=True, default=converter)
    print(json_object)
    b = timestamp()  # end time
    print(b)
    print(timediff(a, b))
# Run the report immediately when this module is executed/imported.
connection()
| [
"nivedha221998@gmail.com"
] | nivedha221998@gmail.com |
d5d7bc6f783064bdf9f3c5a83dec9a899defc356 | 060967fa3e6e390ac0504172e6dea8421ffb9d98 | /2022/python2022/aoc/day01.py | f8899599170d8fd6ebfed8fd5aa9f6cefed79066 | [] | no_license | mreishus/aoc | 677afd18521b62c9fd141a45fec4b7bc844be259 | e89db235837d2d05848210a18c9c2a4456085570 | refs/heads/master | 2023-02-22T12:00:52.508701 | 2023-02-09T04:37:50 | 2023-02-09T04:39:44 | 159,991,022 | 16 | 3 | null | 2023-01-05T10:00:46 | 2018-12-01T22:00:22 | Python | UTF-8 | Python | false | false | 901 | py | #!/usr/bin/env python
"""
Advent Of Code 2022 Day 1
https://adventofcode.com/2022/day/1
"""
from typing import List
import heapq
def parse(filename: str) -> List[int]:
    """Parse the input file into a list of integers.

    The file consists of blank-line-separated blocks of numbers; each
    returned integer is the sum of one block (see `parse_block`).
    """
    with open(filename) as handle:
        raw = handle.read().strip()
    return [parse_block(chunk) for chunk in raw.split("\n\n")]
def parse_block(block: str) -> int:
    """Sum the newline-separated integers in one block.

    param block: '1000\\n2000\\n3000'
    return: 6000
    """
    total = 0
    for line in block.splitlines():
        total += int(line)
    return total
class Day01:
    """AoC 2022 Day 01"""

    @staticmethod
    def part1(filename: str) -> int:
        """Largest per-elf calorie total in the input file."""
        return max(parse(filename))

    @staticmethod
    def part2(filename: str) -> int:
        """Sum of the three largest per-elf calorie totals."""
        return sum(heapq.nlargest(3, parse(filename)))
| [
"mreishus@users.noreply.github.com"
] | mreishus@users.noreply.github.com |
bcf3914323be65181a8ba743c160c877da58bb0b | 89082e16362f25e559f1f928641d26566b248cd1 | /classwork20181022/range.py | 9da58ec202263f1f578e37ba629f296733212ed9 | [] | no_license | Andoree/Python-Study | fde1936607a9a49afd556b5f8c956a1076bbf2f7 | 10a6dad4b0c70639c9b2f54ebae3c56a35cbaafb | refs/heads/master | 2020-03-29T13:41:28.587138 | 2018-12-05T20:55:42 | 2018-12-05T20:55:42 | 149,976,797 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | def our_range(n):
i = 0
while i < n:
yield i
i += 1
def my_range(end, start=0, step=1):
    """Generator mimicking builtin range: yields start, start+step, ...
    while moving toward (and excluding) end.

    Generalized (backward-compatibly) to support negative steps, counting
    down from start toward end, and to reject step == 0, which previously
    looped forever.

    Raises
    ------
    ValueError
        If step is zero.
    """
    if step == 0:
        raise ValueError("step must not be zero")
    i = start
    if step > 0:
        while i < end:
            yield i
            i += step
    else:
        # New: negative step counts down, like range(start, end, step).
        while i > end:
            yield i
            i += step
# Demo: print the multiples of 3 below 100 using the generator above.
for k in my_range(100, 0, 3):
    print(k)
# Disabled scratch code exercising our_range manually (kept verbatim).
# NOTE(review): `i.next()` is Python 2 syntax; in Python 3 use `next(i)`.
'''
i = our_range(5)
print(i.next())
for k in our_range(5):
    print(k)
'''
"and2900@inbox.ru"
] | and2900@inbox.ru |
3e3a29d645d76489564f31b5593d1a2360b96136 | ee3859a2d8bf0c2486fba02e7b0a7effa8f36563 | /2022/python/day11.py | a10b7c76518ceef5b4a74c8b642491e7948e0fe5 | [
"MIT"
] | permissive | majormunky/advent_of_code | a4a1727611021e57b02c2014669621614f784221 | a5c22cbd37162234b28f9db8e32281286dcbbd26 | refs/heads/main | 2023-08-20T06:18:18.060244 | 2023-08-02T22:32:08 | 2023-08-02T22:32:08 | 226,026,329 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,794 | py | import time
import sys
from common import get_file_contents
class Monkey:
    """One monkey from AoC 2022 day 11: holds items (worry levels),
    inspects them with its worry operation, and decides which monkey
    each item is thrown to based on a divisibility test."""

    def __init__(self, lines, trainer):
        """Parse one six-line monkey description.

        lines[0]: name, lines[1]: starting items, lines[2]: operation,
        lines[3]: divisibility test, lines[4]/lines[5]: target monkeys.
        trainer: owning MonkeyTrainer (read for test_name during processing).
        """
        self.trainer = trainer
        self.name = self.parse_name(lines[0])
        self.holding_items = self.get_starting_item_list(lines[1])
        self.operation_val = None
        self.operation_type = None
        # BUG FIX: the operation line was never parsed, leaving
        # operation_val/operation_type as None so every perform_operation()
        # call hit the "Unknown operation type" branch. Parse it here.
        self.build_operation(lines[2])
        self.item_test_val = None
        self.item_test = self.build_test_func(lines[3])
        self.passes_test_next_monkey = self.get_monkey_num(lines[4])
        self.fails_test_next_monkey = self.get_monkey_num(lines[5])
        self.inspection_count = 0
        self.output = []

    def parse_name(self, line):
        """
        Get the name out of the line that includes the name
        """
        parts = line.split(" ")
        return parts[1].replace(":", "").strip()

    def perform_operation(self, val):
        """
        Performs the specific operation configured for this monkey
        """
        if self.operation_type == "increases":
            if self.operation_val == "old":
                # "new = old + old": double the current value.
                return val + val
            return self.operation_val + val
        if self.operation_type == "multiplied":
            if self.operation_val == "old":
                # BUG FIX: previously multiplied the string "old" by itself
                # (a TypeError); "new = old * old" squares the current value.
                return val * val
            return self.operation_val * val
        print("Unknown operation type")
        return False

    def get_starting_item_list(self, line):
        """
        Takes in the line that has our starting items,
        and returns a list with just those items as ints
        """
        result = []
        parts = line.split(":")
        for item in parts[1].split(","):
            result.append(int(item))
        return result

    def catch_item(self, item):
        # Receive an item thrown from another monkey.
        self.holding_items.append(item)

    def build_test_func(self, line):
        # Remember the divisor and return a "divisible by it?" predicate.
        parts = line.split(" ")
        self.item_test_val = int(parts[-1])

        def new_func(x):
            return x % self.item_test_val == 0

        return new_func

    def build_operation(self, line):
        # Last token is the operand ("old" or an int); second-to-last names
        # the operation type checked in perform_operation.
        parts = line.split(" ")
        if parts[-1] == "old":
            self.operation_val = parts[-1]
        else:
            self.operation_val = int(parts[-1])
        # Remember what operation we have to do later
        self.operation_type = parts[-2]

    def get_monkey_num(self, line):
        # Target monkey name is the last token of the line.
        parts = line.split(" ")
        return parts[-1]

    def process_items(self):
        """Inspect every held item once.

        Returns a list of (worry_level, target_monkey_name) throws and
        leaves this monkey's hand empty.
        """
        result = []
        # Copy: we remove from holding_items while processing.
        current_items = list(self.holding_items)
        for item in current_items:
            # count how many times we process an item
            self.inspection_count += 1
            worry_level = self.perform_operation(item)
            if worry_level is False:
                print("Worry level is false, stopping!")
                sys.exit(1)
            # Part 1 only: relief step divides the worry level by 3.
            if self.trainer.test_name == "p1":
                worry_level = worry_level // 3
            # Route the item based on this monkey's divisibility test.
            passes_test = self.item_test(worry_level)
            if passes_test:
                result.append((worry_level, self.passes_test_next_monkey),)
            else:
                result.append((worry_level, self.fails_test_next_monkey),)
            # remove the item from the monkey
            self.holding_items.remove(item)
        return result

    def has_items(self):
        return len(self.holding_items) > 0
class MonkeyTrainer:
    """Owns all monkeys, routes thrown items between them, and runs rounds."""

    def __init__(self, lines, test_name):
        self.test_name = test_name  # "p1" enables the worry-relief (//3) step
        self.monkeys = {}           # name (str) -> Monkey
        self.build_monkeys(lines)
        print(f"Trainer built {len(self.monkeys.keys())} monkeys")

    def get_chunks(self, lines):
        # Split the flat input into one list of lines per monkey, using
        # blank lines as separators.
        result = []
        monkey = []
        for line in lines:
            if line == "":
                result.append(monkey)
                monkey = []
                continue
            monkey.append(line)
        # Flush the final chunk (input may not end with a blank line).
        if len(monkey):
            result.append(monkey)
        return result

    def send_item_to_monkey(self, item, monkey_name):
        # Deliver a thrown item (worry level) to the named monkey.
        self.monkeys[monkey_name].catch_item(item)

    def build_monkeys(self, lines):
        chunks = self.get_chunks(lines)
        for chunk in chunks:
            monkey = Monkey(chunk, self)
            self.monkeys[monkey.name] = monkey

    def process_monkey(self, key):
        # Run one monkey's turn, deliver its throws, and time the turn.
        tic = time.perf_counter()
        item_count = len(self.monkeys[key].holding_items)
        process_results = self.monkeys[key].process_items()
        for item in process_results:
            self.send_item_to_monkey(item[0], item[1])
        toc = time.perf_counter()
        time_taken = toc - tic
        return time_taken, item_count

    def run(self, round_limit):
        """Run `round_limit` full rounds and return all monkeys'
        inspection counts sorted descending."""
        ROUND_LIMIT = round_limit
        round_count = 0
        sub_count = 0
        # Progress output is printed every `sub_limit` rounds.
        sub_limit = 100
        if sub_limit > round_limit:
            sub_limit = round_limit
        while True:
            round_count += 1
            sub_count += 1
            time_taken = None
            result_count = None
            # Monkeys take turns in numeric order; names are stringified ints.
            for i in range(len(self.monkeys.keys())):
                key = str(i)
                time_taken, result_count = self.process_monkey(key)
            if sub_count == sub_limit:
                print("Process Time: ", round(time_taken, 4))
                print("Result Count: ", result_count)
                sub_count = 0
                print("Round Count:", round_count)
            if round_count == ROUND_LIMIT:
                break
        counts = []
        for name, monkey in self.monkeys.items():
            counts.append(monkey.inspection_count)
        counts = sorted(counts, reverse=True)
        return counts
def get_answer_from_count_list(count_list):
    """Monkey-business score: product of the two largest inspection counts
    (count_list is already sorted descending by MonkeyTrainer.run)."""
    leader, runner_up = count_list[0], count_list[1]
    return leader * runner_up
def p1():
    """Part 1: 20 rounds with worry relief (//3) on the real input."""
    lines = get_file_contents("data/day11_input.txt")
    trainer = MonkeyTrainer(lines, "p1")
    answer_list = trainer.run(20)
    # Answer: 110888
    print("Answer: ", get_answer_from_count_list(answer_list))
def p2():
    """Part 2 (work in progress): no worry relief, run on the test input."""
    # lines = get_file_contents("data/day11_input.txt")
    lines = get_file_contents("data/day11_test.input")
    trainer = MonkeyTrainer(lines, "p2")
    answer_list = trainer.run(1000)
    # NOTE(review): without the //3 relief step worry levels grow without
    # bound and rounds get very slow; the usual fix is to reduce each worry
    # level modulo the product of all monkeys' test divisors.
    print("Answer: ", answer_list)
def main():
    # Entry point: currently runs part 2 only.
    p2()
# Standard script entry point.
if __name__ == "__main__":
    main()
| [
"josh.bright@gmail.com"
] | josh.bright@gmail.com |
da7a3b085d5e6082e0cb6992493c90567a8c0749 | 0046e32be68708ad9fb59d240077de7bc890d118 | /cart/migrations/0004_auto_20190818_1605.py | 4b986d243eece50941cba23959f33acf94c2f8ed | [] | no_license | ajitesh-30/Food-Service | bf6d07ae2020a88ed10690c3be54040b74d9f610 | f1b56b24331355666c92590a2df3ab944807545f | refs/heads/master | 2020-07-07T02:20:38.381964 | 2019-08-19T18:42:15 | 2019-08-19T18:42:15 | 203,214,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # Generated by Django 2.1.8 on 2019-08-18 16:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (cart app, 0004).

    Reworks the cart models: drops the cached `Cart.total` and the
    CartItem -> Cart foreign key, adds status/ownership fields, and makes
    Cart hold its items through a many-to-many relation.
    """

    dependencies = [
        # Depends on the user model being available (swappable AUTH_USER_MODEL)
        # and on the previous migration of this app.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cart', '0003_remove_cart_number_of_items'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='cart',
            name='total',
        ),
        migrations.RemoveField(
            model_name='cartitem',
            name='cart',
        ),
        # NOTE(review): default 'created' is not one of the declared choices
        # ('pending', 'shipped') — forms validating against choices will
        # reject the default value.
        migrations.AddField(
            model_name='cart',
            name='completed',
            field=models.CharField(choices=[('pending', 'Pending'), ('shipped', 'Shipped')], default='created', max_length=120),
        ),
        # NOTE(review): max_length has no effect on a BooleanField.
        migrations.AddField(
            model_name='cartitem',
            name='completed',
            field=models.BooleanField(default=False, max_length=120),
        ),
        migrations.AddField(
            model_name='cartitem',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='cart',
            name='ordered_item',
            field=models.ManyToManyField(to='cart.CartItem'),
        ),
    ]
| [
"ajiteshs10@gmail.com"
] | ajiteshs10@gmail.com |
ac42de600d441111b8757f73f24b818eb59e9234 | 462019a24638e96e57aa72c4b4435e10117642ae | /Test Modules/keyClass.py | fa1ca5ca0aae4cb5e01ade84acb56af2abab831a | [] | no_license | AkagiCoder/FR_System | b0cbe633eaa4661c25e97497108b282993b20160 | 8c279d50f3d3ccb37b8d7336f821f2d57c3d8eb9 | refs/heads/master | 2020-09-16T17:56:29.723666 | 2020-05-07T04:11:10 | 2020-05-07T04:11:10 | 223,846,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Class that defines a key
class key:
def __init__(self, num, code, dateCreate, timeCreate):
self.num = num # Key's number indicating the position of the list
self.code = code # Key's code
self.dateCreate = dateCreate # Date when the key was generated
self.timeCreate = timeCreate # Time when the key was generated
# Smoke test: build one key and print its position number.
key1 = key("1", "55544", "today", "4:00")
print(key1.num)
"noreply@github.com"
] | AkagiCoder.noreply@github.com |
5a77dfd5ba44232e1bc973bae201fe2b77eaf6f6 | 65ca528fbb4f5ed5d62806b6225c861dcf420084 | /insert.py | 34fe0cdc1423350a97474dd9ec9a5444d1f939a3 | [] | no_license | pradeepganesamoorthy/python-basics | 35bb3b7ac4fb382089f91cad7935f146760b4503 | a0ac602d48167c72e3d6b85fd652d1893ba511dc | refs/heads/main | 2023-03-09T06:23:47.410073 | 2021-03-03T05:28:45 | 2021-03-03T05:28:45 | 342,138,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | #Insert
List1 = [1, 4, 5, 7, 8]
List1.insert(3,10)
print "Updated list" , List1 | [
"noreply@github.com"
] | pradeepganesamoorthy.noreply@github.com |
7f2890e6eef8d3b97cf77dd2662ed98a917826d1 | 631448f7133a783f6de769a36648bea049866077 | /python/matrix/matrix.py | a7ee11d3f374dfb97aa5d52584ae79a961ff256f | [
"MIT"
] | permissive | parkerbxyz/exercism | 1537c9e972864341d3c74f989f2a87f63ec9ac5c | cf5b82f5e80fbdf53d6939e5c997966a548a857a | refs/heads/main | 2023-08-08T03:14:21.031642 | 2023-03-18T04:12:14 | 2023-03-18T04:14:35 | 179,165,373 | 0 | 0 | MIT | 2023-09-11T22:34:04 | 2019-04-02T22:01:29 | Shell | UTF-8 | Python | false | false | 368 | py | class Matrix:
def __init__(self, matrix_string):
self.row_list = [list(map(int, row.split()))
for row in matrix_string.splitlines()]
self.col_list = list(map(list, zip(*self.row_list)))
def row(self, index):
return self.row_list[index - 1]
def column(self, index):
return self.col_list[index - 1]
| [
"17183625+parkerbrownxyz@users.noreply.github.com"
] | 17183625+parkerbrownxyz@users.noreply.github.com |
634e31435d8933c07718142efced149f7d9640e0 | dbcc35274e1d2cc6cda75c825a6dca7c3aeb1a20 | /game.py | c3475256d8cc748a66907060b8c685f59dfee4e1 | [] | no_license | NullCodex/Flappy | 2c130f51f5e56b481c0e64ef591df2985978bed8 | f06efc3a34dc9167934b3476c5aff68601e41501 | refs/heads/master | 2021-01-10T15:02:05.949461 | 2015-11-27T02:59:28 | 2015-11-27T02:59:28 | 46,956,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,713 | py | #!/usr/bin/env python
import pygame
from pygame.locals import * # noqa
import sys
import random
class FlappyBird:
def __init__(self):
self.screen = pygame.display.set_mode((400, 708))
self.bird = pygame.Rect(65, 50, 50, 50)
self.background = pygame.image.load("assets/background.png").convert()
self.birdSprites = [pygame.image.load("assets/1.png").convert_alpha(),
pygame.image.load("assets/2.png").convert_alpha(),
pygame.image.load("assets/dead.png")]
self.wallUp = pygame.image.load("assets/bottom.png").convert_alpha()
self.wallDown = pygame.image.load("assets/top.png").convert_alpha()
self.gap = 130
self.wallx = 400
self.birdY = 350
self.jump = 0
self.jumpSpeed = 10
self.gravity = 5
self.dead = False
self.sprite = 0
self.counter = 0
self.offset = random.randint(-110, 110)
def updateWalls(self):
self.wallx -= 2
if self.wallx < -80:
self.wallx = 400
self.counter += 1
self.offset = random.randint(-110, 110)
def birdUpdate(self):
if self.jump:
self.jumpSpeed -= 1
self.birdY -= self.jumpSpeed
self.jump -= 1
else:
self.birdY += self.gravity
self.gravity += 0.2
self.bird[1] = self.birdY
upRect = pygame.Rect(self.wallx,
360 + self.gap - self.offset + 10,
self.wallUp.get_width() - 10,
self.wallUp.get_height())
downRect = pygame.Rect(self.wallx,
0 - self.gap - self.offset - 10,
self.wallDown.get_width() - 10,
self.wallDown.get_height())
if upRect.colliderect(self.bird):
self.dead = True
if downRect.colliderect(self.bird):
self.dead = True
if not 0 < self.bird[1] < 720:
self.bird[1] = 50
self.birdY = 50
self.dead = False
self.counter = 0
self.wallx = 400
self.offset = random.randint(-110, 110)
self.gravity = 5
def run(self):
clock = pygame.time.Clock()
pygame.font.init()
font = pygame.font.SysFont("Arial", 50)
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN and not self.dead:
self.jump = 17
self.gravity = 5
self.jumpSpeed = 10
self.screen.fill((255, 255, 255))
self.screen.blit(self.background, (0, 0))
self.screen.blit(self.wallUp,
(self.wallx, 360 + self.gap - self.offset))
self.screen.blit(self.wallDown,
(self.wallx, 0 - self.gap - self.offset))
self.screen.blit(font.render(str(self.counter),
-1,
(255, 255, 255)),
(200, 50))
if self.dead:
self.sprite = 2
elif self.jump:
self.sprite = 1
self.screen.blit(self.birdSprites[self.sprite], (70, self.birdY))
if not self.dead:
self.sprite = 0
self.updateWalls()
self.birdUpdate()
pygame.display.update()
# Standard script entry point: start the game loop.
if __name__ == "__main__":
    FlappyBird().run()
| [
"jamesonyu95@gmail.com"
] | jamesonyu95@gmail.com |
a7d44b84305b45be2479048a5cb552f85539f5a2 | 38d3b3f4caf93c152070a660e4625bbdd99de5d8 | /data.py | 83bed3840916c9f6fe2c6181baaddc7fe2a1622d | [] | no_license | adwaye/neuralNetworks | d568282f685f589674d1d13ebf7130f4531a59b5 | 70a404c875d090359cbb2256ee1f758ddb2bbcd5 | refs/heads/master | 2020-03-31T17:43:13.259611 | 2018-10-10T13:59:48 | 2018-10-10T13:59:48 | 152,432,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | import numpy as np
import gzip
import matplotlib.pyplot as plt
import cv2
import os
from build_image_data import _process_dataset
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
"""
takes as input a gz-compressed ubyte object from http://yann.lecun.com/exdb/mnist/
and outputs it in folders that are named after the labels of each image
"""
def make_jpg_data(destinationLoc = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/images/train'
,image_filename = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/t10k-images-idx3-ubyte.gz'
,label_filename = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/t10k-labels-idx1-ubyte.gz'
,num_images = 100,plot= False):
bytestream_label = gzip.open(label_filename)
bytestream_image = gzip.open(image_filename)
bytestream_image.read(16)
image_buffer = bytestream_image.read(IMAGE_SIZE * IMAGE_SIZE*num_images)
bytestream_label.read(8)
label_buffer = bytestream_label.read(IMAGE_SIZE * IMAGE_SIZE*num_images)
if plot: plt.figure()
for i in range(num_images):
data_image = np.frombuffer(image_buffer[i*(IMAGE_SIZE * IMAGE_SIZE):(i+1)*IMAGE_SIZE * IMAGE_SIZE],
dtype=np.uint8).astype(np.float32)
lab = label_buffer[i]
saveLoc = os.path.join(destinationLoc,str(lab))
if not os.path.isdir(saveLoc):
os.makedirs(saveLoc)
im = data_image.reshape((IMAGE_SIZE,IMAGE_SIZE))
cv2.imwrite(os.path.join(saveLoc,str(i) + '.jpeg') ,im)
if plot:
plt.imshow(im)
plt.title(str(lab))
plt.pause(0.01)
if not os.path.isfile(os.path.join(destinationLoc,'label.txt')):
with open(os.path.join(destinationLoc,'label.txt'), 'a') as fp:
for name in range(10):
fp.write(str(name)+'\n')
print('done saving in '+destinationLoc)
if __name__=='__main__':
print('extracting JPEG Mnist')
train_image_filename = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/train-images-idx3-ubyte.gz'
train_label_filename = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/train-labels-idx1-ubyte.gz'
test_image_filename = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/t10k-images-idx3-ubyte.gz'
test_label_filename = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/t10k-labels-idx1-ubyte.gz'
make_jpg_data(destinationLoc = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/images/train',
image_filename=train_image_filename,label_filename=train_label_filename)
make_jpg_data(destinationLoc = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/images/test',
image_filename=test_image_filename,label_filename=test_label_filename)
trainLoc = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/images/train'
print('')
_process_dataset('./train',trainLoc,1,
os.path.join(trainLoc,'label.txt'))
testLoc = '/home/amr62/Documents/github examples/neuralNetworks/MNIST-data/images/test'
_process_dataset('./validation',testLoc,1,
os.path.join(testLoc,'label.txt')) | [
"adwayerambojun@gmail.com"
] | adwayerambojun@gmail.com |
6544e45fed1939fd5ed348d360e7d109637bf1ed | b498fc42a8ad2fe90cc61a51eb00f0511a83bb4c | /Lists/combine_lst.py | 54db543ca275887d0056382aaba899992c7b83d5 | [] | no_license | chutki-25/python_ws | 3d58ff2bc64b30da0ec9dd85265be94a3ad89bca | b0441c17367c356d9c92b4dce49165d2c8508f46 | refs/heads/master | 2022-02-25T09:08:06.692574 | 2019-07-24T18:22:12 | 2019-07-24T18:22:12 | 198,151,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | nums=[[1,2,3],[4,5,6],[7,8,9]]
all_nums=[j for i in nums for j in i ]
print(all_nums)
| [
"sirikollur25@gmail.com"
] | sirikollur25@gmail.com |
b06352210f635b17200179bc0e7ebec438a609f0 | 00c250de250d0993fae5dd9d8b17a4d4b6c46f0f | /master.py | ada4437cfc498f81ac57fce4b16ef5b014b9e1e6 | [] | no_license | junnychen/git_exercise2 | 105f3b9b4ddc64333cda22a9594dca338e25e3f4 | 22d05cbbf6a3cfadd7687f6eee498edbd58ec4e5 | refs/heads/master | 2020-05-01T11:37:21.355453 | 2019-03-24T18:05:02 | 2019-03-24T18:05:02 | 177,447,764 | 0 | 0 | null | 2019-03-24T18:05:02 | 2019-03-24T17:38:26 | Python | UTF-8 | Python | false | false | 52 | py | sum = 0
for i in range(10):
sum += i
print(sum) | [
"junlin2222@Gmail.com"
] | junlin2222@Gmail.com |
272cb28ffcea219a7b52db8a0dbf7f5718528369 | f38078d2ae83f54b12bbccb298000668643d191d | /ImageClassificationCNN/model_loader.py | f06ad2d69ceb1d711454138b3b6b9999bf509408 | [] | no_license | royd1990/DeepLearnInfilect | 26de605d36c617da405346255690df0f4c4f0836 | 61eb515eb8e88d981ef6147650fe1e1663f52af2 | refs/heads/master | 2022-11-15T08:23:55.726407 | 2017-07-15T10:18:30 | 2017-07-15T10:18:30 | 95,906,386 | 0 | 1 | null | 2022-11-03T20:58:51 | 2017-06-30T16:33:59 | Python | UTF-8 | Python | false | false | 2,693 | py | import argparse
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from week1.CNN.train import model_freezer
if __name__ == '__main__':
# Let's allow the user to pass the filename as an argument
parser = argparse.ArgumentParser()
parser.add_argument("--frozen_model_filename",
default="/media/royd1990/fd0ff253-17a9-49e9-a4bb-0e4529adb2cb/home/royd1990/Documents/deep_learning_tensorFlow/ml-course1/week1/CNN/train/checkpoints/convnet_mnist/frozen_model.pb",
type=str,
help="Frozen model file to import")
mnist = input_data.read_data_sets(
"/media/royd1990/fd0ff253-17a9-49e9-a4bb-0e4529adb2cb/home/royd1990/Documents/deep_learning_tensorFlow/ml-course1/week1/CNN/train/mnist",
one_hot=True)
args = parser.parse_args()
# We use our "load_graph" function
graph = model_freezer.load_graph(args.frozen_model_filename)
# We can verify that we can access the list of operations in the graph
for op in graph.get_operations():
print(op.name)
# prefix/Placeholder/inputs_placeholder
# ...
# prefix/Accuracy/predictions
# We access the input and output nodes
X = graph.get_tensor_by_name('prefix/data/X_placeholder:0')
# Y = graph.get_tensor_by_name('prefix/data/Y_placeholder:0')
preds = graph.get_tensor_by_name('prefix/loss/pred:0')
# loss = graph.get_tensor_by_name('prefix/loss/loss:0')
dropout = graph.get_tensor_by_name('prefix/dropout:0')
# We launch a Session
with tf.Session(graph=graph) as sess:
n_batches = int(mnist.test.num_examples / 128)
total_correct_preds = 0
for i in range(n_batches):
X_batch, Y_batch = mnist.test.next_batch(128)
# Note: we didn't initialize/restore anything, everything is stored in the graph_def
# _, pred = sess.run([loss, preds], feed_dict={X: X_batch,Y: Y_batch, dropout: 0.75}) # , Y:Y_batch,dropout: 0.75
# preds = tf.nn.softmax(logits_batch)
# x = pred[0]
# correct_preds = tf.equal(tf.argmax(pred,1), tf.argmax(Y_batch, 1))
# accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
# total_correct_preds += sess.run(accuracy)
y_out=sess.run(preds,feed_dict={X: X_batch,dropout: 0.75})
correct_preds = tf.equal(tf.argmax(y_out, 1), tf.argmax(Y_batch, 1))
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
total_correct_preds += sess.run(accuracy)
#print(y_out)
print("Accuracy {0}".format(total_correct_preds / mnist.test.num_examples))
| [
"royd1990@gmail.com"
] | royd1990@gmail.com |
c16804ef65ddaba3604e91cc5575aeae51e724d9 | 9442107662ffd710ab44a26218481bfc6286baeb | /python 3.6.4/chatbot.py | d5b5b68a4dd41530ac3b9ad298fbf0f183070c29 | [] | no_license | TheSlothinatoor/Chatbot-Project | 7a77ac1fd8ac1f8da1a314fb88016152c06bc08c | 531e789b9db92a98f9137ea903ed261fb1202a3b | refs/heads/master | 2020-08-07T22:14:39.885346 | 2019-11-15T11:31:02 | 2019-11-15T11:31:02 | 213,601,780 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import discord
from discord.ext.command import Bot
from discord.ext import commands
import asyncio
import time
Client = discord.Client()
client = command.Bot(command_prefix = "!")
@client.event
async def on_ready():
print("We have logged in")
@client.event
async def on_message(message):
if message.content == "cookie":
await client.send_message(message.channel, ":cookie:")
client.run("2QZgvTyxIq8WyjgPRZMHhZJgiLd3A93T")
| [
"31484277+TheSlothinatoor@users.noreply.github.com"
] | 31484277+TheSlothinatoor@users.noreply.github.com |
126074cc00f2903010c558fb7fffdd45e6708f18 | 2b362b5f6b2839613f4e4360d3b922d90162704f | /app/gitmanager.py | 39daa072bd938c66ab98d6309b8c74f9356fb4d6 | [
"MIT"
] | permissive | snehesht/blog | c4200de90efba1e2664937ec88aa6b1234e47fed | e579fce1d4f4a17fb6c9c7adfa3e05bbdc24aae6 | refs/heads/master | 2021-01-16T23:33:30.733846 | 2018-12-21T06:55:53 | 2018-12-21T06:55:53 | 64,437,625 | 196 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | import subprocess
import os
from config import *
# Update the GIT database repo
def git_update():
current_dir = os.getcwd().split('/')[-1]
try:
if check_repo_exist():
# Checks if the directory is GIT_REPO_DIR_NAME or not, if not changes to that dir
# This is to avoid recursive chdir calls
if current_dir != GIT_REPO_DIR_NAME:
print("Changing directory")
os.chdir(GIT_REPO_DIR_NAME)
proc = subprocess.run(['git','pull'])
# Sleeps for 10 secs, waits till the pull is completed
# time.sleep(10)
else:
print('Some problem with the repo, repo doesnt exist')
except Exception as e:
raise e
finally:
os.chdir("../")
def get_current_dir():
tmp = os.getcwd().split('/')
return tmp[-1]
# GIT DATA repo doesn't exist, pull the repo
def git_clone():
try:
os.chdir(GIT_REPO_DIR_NAME)
proc = subprocess.run(['git','clone',GIT_REPO_URL])
except Exception as e:
raise e
finally:
return 0
# Check if git data repo exist
def check_repo_exist():
try:
repo_exists = False
# List dir
for item in os.listdir():
# print(item)
if item == GIT_REPO_DIR_NAME:
repo_exists = True
current_dir = os.getcwd().split('/')[-1]
# If curr directory is GIT_REPO_DIR_NAME
if current_dir == GIT_REPO_DIR_NAME:
repo_exists = True
if repo_exists == False:
proc = subprocess.run(['git','clone',GIT_REPO_URL])
except Exception as e:
raise
finally:
return True
| [
"mail@snehesh.me"
] | mail@snehesh.me |
d2d048ae60943070dda3483a74b20995a1d2e5b6 | 016b23a5ba5fb0efdc5f5c8437176b0078cfa800 | /thesale/theteam/migrations/0009_img.py | 7302b56f818678c9ea5e7d776018fad19ce43cfd | [] | no_license | wcqy-ye/ourwork | 3bae7161019444e527e760ff50c9bde97364d787 | 1ec80af71579200dc5c2dc34ac74fb6803107888 | refs/heads/master | 2020-05-09T09:50:49.682238 | 2019-05-21T05:37:26 | 2019-05-21T05:37:26 | 181,018,458 | 2 | 0 | null | 2019-04-12T13:55:51 | 2019-04-12T13:55:51 | null | UTF-8 | Python | false | false | 527 | py | # Generated by Django 2.1.7 on 2019-05-20 13:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('theteam', '0008_team'),
]
operations = [
migrations.CreateModel(
name='IMG',
fields=[
('img', models.ImageField(upload_to='img2')),
('name', models.CharField(max_length=20)),
('img_id', models.IntegerField(primary_key=True, serialize=False)),
],
),
]
| [
"2312309705@qq.com"
] | 2312309705@qq.com |
9e011f833190c003c501b34093c98fea67323259 | 6bf492920985e3741440ba53e1c7f8426b66ac1f | /snakemake_rules/rules/gatk/gatk_combine_variants.smk | 4aeb72ab60e819d714f462e05f027c1fd761730a | [
"MIT"
] | permissive | ukaraoz/snakemake-rules | 5b2ba7c9ec19d88b56067a46f66fd0c72e48c368 | 07e96afeb39307cdf35ecc8482dc1f8b62c120b9 | refs/heads/master | 2020-03-31T15:20:44.444006 | 2018-09-07T08:53:47 | 2018-09-07T08:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | smk | # -*- snakemake -*-
include: 'gatk.settings.smk'
include: 'gatk_variant_snp_JEXL_filtration.smk'
include: 'gatk_variant_indel_JEXL_filtration.smk'
config_default = {'gatk': {'combine_variants': _gatk_config_rule_default.copy()}}
update_config(config_default, config)
config = config_default
cmd = re.sub("-Xmx[0-9a-zA-Z]+", "-Xmx{mem}".format(mem=config['gatk']['combine_variants']['java_mem']), config['gatk']['cmd'])
rule gatk_combine_variants:
"""Run GATK CombineVariants to combine variant files.
The default rule combines files with suffixes filteredSNP.vcf and
filteredINDEL.vcf.
"""
wildcard_constraints:
suffix = "(.vcf|.vcf.gz)"
params: cmd = cmd + " -T " + COMBINE_VARIANTS,
options = " ".join(["-R", config['gatk']['combine_variants']['ref'],
config['gatk']['combine_variants']['options']]),
runtime = config['gatk']['combine_variants']['runtime']
input: "{prefix}.snp.filteredSNP{suffix}", "{prefix}.indel.filteredINDEL{suffix}"
output: "{prefix}.variants{suffix}"
threads: config['gatk']['combine_variants']['threads']
conda: "env.yaml"
shell: "command=\"{params.cmd} {params.options} $(echo {input} | sed -e 's/[^ ][^ ]*/-V &/g') -o {output}\"; eval \"${{command}}\""
| [
"per.unneberg@scilifelab.se"
] | per.unneberg@scilifelab.se |
82e714f9d7bf40c18e7816ef186fca4e30fa9493 | 48e43eec0e8399b034705b02916104be9335d64e | /SourceCode/MapReduce Stripes Mapper.py | 6e6a0d878fff460ea63861749297d1919fc1d3ee | [
"MIT"
] | permissive | prakhardogra921/Movie-Pair-Analysis-using-Hadoop-and-Spark | c8d5f39e2d59aa65888c3f87e65a9bf0a1145926 | aa5587bb4ee29e46ae34bf299ab70475f1cde5a5 | refs/heads/master | 2021-05-14T10:00:32.616508 | 2018-01-05T05:03:52 | 2018-01-05T05:03:52 | 116,341,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/env python
import sys
import json
movie_dict = {}
skip_first = True
count = 0
user = 0
movie_list = []
all_movies = ""
for line in sys.stdin:
if skip_first:
skip_first = False
continue
info = line.split(",")
if float(info[2]) >= 4.0:
if user != int(info[0]):
movie_list = sorted(movie_list)
l = len(movie_list)
if l > 1:
for i in range(l-1):
for j in range(i+1, l):
if movie_list[j] in movie_dict:
movie_dict[movie_list[j]] += 1
else:
movie_dict[movie_list[j]] = 1
print (str(movie_list[i]) + "\t" + json.dumps(movie_dict))
movie_dict.clear()
movie_list[:] = []
user = int(info[0])
movie_list.append(int(info[1]))
movie_list = sorted(movie_list)
l = len(movie_list)
if l > 1:
for i in range(l-2):
for j in range(i+1, l-1):
if movie_list[j] in movie_dict:
movie_dict[movie_list[j]] += 1
else:
movie_dict[movie_list[j]] = 1
print (str(movie_list[i]) + "\t" + json.dumps(movie_dict)) | [
"pdogra@cafex.com"
] | pdogra@cafex.com |
d052fff3e9a8ca167ab284868d1d61e0dbb654ce | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/lib/pymodules/python2.6/papyon/sip/transport.py | 20fe3f535a9615f5870fc8c179f1e13f2a9f1010 | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /usr/share/pyshared/papyon/sip/transport.py | [
"root@xinli.xinli"
] | root@xinli.xinli |
bcdd0abe6750285e7fa6b8a7a95cdf85baaf302a | 3bb1cf4309e0e6488aeb3e5ae8b78138cfdaa002 | /kyopro_tenkei/90_54.py | 8de332b75aa23b0227743cdd237feacaa92f0a7a | [] | no_license | show2214/atcoder | 18a2dd0c2167fadeda2725a67d2d68d593b0bef9 | 7aae17b41b07bece746b34258b9514e145186327 | refs/heads/master | 2022-06-27T19:17:46.514876 | 2022-06-19T23:21:48 | 2022-06-19T23:21:48 | 249,148,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | N, M = map(int, input().split())
g = [[] for _ in range(N + M)]
for i in range(M):
input()
for j in map(int, input().split()):
g[N + i] += j - 1,
g[j - 1] += N + i,
from collections import *
q = deque([0])
v = [0] + [-1] * (N + M)
while q:
c = q.popleft()
for b in g[c]:
if v[b] < 0:
v[b] = v[c] + 1
q += b,
print(*[i//2 for i in v[:N]]) | [
"show2214@icloud.com"
] | show2214@icloud.com |
dd8f89f1291812fd7b64a055c14821e6086c0c2a | 2de2141dc66caf1dbdcae973e9ce54567b6d6b96 | /Du/views.py | f44ee681540fd93052c4c6ed6e3d9e906b2a1dfd | [] | no_license | JacksonYANG/Du | c2f7bbd36bb9bd83821c38f3fc32a107e4c51f80 | d1042a23aeeb329a7ce4dab7f8909309acd801c2 | refs/heads/master | 2020-03-25T20:12:32.860913 | 2018-09-17T06:30:46 | 2018-09-17T06:30:46 | 144,120,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,508 | py | from django.contrib.auth.models import User
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth import authenticate, login, logout
import markdown
# Create your views here.
# 首页
from django.views.generic import ListView, DetailView
from Du.forms import LoginForm, RegisterForm, CommentForm
from Du.models import News, Blog, Music, Comment
def index(request):
blog_list = Blog.objects.all()
return render(request, 'index.html', context={'blog_list': blog_list})
# 热点页面
class hot(ListView):
model = News
template_name = 'hot.html'
context_object_name = 'news_list'
# 热点详细页面
class hot_detail(DetailView):
model = News
template_name = 'hot_detail.html'
context_object_name = 'news_detail'
def get(self, request, *args, **kwargs):
response = super(hot_detail, self).get(request, *args, **kwargs)
self.object.increase_browse()
return response
# 博客页面
class blog(ListView):
model = Blog
template_name = 'blog.html'
context_object_name = 'blog_list'
# 详细页面
class blog_detail(DetailView):
model = Blog
template_name = 'blog_detail.html'
context_object_name = 'blog_detail'
def get(self, request, *args, **kwargs):
response = super(blog_detail, self).get(request, *args, **kwargs)
self.object.increase_browse()
return response
def get_object(self, queryset=None):
blog = super(blog_detail, self).get_object(queryset=None)
blog.article = markdown.markdown(blog.article, extensions=['markdown.extensions.extra', 'markdown.extensions.codehilite', 'markdown.extensions.toc'])
return blog
def get_context_data(self, **kwargs):
context =super().get_context_data(**kwargs)
comment_list = Comment.objects.all()
comment_form = CommentForm()
context.update({
'comment_list': comment_list,
'comment_form': comment_form
})
return context
class music(ListView):
model = Music
template_name = 'music.html'
context_object_name = 'music_list'
# 登录页面
def login_view(request):
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('Du:index')
else:
message = '您输入的用户名或密码错误,请重新输入'
return render(request, 'login.html', {'form': login_form, 'message': message})
else:
login_form = LoginForm()
return render(request, 'login.html', {'form': login_form})
# 注销页面
def logout_view(request):
logout(request)
return redirect('Du:index')
# 注册页面
def register(request):
if request.method == 'POST':
register_form = RegisterForm(request.POST)
user = User()
if register_form.is_valid():
user.username = request.POST.get('username', '')
user.email = request.POST.get('email', '')
user.password = request.POST.get('password', '')
user.save()
return redirect('Du:index')
else:
register_form = RegisterForm()
return render(request, 'register.html', {'form': register_form})
# 处理评论
def blog_comment(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
author = get_object_or_404(User)
# 只有POST的时候才处理
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
# 先关联,不保存
comment = form.save(commit=False)
# 关联博客数据
comment.blog = blog
# 关联评论数据
comment.author = author
# 保存到数据库
comment.save()
return redirect(blog)
else:
comment_list = blog.comment_set.all()
context = {
'blog': blog,
'form': form,
'comment_list': comment_list
}
return render(request, 'blog_detail.html', context=context)
return redirect('Du:blog')
# sitemap页面
def get_sitemap(request):
return render(request, 'sitemap.xml')
| [
"353904675@qq.com"
] | 353904675@qq.com |
d37f9e22a0ce9602b392ea28978bc30abdfac727 | 25f4e2ba489b3a55e59cb5d7b35c3cf643a5162d | /ambientevirtualromulo/meuaplicativo/migrations/0004_auto_20190813_1357.py | 70a24fa6b46bad6e6c709d155b32e0ee8e5b0dba | [] | no_license | romulopin/crudpython | f3afa3888e39243003c7ca71f8b2c3a6183e8c83 | a7ab12653293638b9fae7e7dcebf431c18fad667 | refs/heads/master | 2020-07-02T21:35:14.233611 | 2019-08-14T02:45:24 | 2019-08-14T02:45:24 | 201,671,846 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | # Generated by Django 2.2.4 on 2019-08-13 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meuaplicativo', '0003_auto_20190812_1824'),
]
operations = [
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=100)),
('preco', models.FloatField()),
],
),
migrations.CreateModel(
name='Vendedor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=100)),
('cpf', models.CharField(max_length=11, unique=True, verbose_name='CPF')),
],
),
migrations.AlterModelOptions(
name='cliente',
options={'ordering': ['criado_em'], 'verbose_name': 'nome', 'verbose_name_plural': 'nomes'},
),
]
| [
"romulo.s.pinheiro@gmail.com"
] | romulo.s.pinheiro@gmail.com |
e77c7a685926bb163fb95b1898c1c03200116012 | d84f7c22cc61958e9670eb0c6528ff208691aa5e | /homepage/homepage/settings.py | fdbe1a1cea8bbbd2c5565b21f012a4882e4f16da | [] | no_license | ronnnwu/RVEX | b1ad2a58e2399d4d70732b76dff4106d4ad9b7f4 | 537c06083a432e5030863c3fcb1e40ac5eb81dc5 | refs/heads/master | 2022-08-29T23:35:45.289001 | 2017-05-03T16:52:39 | 2017-05-03T16:52:39 | 90,172,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,100 | py | """
Django settings for homepage project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u_khtksno9*-9n7y@0)h!cocx@p=(f@hqso4$)181l@qh$w2m1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'homepage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'homepage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"rcw278@nyu.edu"
] | rcw278@nyu.edu |
a675be1dee75baf40dfd7bedb70f50cd49c2bf55 | 003f009950f4d5d69556c6505160e44c0f3392eb | /C-Cpp/Fibonacci/JobArray/wrapper.py | 7e756594097dc715b6eb7f2c37002bee2c1e4afa | [
"MIT"
] | permissive | llsc-supercloud/teaching-examples | 995e1727849d8e64f3cc97a434bcb660f991f6c5 | 2e5d1d4b59d2808eea82705165709b50289c1fb7 | refs/heads/main | 2023-07-06T09:25:28.669596 | 2023-06-23T15:38:51 | 2023-06-23T15:38:51 | 181,913,936 | 32 | 11 | MIT | 2023-06-23T15:38:52 | 2019-04-17T14:57:08 | Jupyter Notebook | UTF-8 | Python | false | false | 692 | py | import sys,os
# Read in the file of inputs
finputs = open(sys.argv[1],"r+")
inputs = finputs.readlines()
# The name of the executable we want to run
exec_name = '../bin/fibonacci'
# Determine which inputs to run
if len(sys.argv) > 2: # if we pass in the task_id and number of tasks
# This is the task id and number of tasks that can be used
# to determine which indices this process/task is assigned
my_task_id = int(sys.argv[2])
num_tasks = int(sys.argv[3])
# Assign indices to this process/task
myinputs = inputs[my_task_id-1:len(inputs):num_tasks]
else:
myinputs = inputs
for input in myinputs:
cmd = './' + exec_name + ' ' + input
os.system(cmd)
| [
"lauren.milechin@mit.edu"
] | lauren.milechin@mit.edu |
c73594f3fd6f39702628fa34cba3b2585af4b651 | 8a7660a4e592f2fbb5c8e353dc06743754e9e606 | /python/pencilnew/math/is_int.py | 1995f40c4221659c652ac53267dbf5755b0a1b08 | [] | no_license | yangjian615/pencil-code | e5af53deb8d2e0aa895ce2f2f4dcf8505ec96b6c | 50a158369926ed742fe6881cbf14c7cced7199b0 | refs/heads/master | 2021-01-24T18:12:52.291589 | 2017-03-08T18:10:29 | 2017-03-08T18:10:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py |
def is_int(s):
""" Checks if string s is an int. """
try:
a = float(s)
b = int(a)
except ValueError:
return False
else:
return a == b
| [
"andreas.schreiber88@googlemail.com"
] | andreas.schreiber88@googlemail.com |
b115ae9cbf8febf57bc66fe07a163378faedddf7 | e7b2c4a271d95edaec0b9d071ef95f4afded3023 | /cloud/HyperlinkManager.py | 725fa3edbdca16016fce3d3fc6503b56e7fe31cb | [] | no_license | gmy/CloudStorageAndTransmission | c5dc4792b0ca3c6f574aca42e4df0bb044339b1e | d22fb731081a56dc259708e5cfb4c003c09f2cb7 | refs/heads/master | 2016-08-05T08:02:15.948909 | 2014-06-07T05:18:20 | 2014-06-07T05:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | __author__ = 'gumengyuan'
from Tkinter import *
class HyperlinkManager:
def __init__(self, text):
self.text = text
self.text.tag_config("hyper", foreground="blue", underline=1)
self.text.tag_bind("hyper", "<Enter>", self._enter)
self.text.tag_bind("hyper", "<Leave>", self._leave)
self.text.tag_bind("hyper", "<Button-1>", self._click)
self.reset()
def reset(self):
self.links = {}
def add(self, action):
# add an action to the manager. returns tags to use in
# associated text widget
tag = "hyper-%d" % len(self.links)
self.links[tag] = action
return "hyper", tag
def _enter(self, event):
self.text.config(cursor="hand2")
def _leave(self, event):
self.text.config(cursor="")
def _click(self, event):
for tag in self.text.tag_names(CURRENT):
if tag[:6] == "hyper-":
self.links[tag]()
return | [
"gumengyuan@resnet-38-197.resnet.ucsb.edu"
] | gumengyuan@resnet-38-197.resnet.ucsb.edu |
2b03c7cc20444724c7de6226946be343a76065e4 | d29882486d57d5ba2c62c8557fd8b052c844555a | /tests/test_rules.py | ea77c9382da81d078db3ef73bd8b956bfd5146dc | [
"MIT"
] | permissive | Awesome-Of-the-Internet/algorithms-keeper | 42dcd21827edafcd95746c2f81b7b6727b7b3200 | a23941489ced44170e13f9f64aa9b8893bb3d8a7 | refs/heads/master | 2023-07-15T07:07:37.367689 | 2021-08-31T05:21:00 | 2021-08-31T05:21:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,531 | py | import textwrap
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
import pytest
from fixit import CstLintRule
from fixit.common.utils import InvalidTestCase, LintRuleCollectionT, ValidTestCase
from fixit.rule_lint_engine import lint_file
from algorithms_keeper.parser.rules import (
NamingConventionRule,
RequireDescriptiveNameRule,
RequireDoctestRule,
RequireTypeHintRule,
UseFstringRule,
)
GenTestCaseType = Tuple[Type[CstLintRule], Union[ValidTestCase, InvalidTestCase], str]
# ``get_rules_from_config`` will generate all the rules including the ones directly
# from the ``fixit`` package. We only care about testing our custom rules.
CUSTOM_RULES: LintRuleCollectionT = {
NamingConventionRule,
RequireDoctestRule,
RequireDescriptiveNameRule,
RequireTypeHintRule,
UseFstringRule,
}
def _parametrized_id(obj: object) -> str:
if isinstance(obj, type):
return obj.__name__
elif isinstance(obj, str):
return obj
else:
return ""
def _dedent(src: str) -> str:
"""Remove the leading newline, if present, and all the common leading whitespace
from every line in `src`.
This can be used to make triple-quoted strings line up with the left edge of the
display, while still presenting them in the source code in indented form.
"""
if src[0] == "\n":
src = src[1:]
return textwrap.dedent(src)
def _gen_all_test_cases(rules: LintRuleCollectionT) -> List[GenTestCaseType]:
"""Generate all the test cases for the provided rules."""
cases: Optional[List[Union[ValidTestCase, InvalidTestCase]]]
all_cases: List[GenTestCaseType] = []
for rule in rules:
if not issubclass(rule, CstLintRule):
continue
for test_type in {"VALID", "INVALID"}:
if cases := getattr(rule, test_type, None):
for index, test_case in enumerate(cases):
all_cases.append((rule, test_case, f"{test_type}_{index}"))
return all_cases
@pytest.mark.parametrize(
"rule, test_case, test_case_id",
_gen_all_test_cases(CUSTOM_RULES),
ids=_parametrized_id,
)
def test_rules(
rule: Type[CstLintRule],
test_case: Union[ValidTestCase, InvalidTestCase],
test_case_id: str,
) -> None:
"""Test all the rules with the generated test cases.
All the test cases comes directly from the `VALID` and `INVALID` attributes for the
provided rules. Some of the points to keep in mind:
- Invalid test case should be written so as to generate only one report.
- Attributes should be in all caps: `INVALID` and `VALID`
- The code can be written in triple quoted string with indented blocks, they will
be removed with the helper function: ``_dedent``
The logic of the code is the same as that of ``fixit.common.testing`` but this has
been converted to using ``pytest`` and removed the fixture feature. This might be
added if there's any need for that in the future.
"""
reports = lint_file(
Path(test_case.filename),
_dedent(test_case.code).encode("utf-8"),
config=test_case.config,
rules={rule},
)
if isinstance(test_case, ValidTestCase):
assert len(reports) == 0, (
'Expected zero reports for this "valid" test case. Instead, found:\n'
+ "\n".join(str(e) for e in reports),
)
else:
assert len(reports) > 0, (
'Expected a report for this "invalid" test case but `self.report` was '
+ "not called:\n"
+ test_case.code,
)
assert len(reports) <= 1, (
'Expected one report from this "invalid" test case. Found multiple:\n'
+ "\n".join(str(e) for e in reports),
)
report = reports[0] # type: ignore
if test_case.line is not None:
assert (
test_case.line == report.line
), f"Expected line: {test_case.line} but found line: {report.line}"
if test_case.column is not None:
assert (
test_case.column == report.column
), f"Expected column: {test_case.column} but found column: {report.column}"
kind = test_case.kind if test_case.kind is not None else rule.__name__
assert (
kind == report.code
), f"Expected:\n {test_case.expected_str}\nBut found:\n {report}"
if test_case.expected_message is not None:
assert test_case.expected_message == report.message, (
f"Expected message:\n {test_case.expected_message}\n"
+ f"But got:\n {report.message}"
)
patch = report.patch
expected_replacement = test_case.expected_replacement
if patch is None:
assert expected_replacement is None, (
"The rule for this test case has no auto-fix, but expected source was "
+ "specified."
)
return
assert expected_replacement is not None, (
"The rule for this test case has an auto-fix, but no expected source was "
+ "specified."
)
expected_replacement = _dedent(expected_replacement)
patched_code = patch.apply(_dedent(test_case.code))
assert patched_code == expected_replacement, (
"Auto-fix did not produce expected result.\n"
+ f"Expected:\n{expected_replacement}\n"
+ f"But found:\n{patched_code}"
)
| [
"dhruvmanila@gmail.com"
] | dhruvmanila@gmail.com |
04f818c8d8d6447a4a6615bbd06b414243ac722d | 3c95972abacdb1556a0df80eecebba2694492865 | /test/request_network.py | fcd1c8682678e0f892ce8079eeec38c3fde9c1a7 | [
"Apache-2.0"
] | permissive | chenweixu/bunnyc_mgr | 6e6536ef21b17aeacf46263f1d5cca02ac648a78 | 1243fa951c45c665442212247d682ce3d39aec08 | refs/heads/master | 2022-04-30T09:15:56.618674 | 2019-08-09T06:01:38 | 2019-08-09T06:01:38 | 165,891,860 | 0 | 0 | Apache-2.0 | 2022-03-29T21:57:33 | 2019-01-15T17:13:22 | Python | UTF-8 | Python | false | false | 1,832 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Email: chenwx716@163.com
# DateTime: 2017-07-09 18:26:45
__author__ = "chenwx"
import json
import requests
app_url = "http://127.0.0.1:9002"
req_url = app_url + "/api/v2/network"
json_headers = {"content-type": "application/json"}
class Network(object):
"""docstring for Network"""
def __init__(self):
super(Network, self).__init__()
def pinghost(self, ip):
print(">> Network test ping host %s" % ip)
r = requests.get(req_url, timeout=10, params={"ping": ip})
print("http status--------->> %s" % r.status_code)
a = r.text
print(a)
return r.status_code
def check_url(self, arg):
print(">> Network check_url %s" % arg)
r = requests.get(req_url, timeout=10, params={"checkurl": arg})
print("http status--------->> %s" % r.status_code)
a = r.text
print(a)
return r.status_code
def check_local_port(self, ip, port, source="localhost"):
mess = {
"key": "c1c2",
"obj": "network",
"content": {"task": "check_port", "ip": ip, "port": port, "source": source},
}
print(
">> Network check sip-> %s , ip-> %s ,port-> %s" % (source, ip, str(port))
)
r = requests.post(req_url, data=json.dumps(mess), headers=json_headers)
print("http status--------->> %s" % r.status_code)
print(r.text)
return r.status_code
netcheck = Network()
# netcheck.pinghost('10.2.1.5')
# netcheck.check_url('http://10.2.1.5:9000/')
# netcheck.pinghost('10.2.1.67')
# netcheck.pinghost('10.23.12.68')
# netcheck.check_local_port("10.2.1.5", 9001)
# netcheck.check_local_port("10.2.1.5", 9001, source="10.2.1.67")
# netcheck.check_local_port("10.2.1.5", 22, source="10.2.1.67")
| [
"chenwx716@163.com"
] | chenwx716@163.com |
f1c5e69189bc8a90462b021c01db2e9eb96a1b0a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03239/s478967614.py | 7b473b8df981d413d6bb9ee6fe7d2eb9b2bdec4c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | n, t = map(int, input().split())
ans = 100000
for i in range(n):
c, tt = map(int, input().split())
if tt <= t:
ans = min(ans, c)
if ans == 100000:
print("TLE")
else:
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
08adebbe826f788b8506ac12bfe64a9e064ce640 | 95f32d9fa064222ae4b091f66500fad52667e246 | /progress.py | 5c10e49df438f34a7d666b3fa80b724ea70af586 | [] | no_license | sehugg/cupaloy | ef2d483db57df76756aa776f7eef29854d54a6ee | 522dab4e1785ba688db78c9fbbb21286454cedd1 | refs/heads/master | 2020-03-28T21:53:20.343750 | 2019-01-03T20:46:00 | 2019-01-03T20:46:00 | 149,189,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | #!/usr/bin/python
import sys,time
class ProgressTracker:
def __init__(self):
self.files_visited = 0
self.size_visited = 0
self.files_max = 0
self.size_max = 0
self.count = 0
self.last_refresh_time = 0
self.refresh_interval = 0
self.current_name = ''
self.goals = []
def pushGoal(self, dfiles, dsize=0):
self.files_max += dfiles
self.size_max += dsize
self.goals.append((self.files_visited+dfiles, self.size_visited+dsize))
def popGoal(self):
nf,ns = self.goals.pop()
self.files_visited = nf
self.size_visited = ns
self.refresh()
def inc(self, name, dsize=0):
self.files_visited += 1
if dsize:
self.size_visited += dsize
self.files_max = max(self.files_max, self.files_visited)
self.size_max = max(self.size_max, self.size_visited)
self.current_name = name
self.refresh()
def incGoal(self, name, dsize=0):
self.files_max += 1
if dsize:
self.size_max += dsize
self.inc(name, dsize)
def refresh(self, force=False):
t = time.time()
if force or t - self.last_refresh_time >= self.refresh_interval:
self.output()
self.last_refresh_time = t
def output(self):
sys.stdout.write(str(self))
sys.stdout.write('\r')
sys.stdout.flush()
def __repr__(self):
# TODO: unicode escape?
n = ('%s' % [self.current_name])[0:60]
if len(self.goals) == 0:
s = "(%d)" % (self.files_visited)
elif self.size_max > 0:
pct = self.size_visited*100.0/self.size_max
s = "(%d/%d) %5.1f%%" % (self.files_visited, self.files_max, pct)
else:
s = "(%d/%d)" % (self.files_visited, self.files_max)
return ("%s %" + str(80-len(s)) + "s") % (s, n)
###
if __name__ == '__main__':
pt = ProgressTracker()
pt.push('Foo', 10, 3)
pt.pop()
| [
"hugg@fasterlight.com"
] | hugg@fasterlight.com |
a810239db0aeedff351d3d9efc5207a589a7f567 | f1d3591ebc611960b5b3330159929f4134145f6d | /csqlite3/client.py | 56c6c3787ba580fdc4609ea0bd29bc6fa7848bd3 | [
"MIT"
] | permissive | AlanCristhian/csqlite3 | bb2acc0acf9e09bee8a05770a77a7f3c7170ca84 | c7b9fc1578fd7bd5d21d3fd7edcefbf563264929 | refs/heads/master | 2020-03-18T05:03:28.477142 | 2018-05-27T21:31:33 | 2018-05-27T21:31:33 | 134,321,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,603 | py | import atexit
import logging
import os
import socket
import threading
import warnings
import pathlib
from . import utils
_logger = logging.getLogger("Client")
_PID = os.getpid()
class _ConnectionSocket(utils.PickleSocket):
def request(self, *message):
self.write(message)
response = self.read()
if isinstance(response, utils.ServerError):
raise response.error
elif isinstance(response, utils.ServerWarning):
warnings.warn(response.warning.args[1], response.warning.__class__)
return response
class Cursor:
"""SQLite database cursor class."""
def __init__(self, database, socket, row_factory, text_factory):
self._socket = socket
self._request = self._socket.request
self._row_factory = row_factory
self._text_factory = text_factory
self._database = database
self._request(_PID, "cursor", "open", {})
def execute(self, sql, parameters=()):
"""Executes a SQL statement."""
self._request(_PID, "cursor", "execute", [sql, parameters])
return self
def fetchone(self):
"""Fetches one row from the resultset."""
data = self._request(_PID, "cursor", "fetchone", {})
if self._row_factory:
data = self._row_factory(self, data)
return data
def fetchall(self):
"""Fetches all rows from the resultset."""
data = self._request(_PID, "cursor", "fetchall", ())
if self._row_factory:
if isinstance(data, list):
for i, row in enumerate(data):
data[i] = self._row_factory(self, row)
return data
def fetchmany(self, size=None):
"""Repeatedly executes a SQL statement."""
if size is None:
size = self.arraysize
return self._request(_PID, "cursor", "fetchmany", [size])
def close(self):
"""Closes the cursor."""
return self._request(_PID, "cursor", "close", {})
@property
def rowcount(self):
return self._request(_PID, "cursor", "_get_attribute", ["rowcount"])
@property
def lastrowid(self):
return self._request(_PID, "cursor", "_get_attribute", ["lastrowid"])
@property
def arraysize(self):
return self._request(_PID, "cursor", "_get_attribute", ["arraysize"])
@arraysize.setter
def arraysize(self, size):
return self._request(_PID, "cursor", "_set_attribute",
["arraysize", size])
def __iter__(self):
"""Implement iter(self)."""
return iter(self.fetchall())
def executemany(self, sql, seq_of_parameters):
"""Repeatedly executes a SQL statement."""
self._request(_PID, "cursor", "executemany", [sql, seq_of_parameters])
return self
def executescript(self, sql_script):
"""Executes a multiple SQL statements at once."""
self._request(_PID, "cursor", "executescript", [sql_script])
return self
@property
def description(self):
return self._request(_PID, "cursor", "_get_attribute", ["description"])
class Connection:
"""connect(database[, timeout, detect_types, isolation_level,
check_same_thread, cached_statements, uri])
Opens a connection to the SQLite database file *database*. You can use
":memory:" to open a database connection to a database that resides in
RAM instead of on disk."""
def __init__(self, database, timeout=5, detect_types=False,
isolation_level="", check_same_thread=True,
cached_statements=100, uri=False):
self.isolation_level = isolation_level
self._socket = _ConnectionSocket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(timeout)
self._socket.connect((utils.HOST, utils.PORT))
self._cursor = None
self._progress = None
self._trace = None
self._row_factory = None
self._text_factory = None
if ":memory:" in database:
self._database = database
else:
self._database = str(pathlib.Path(database).resolve())
kwargs = {"database": database,
"timeout": timeout,
"detect_types": detect_types,
"isolation_level": isolation_level,
"check_same_thread": False,
"cached_statements": cached_statements,
"uri": uri}
self._request = self._socket.request
self._request(_PID, "connection", "open", kwargs)
@property
def in_transaction(self):
return self._request(_PID, "connection", "_get_attribute",
["in_transaction"])
def cursor(self, factory=Cursor):
"""Return a cursor for the connection."""
if not self._cursor:
self._cursor = factory(self._database, self._socket,
self._row_factory, self._text_factory)
return self._cursor
def commit(self):
"""Commit the current transaction."""
return self._request(_PID, "connection", "commit", ())
def rollback(self):
"""Roll back the current transaction."""
return self._request(_PID, "connection", "rollback", ())
def close(self):
"""Closes the connection."""
self._request(_PID, "connection", "close", {})
self._socket.close()
if self._progress:
self._progress.shutdown()
def execute(self, sql, parameters=()):
"""Executes a SQL statement. Non-standard."""
return self.cursor().execute(sql, parameters)
def executemany(self, sql, seq_of_parameters):
"""Repeatedly executes a SQL statement. Non-standard."""
return self.cursor().executemany(sql, seq_of_parameters)
def executescript(self, sql_script):
"""Executes a multiple SQL statements at once. Non-standard."""
return self.cursor().executescript(sql_script)
def create_function(self, name, num_params, func):
"""Creates a new function. Non-standard."""
return self._request(_PID, "connection", "create_function",
[name, num_params, func])
def create_aggregate(self, name, num_params, aggregate_class):
"""Creates a new aggregate. Non-standard."""
return self._request(_PID, "connection", "create_aggregate",
[name, num_params, aggregate_class])
def create_collation(self, name, callable):
"""Creates a collation function. Non-standard."""
return self._request(_PID, "connection", "create_collation",
[name, callable])
def interrupt(self):
"""Abort any pending database operation. Non-standard."""
return self._request(_PID, "connection", "interrupt", ())
def set_authorizer(self, authorizer_callback):
"""Sets authorizer callback. Non-standard."""
return self._request(_PID, "connection", "set_authorizer",
[authorizer_callback])
def set_progress_handler(self, handler, n):
"""Sets progress handler callback. Non-standard."""
if not self._progress:
self._progress = utils.new_progress_server(handler)
thread = threading.Thread(target=self._progress.serve_forever)
thread.daemon = True
thread.start()
arguments = (self._progress.server_address, n)
return self._request(_PID, "connection", "set_progress_handler",
arguments)
def set_trace_callback(self, trace_callback):
"""Sets a trace callback called for each SQL
statement (passed as unicode). Non-standard.
"""
if not self._trace:
self._trace = utils.new_trace_server(trace_callback)
thread = threading.Thread(target=self._trace.serve_forever)
thread.daemon = True
thread.start()
return self._request(_PID, "connection", "set_trace_callback",
[self._trace.server_address])
def enable_load_extension(self, enabled):
"""Enable dynamic loading of SQLite
extension modules. Non-standard.
"""
return self._request(_PID, "connection", "enable_load_extension",
[enabled])
def load_extension(self, path):
"""Load SQLite extension module. Non-standard."""
return self._request(_PID, "connection", "load_extension", [path])
@property
def row_factory(self):
return self._row_factory
@row_factory.setter
def row_factory(self, factory):
self._request(_PID, "connection", "_set_attribute",
["row_factory", factory])
self._row_factory = factory
if self._cursor:
self._cursor._row_factory = factory
@property
def text_factory(self):
return self._text_factory
@text_factory.setter
def text_factory(self, factory):
self._request(_PID, "connection", "_set_attribute",
["text_factory", factory])
self._text_factory = factory
@property
def total_changes(self):
return self._request(_PID, "connection", "_get_attribute",
["total_changes"])
def iterdump(self):
self._request(_PID, "connection", "iterdump", ())
def iter_dump():
while True:
row = self._request(_PID, "connection", "_next_iterdump", ())
if row is StopIteration:
break
else:
yield row
return iter_dump()
def connect(database, timeout=5, detect_types=False, isolation_level="",
check_same_thread=True, factory=Connection, cached_statements=100,
uri=False):
"""connect(database[, timeout, detect_types, isolation_level,
check_same_thread, factory, cached_statements, uri])
Opens a connection to the SQLite database file *database*. You can use
":memory:" to open a database connection to a database that resides in
RAM instead of on disk."""
return factory(database, timeout, detect_types, isolation_level,
check_same_thread, cached_statements, uri)
@atexit.register
def close_client_app():
with _ConnectionSocket(socket.AF_INET, socket.SOCK_STREAM) as _socket:
_socket.settimeout(1/200)
try:
_socket.connect((utils.HOST, utils.PORT))
_socket.request(_PID, "client_app", "close", {})
except (ConnectionRefusedError, ConnectionResetError,
socket.timeout):
pass
def register_converter(typename, callable):
with _ConnectionSocket(socket.AF_INET, socket.SOCK_STREAM) as _socket:
_socket.settimeout(5)
_socket.connect((utils.HOST, utils.PORT))
args = (typename, callable)
_socket.request(_PID, "csqlite3", "register_converter", args)
def register_adapter(type, callable):
with _ConnectionSocket(socket.AF_INET, socket.SOCK_STREAM) as _socket:
_socket.settimeout(5)
_socket.connect((utils.HOST, utils.PORT))
args = (type, callable)
_socket.request(_PID, "csqlite3", "register_adapter", args)
def enable_callback_tracebacks(flag=False):
with _ConnectionSocket(socket.AF_INET, socket.SOCK_STREAM) as _socket:
_socket.settimeout(5)
_socket.connect((utils.HOST, utils.PORT))
_socket.request(_PID, "csqlite3", "enable_callback_tracebacks", [flag])
| [
"alan.cristh@gmail.com"
] | alan.cristh@gmail.com |
68021c77c0ee0ad4339ea6f035207dae6ea9a485 | 1a7e6b0f6281c7705e75e4ec57520388e9eac0bc | /loops/for.py | c4552e5eea11b9ffd5335eee02e07b0f508cfa3f | [] | no_license | drafski89/useful-python | 139954cf521c4eec1c0bab3420185c6612c6fbd6 | ebc3ff2f3ab89b1b9e4fb1c051564baddbeec8e8 | refs/heads/master | 2021-09-06T21:27:14.766923 | 2018-02-11T17:50:59 | 2018-02-11T17:50:59 | 108,578,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # Basic example of implementing a for-loop
# Create a variable called count to hold the current count
count = 1
print x
# For loop
# for [variable] in range (start amount, stop amount, increment amount)
for count in range(1, 12, 1):
# Add 1 to count and print the result
count = count + 1
print count | [
"brandt@mmsi.com"
] | brandt@mmsi.com |
01ba65d8da0f32d363289cae1846027df987e112 | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2017/EscapeRegExpPattern.spec | 2289280aaa2efa7ea17a57ad0c73afcb15409c9a | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 1,405 | spec | 1. Let _S_ be a String in the form of a |Pattern[~U]| (|Pattern[+U]| if _F_ contains `"u"`) equivalent to _P_ interpreted as UTF-16 encoded Unicode code points (<emu-xref href="#sec-ecmascript-language-types-string-type"></emu-xref>), in which certain code points are escaped as described below. _S_ may or may not be identical to _P_; however, the internal procedure that would result from evaluating _S_ as a |Pattern[~U]| (|Pattern[+U]| if _F_ contains `"u"`) must behave identically to the internal procedure given by the constructed object's [[RegExpMatcher]] internal slot. Multiple calls to this abstract operation using the same values for _P_ and _F_ must produce identical results.
1. The code points `/` or any |LineTerminator| occurring in the pattern shall be escaped in _S_ as necessary to ensure that the String value formed by concatenating the Strings `"/"`, _S_, `"/"`, and _F_ can be parsed (in an appropriate lexical context) as a |RegularExpressionLiteral| that behaves identically to the constructed regular expression. For example, if _P_ is `"/"`, then _S_ could be `"\\/"` or `"\\u002F"`, among other possibilities, but not `"/"`, because `///` followed by _F_ would be parsed as a |SingleLineComment| rather than a |RegularExpressionLiteral|. If _P_ is the empty String, this specification can be met by letting _S_ be `"(?:)"`.
1. Return _S_. | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
dece9dbbfd2dd780ee3a8047c874acb51b1a0d50 | 03ff74fff064b69e5b41af42372a6cc33738c294 | /project_advance_views/Blog/migrations/0013_auto_20190213_0516.py | 43dedb029ef0a8f5093863b4fbf0e2cbbafa56d0 | [] | no_license | rosaridho/DJANGO_MVC | 189a6ba400984e91dd0b057072f8e60348619242 | 44e6f0bd0dcd7e9d268dc2580770c79d54aeda85 | refs/heads/master | 2022-11-28T04:57:03.973009 | 2019-02-13T13:55:28 | 2019-02-13T13:55:28 | 170,068,288 | 0 | 0 | null | 2022-11-22T03:25:38 | 2019-02-11T04:34:06 | HTML | UTF-8 | Python | false | false | 396 | py | # Generated by Django 2.1.5 on 2019-02-13 05:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Blog', '0012_auto_20190213_0512'),
]
operations = [
migrations.AlterField(
model_name='artikel',
name='gambar',
field=models.ImageField(upload_to='media/images'),
),
]
| [
"muhammadridhorosa@gmail.com"
] | muhammadridhorosa@gmail.com |
a18b89fb83c54798265c1232a5612a39c65e53ff | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ke4FSMdG2XYxbGQny_5.py | 3eb2cfb5e413340d184121753557a8220852eae5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
def even_odd_transform(lst, n):
l=lst
if len(l)==0:
return l
for i in range(n):
for j in range(len(l)):
if l[j]%2==0:
l[j]=l[j]-2
else:
l[j]=l[j]+2
return l
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a1dd46d126b3b32636fc69f0ddcb514cf076741c | ea35facf6d823e93706b5f551408250b1e089be9 | /共通問題/9_2.py | e241ba9b38b5616e7210f25d70710da375922582 | [] | no_license | YukiNGSM/PythonStudy | 7a2d24f4762e384531eadd691858296b00b6a6b3 | 26310d0e007745ff4920ccd0fc3e51771cb2d5f1 | refs/heads/master | 2023-07-19T00:06:29.061255 | 2021-09-22T01:29:49 | 2021-09-22T01:29:49 | 409,025,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | def hello():
for i in range(10):
print(('Hello'))
hello() | [
"ykh2135239@o-hara.ac.jp"
] | ykh2135239@o-hara.ac.jp |
9fce20f8fc036410b74c53272e3f3ba7e0bbea05 | 9468507c1beeb2cb69591889605ea155d2cb7a63 | /mysite/urls.py | 3c3cb29f215257dcd4b0b3f45a2b59dd078c5b1b | [] | no_license | nimal54/drf-polls | 2375e2f5b78670de40c72b51eb616a69e7f49a65 | 9b29230998146eb225e0cffa0703d6bed1cc876a | refs/heads/master | 2020-04-25T00:21:14.952917 | 2018-03-16T11:54:53 | 2018-03-16T11:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from django.urls import include, path
from django.contrib import admin
urlpatterns = [
path('api/', include('polls.urls')),
path('admin/', admin.site.urls),
] | [
"will@wsvincent.com"
] | will@wsvincent.com |
2105b7ef7452e7d5e92c95d2a3523538a6855b8b | 0354baf04e26275d42858b8430e5a7de85d4ca98 | /Tema3/main.py | 20ee2025304e2d8b183cd71ef137d4844e83254d | [] | no_license | RazvanOprea/Numerical-Calculus | 1e30d0ca67559b07eafee516f9d578ac2241c7ac | 503b1a6b58dd128c8318439f5e2dfbd670ea2882 | refs/heads/master | 2020-03-16T22:40:20.789442 | 2018-05-11T14:15:42 | 2018-05-11T14:15:42 | 133,048,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,897 | py | import numpy as np
def read_file(filename):
f = open(filename, "r")
n = int(f.readline()) # value of n
vector_b = []
f.readline() # empty line
for i in range(0, n):
line = float(f.readline())
vector_b += [line]
f.readline() # empty line
lines = f.readlines() # matrix
matrix = []
for line in lines:
data = line.split(',')
matrix.append((float(data[0]), int(data[1]), int(data[2])))
f.close()
return n, vector_b, matrix
def diagonal_element(vector, line):
found = False
index = 0
for index in range(len(vector[line])):
if vector[line][index][1] == line:
found = True
break
if found:
n = len(vector[line]) - 1
temp_val = vector[line][n][0]
temp_col = vector[line][n][1]
vector[line][n][0] = vector[line][index][0]
vector[line][n][1] = vector[line][index][1]
vector[line][index][0] = temp_val
vector[line][index][1] = temp_col
def sparse_matrix(n, matrix):
new_matrix = dict()
for element in matrix:
el = element[0]
i = element[1]
j = element[2]
row_elements = new_matrix.get(i)
if row_elements != None:
same_col = False
for row_element in row_elements:
if row_element[1] == j:
row_element[0] += el
same_col = True
break
if not same_col:
row_elements.append([el, j])
new_matrix[i] = row_elements
#diagonal_element(new_matrix[i], i)
else:
temp_list = list()
temp_list.append([el, j])
new_matrix[i] = temp_list
my_vector = [[] for _ in range(n)]
for index in range(0, n):
elem = new_matrix.get(index)
if elem != None:
temp = list()
for (val, col) in elem:
temp.append([val, col])
my_vector[index].extend(temp)
else:
my_vector[index].append(0)
return my_vector
def sparse_matrix2(n, matrix):
new_matrix = dict()
for element in matrix:
el = element[0]
j = element[1]
i = element[2]
row_elements = new_matrix.get(i)
if row_elements != None:
same_col = False
for row_element in row_elements:
if row_element[1] == j:
row_element[0] += el
same_col = True
break
if not same_col:
row_elements.append([el, j])
new_matrix[i] = row_elements
else:
temp_list = list()
temp_list.append([el, j])
new_matrix[i] = temp_list
my_vector = [[] for _ in range(n)]
for index in range(0, n):
elem = new_matrix.get(index)
if elem != None:
temp = list()
for (val, col) in elem:
temp.append([val, col])
my_vector[index].extend(temp)
else:
my_vector[index].append(0)
return my_vector
def equal_matrices(m1, m2, epsilon):
if len(m1) != len(m2):
return False
for i in range(0, len(m1)):
if len(m1[i]) != len(m2[i]):
#print(str(len(m1[i])) + ' '+ str(len(m2[i])) + '<------' +str(i))
return False
m1_ord_line = sorted(m1[i], key=lambda el: (el[1], el[0]))
m2_ord_line = sorted(m2[i], key=lambda el: (el[1], el[0]))
for j in range(0, len(m1[i])):
if m1_ord_line[j][1] != m2_ord_line[j][1] or abs(m1_ord_line[j][0] - m2_ord_line[j][0]) > epsilon:
return False
return True
def equal_vectors(v1, v2):
epsilon = 0.1
if len(v1) != len(v2):
return False
for i in range(0, len(v1)):
if abs(v1[i] - v2[i]) > epsilon:
return False
return True
def add_matrices(m1, m2):
if len(m1) != len(m2):
print("Error matrices addition")
return -1
m = [[] for _ in range(len(m1))]
for i in range(0, len(m1)):
for j in range(0, len(m1[i])):
m[i].append([m1[i][j][0], m1[i][j][1]])
for i in range(0, len(m2)):
for j in range(0, len(m2[i])):
found = False
for k in range(0, len(m[i])):
if m2[i][j][1] == m[i][k][1]:
m[i][k][0] += m2[i][j][0]
found = True
break
if not found:
m[i].append([m2[i][j][0], m2[i][j][1]])
return m
def column_element(m, line, col):
for i in range(0, len(m[line])):
if m[line][i][1] == col:
return m[line][i][0]
return 0
def multiply_matrices(m1, m2):
if len(m1) != len(m2):
print("Error multiply matrices")
return -1
m = [[] for _ in range(len(m1))]
for i in range(0, len(m1)):
for col in range(0, len(m1)):
element_sum = 0
for j in range(0, len(m1[i])):
element_sum += m1[i][j][0] * column_element(m2, m1[i][j][1], col)
if element_sum:
m[i].append([element_sum, col])
return m
def multiply_matrices2(m1, m2):
# 0 x 0
m = [[] for _ in range(len(m1))]
for i in range(0, len(m1)):
for j in range(0,len(m1)):
a = m1[i]
b = m2[j]
v = np.zeros((len(m1),), dtype=float)
sum = 0
for x in a:
v[x[1]] = x[0]
for x in b:
if v[x[1]] != 0:
sum += v[x[1]] * x[0]
if sum != 0:
m[i].append([sum, j])
return m
def multiply_vector(m):
#m = matrix, b = vector
x = [i for i in range(1, len(m) + 1)]
x.sort(reverse=True)
my_vector = list()
for i in range(0, len(m)):
temp_sum = 0
for j in range(0, len(m[i])):
temp_sum += m[i][j][0] * x[m[i][j][1]]
my_vector.append(temp_sum)
return my_vector
def print_matrix(m, filename):
f = open(filename, 'w')
for i in range(0, len(m)):
f.write(str(i) + ': ')
m_sorted = sorted(m[i], key=lambda el: (el[1], el[0]))
for j in m_sorted:
f.write(str(j) + ', ')
f.write('\n')
f.close()
if __name__ == "__main__":
n1, b1, A = read_file("a.txt")
n2, b2, B = read_file("b.txt")
n3, b3, AplusB = read_file("aplusb.txt")
n4, b4, AoriB = read_file("aorib.txt")
A_sparse = sparse_matrix(n1, A)
B_sparse = sparse_matrix(n2, B)
AplusB_sparse = sparse_matrix(n3, AplusB)
AoriB_sparse = sparse_matrix(n4, AoriB)
B_sparse_reverse = sparse_matrix2(n2, B) # swap rows with columns
matrices_sum = add_matrices(A_sparse, B_sparse) # A + B
m_vector = multiply_vector(A_sparse) # A * x
m2_vector = multiply_vector(B_sparse) # B * x
matrices_multiplication = multiply_matrices2(A_sparse, B_sparse_reverse) # A * B
print("A + B = AplusB --> " + str(equal_matrices(AplusB_sparse, matrices_sum, 0.1)))
print("A * x = b --> " + str(equal_vectors(m_vector, b1)))
print("B * x = b --> " + str(equal_vectors(m2_vector, b2)))
print("A * B = AoriB --> " + str(equal_matrices(AoriB_sparse, matrices_multiplication, 0.1)))
print_matrix(AplusB_sparse, "aplusb_fisier.txt")
print_matrix(matrices_sum, "aplusb_calculat.txt")
print_matrix(AoriB_sparse, "aorib_fisier.txt")
print_matrix(matrices_multiplication, "aorib_calculat.txt")
print("------------------------------")
print("A * x [first 10]: " + str(m_vector[:10]))
print("b a.txt [first 10]: " + str(b1[:10]))
print("B * x [first 10]: " + str(m2_vector[:10]))
print("b b.txt [first 10]: " + str(b2[:10]))
| [
"razvan.oprea96@yahoo.com"
] | razvan.oprea96@yahoo.com |
6b3a7c7b06a0375d7d6fa057dced0728ce6731e2 | 645ed62c32f02dc3216c3e0b9cb8fcf93bd51813 | /hello.py | 05330901a44c34043476817809a076ed35906d39 | [] | no_license | pstefy/Practica | c9f1308d56afec1613edf38453280dc81a334e6b | 59e0d4fe3054e1ad0e89a18259c687b8ecde334e | refs/heads/master | 2022-12-23T20:57:13.248330 | 2020-09-30T23:20:52 | 2020-09-30T23:20:52 | 300,057,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | x = 3
y = 7
u = 2
d = 10
p = 9 | [
"p.stefani16@gmail.com"
] | p.stefani16@gmail.com |
e3be1a1f6685b9fd2ffac2f8b1023cb485b10a05 | 6d0cc0dec09ce05158a6e9133efb5b5a91999a13 | /demo/test/test_resources.py | 2302e7b9fac077a2aa8635eb50501277ef37f5ef | [] | no_license | Anafi/Simplification-processing | f05c7ccc3a822c131fb32797f01e02b08eb4f391 | a49fe2c05b9b83f2bad13b74bf9dd878bea7eb04 | refs/heads/master | 2021-01-21T14:40:27.810677 | 2018-08-10T17:08:11 | 2018-08-10T17:08:11 | 56,625,463 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | # coding=utf-8
"""Resources test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'a.acharya@spacesyntax.com'
__date__ = '2016-02-12'
__copyright__ = 'Copyright 2016, AA'
import unittest
from PyQt4.QtGui import QIcon
class demoDialogTest(unittest.TestCase):
"""Test rerources work."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_icon_png(self):
"""Test we can click OK."""
path = ':/plugins/demo/icon.png'
icon = QIcon(path)
self.assertFalse(icon.isNull())
if __name__ == "__main__":
suite = unittest.makeSuite(demoResourcesTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| [
"ioanna.kolovou@gmail.com"
] | ioanna.kolovou@gmail.com |
4c46ee602378f6706ed2aef9bf2b0ede9745bb0d | a43f4af7867763fef334e6870972de943a608551 | /pyplotter/hists/hists.py | bf9f0bdd86c0935b7c8bc59d12554dfe317883c2 | [] | no_license | fscutti/FTKPlotter | 79803c7a98c8ae7832e975aa6d3b2e7b87a29401 | 32c8a0fa1a87f0bfa7592f347a62d2df1dad51c2 | refs/heads/master | 2018-09-21T19:21:51.607374 | 2018-08-05T08:53:34 | 2018-08-05T08:53:34 | 111,771,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,694 | py | # encoding: utf-8
'''
hist.py
description: histogram class
'''
"""
from numba import jit, jitclass, njit # import the decorators
from numba import int32, float32 # import the types
spec = [
('value', int32), # a simple scalar field
('array', float32[:]), # an array field
]
"""
import ROOT
from math import sqrt
#from numba import jit
#________________________________________________________
#@jit
def _get_moments(data,moment):
N = len(data)
if N == 0:
#print "No events"
return 0
mean = sigma = rms = sigsigma = 0
for i in data:
mean += i
mean /= N
if moment=="mean": return mean
for i in data:
sigma += pow(i-mean,2)
rms += pow(i,2)
sigma = sqrt( sigma / (N-1) )
rms = sqrt(rms/N) # this is the default error in a Projection
sig_rms = rms / sqrt(N)
if moment=="rms": return rms
if moment=="sig_rms": return sig_rms
if moment=="sigma": return sigma
if moment=="sigsigma":
sigsigma = 2 * pow(sigma,4) / (N-1)
return sqrt(sigsigma)
# - - - - - - - - - - - class defs - - - - - - - - - - - - #
#------------------------------------------------------------
class Hist1D(object):
    '''
    class to hold histogram info for plotting
    one-dimensional histograms

    Plain mode draws 'var_fill' from the chain; efficiency mode
    (non-empty 'num_selection') divides a numerator by a denominator
    histogram; profile mode ('is_profile') builds a per-x-bin resolution
    plot and expects 'var_fill_x'/'var_fill_y' to be supplied via **kw.
    The ROOT objects are only created when create_hist() is called.
    '''
    #________________________________________________________
    def __init__(self,
                 hname         = None,   # histogram / ROOT object name
                 leg_entry     = None,   # legend label
                 xtitle        = None,   # x-axis title
                 ytitle        = None,   # y-axis title
                 nbins         = None,
                 xmin          = None,
                 xmax          = None,
                 ymin          = None,   # y (slice) range, used in profile mode
                 ymax          = None,
                 fitmin        = None,   # gaussian fit range for the slices
                 fitmax        = None,
                 var_fill      = None,   # branch expression for plain histograms
                 vec_fill      = None,
                 instance      = None,   # cached ROOT histogram, filled by create_hist()
                 selection     = "",
                 num_selection = "",     # numerator selection (efficiency mode)
                 style_dict    = None,   # optional line/marker style settings
                 chain         = None,   # TTree/TChain to draw from
                 is_profile    = False,  # build a TProfile-based resolution plot
                 use_roostat   = False,  # take spreads from ROOT instead of slice fits
                 use_fit       = True,   # fit each slice with a gaussian
                 get_slices    = False,  # keep the per-bin slice hists/fits in self.slices
                 slices        = None,
                 **kw):
        self.hname         = hname
        self.leg_entry     = leg_entry
        self.xtitle        = xtitle
        self.ytitle        = ytitle
        self.nbins         = nbins
        self.xmin          = xmin
        self.xmax          = xmax
        self.ymin          = ymin
        self.ymax          = ymax
        self.fitmin        = fitmin
        self.fitmax        = fitmax
        self.var_fill      = var_fill
        self.vec_fill      = vec_fill
        self.instance      = instance
        self.selection     = selection
        self.num_selection = num_selection
        self.style_dict    = style_dict
        self.chain         = chain
        self.is_profile    = is_profile
        self.use_roostat   = use_roostat
        self.use_fit       = use_fit
        self.get_slices    = get_slices
        self.slices        = slices
        ## set additional key-word args
        # ----------------------------------------------------
        # FIX: dict.items() instead of the Python2-only iteritems(), so the
        # extra keywords (e.g. var_fill_x/var_fill_y) also work on Python 3.
        for k, w in kw.items():
            setattr(self, k, w)
    #________________________________________________________
    def get_name(self, chain=None):
        """Return the class name ('chain' is accepted but unused)."""
        return self.__class__.__name__
    #________________________________________________________
    def set_style(self, h=None):
        """
        set style of histogram

        Applies axis titles and any line/marker settings from style_dict.
        Returns the (modified in place) histogram.
        """
        h.GetXaxis().SetTitle(self.xtitle)
        h.GetYaxis().SetTitle(self.ytitle)
        if self.style_dict:
            # FIX: items() instead of Python2-only iteritems()
            for k, v in self.style_dict.items():
                if k == "line_style":   h.SetLineStyle(self.style_dict[k])
                if k == "line_color":   h.SetLineColor(self.style_dict[k])
                if k == "line_width":   h.SetLineWidth(self.style_dict[k])
                if k == "marker_style": h.SetMarkerStyle(self.style_dict[k])
                if k == "marker_color": h.SetMarkerColor(self.style_dict[k])
                if k == "marker_size":  h.SetMarkerSize(self.style_dict[k])
        return h
    #________________________________________________________
    def build_data_dict(self, h=None):
        """Return {bin_index: per-bin bookkeeping dict} for histogram *h*.

        Each entry records the bin edges/content/error and holds a dummy
        slice histogram (re-binned later in create_hist) used to collect
        the y values that fall into that x bin.
        """
        bin_dict = {}
        for ibin in range(1, h.GetNbinsX()+1):
            bin_dict[ibin] = {"lowedge":0., "hiedge":0., "entries":[], "content":0., "error":0., "mean":0., "RMS":0., "RMSError":0.}
            bin_dict[ibin]["lowedge"] = h.GetBinLowEdge(ibin)
            bin_dict[ibin]["hiedge"]  = h.GetBinLowEdge(ibin) + h.GetBinWidth(ibin)
            bin_dict[ibin]["content"] = h.GetBinContent(ibin)
            bin_dict[ibin]["error"]   = h.GetBinError(ibin)
            h_name = h.GetName()+"_slice_%s_%s" % (bin_dict[ibin]["lowedge"], bin_dict[ibin]["hiedge"])
            # just a dummy hist (axis range is reset before filling)
            bin_dict[ibin]["h_slice"] = ROOT.TH1D(h_name, h_name, 4, -1., 1.)
            bin_dict[ibin]["h_slice"].GetXaxis().SetTitle(self.ytitle)
            bin_dict[ibin]["h_slice"].GetYaxis().SetTitle("Entries")
            bin_dict[ibin]["h_slice"].GetYaxis().SetTitleOffset(1.3)
            bin_dict[ibin]["h_slice"].Sumw2()
        return bin_dict
    #________________________________________________________
    def get_moments(self, data, moment):
        """Delegate to the module-level _get_moments helper."""
        return _get_moments(data, moment)
    #________________________________________________________
    def create_hist(self, chain=None):
        """Build, fill and style the histogram from self.chain.

        Returns the styled ROOT histogram (also cached in self.instance).
        """
        if chain: self.chain = chain
        assert self.chain, "ERROR: chain not initialised for %s" % self.hname
        if self.is_profile:
            #h_prof = ROOT.TProfile(self.hname,self.hname, self.nbins, self.xmin, self.xmax, self.ymin, self.ymax)
            h_prof = ROOT.TProfile(self.hname, self.hname, self.nbins, self.xmin, self.xmax)
            if self.use_roostat:
                self.chain.Draw(self.var_fill_y+":"+self.var_fill_x+">>"+self.hname, self.selection, "prof")
            h = h_prof.ProjectionX()
            if self.use_roostat:
                # Use ROOT facilities to compute moments: replace each bin
                # content by the profile spread (bin error).
                for ibin in range(1, h.GetNbinsX()+1):
                    h.SetBinContent(ibin, h.GetBinError(ibin))
                    h.SetBinError(ibin, 10e-10)
            if not self.use_roostat:
                # Compute moments by hand: collect the y values per x bin,
                # then either gaussian-fit each slice or take its std dev.
                ddict = self.build_data_dict(h)
                nentries = self.chain.GetEntries()
                for i in range(nentries):
                    # FIX: TTree entries are 0-indexed; the original
                    # GetEntry(i+1) skipped entry 0 and read past the end.
                    self.chain.GetEntry(i)
                    for s in range(getattr(self.chain, self.var_fill_x).size()):
                        ibin = h.FindBin(getattr(self.chain, self.var_fill_x).at(s))
                        ddict[ibin]["entries"].append(getattr(self.chain, self.var_fill_y).at(s))
                        #ddict[ibin]["h_slice"].Fill(getattr(self.chain,self.var_fill_y).at(s))
                outfile = None
                #if self.get_slices:
                #    outfile = ROOT.TFile.Open("fits_"+self.hname+".root","RECREATE")
                for ibin in range(1, h.GetNbinsX()+1):
                    if len(ddict[ibin]["entries"]):
                        fit_range = []
                        hist_range = []
                        # FIX: test against None so that ymin/ymax == 0.0 is
                        # still treated as a valid, user-supplied range.
                        if self.ymin is None or self.ymax is None:
                            pass
                            #fit_range = [-0.15*ibin_sigma,0.15*ibin_sigma]
                            #hist_range = [-1.5*ibin_sigma,1.5*ibin_sigma]
                            #fit_range = [-3.*ibin_mean,3.*ibin_mean]
                            #hist_range = [-6.*ibin_mean,6.*ibin_mean]
                        else:
                            hist_range = [self.ymin, self.ymax]
                        # fill the slices
                        ddict[ibin]["h_slice"].SetBins(70, min(hist_range), max(hist_range))
                        for i in ddict[ibin]["entries"]: ddict[ibin]["h_slice"].Fill(i)
                        if self.use_fit:
                            # Perform a gaussian fit to get the resolution
                            # FIX: None check; fitmin/fitmax == 0.0 is valid.
                            if self.fitmin is None or self.fitmax is None:
                                # NOTE(review): falls back to 0.1*ymin/ymax and
                                # would raise if those are also None (as before).
                                fit_range = [0.1*self.ymin, 0.1*self.ymax]
                            else:
                                fit_range = [self.fitmin, self.fitmax]
                            f_ibin = ROOT.TF1("f_ibin_%s" % ibin, "gaus", min(fit_range), max(fit_range))
                            ddict[ibin]["h_slice"].Fit(f_ibin, "R")
                            if self.get_slices:
                                ddict[ibin]["slice_fit"] = f_ibin
                                self.slices = ddict
                            # parameter 2 of "gaus" is the sigma
                            h.SetBinContent(ibin, f_ibin.GetParameter(2))
                            h.SetBinError(ibin, f_ibin.GetParError(2))
                        else:
                            # Compute the moments by hand for each slice
                            # use user defined moments
                            # ------------------------
                            #ibin_mean = self.get_moments(ddict[ibin]["entries"],"mean")
                            #ibin_rms = self.get_moments(ddict[ibin]["entries"],"rms")
                            #ibin_sigrms = self.get_moments(ddict[ibin]["entries"],"sig_rms")
                            #ibin_sigma = self.get_moments(ddict[ibin]["entries"],"sigma")
                            #ibin_sigsigma = self.get_moments(ddict[ibin]["entries"],"sigsigma")
                            # use root moments
                            # ------------------------
                            ibin_sigma = ddict[ibin]["h_slice"].GetStdDev()
                            ibin_sigsigma = ddict[ibin]["h_slice"].GetStdDevError()
                            h.SetBinContent(ibin, ibin_sigma)
                            h.SetBinError(ibin, ibin_sigsigma)
            self.instance = self.set_style(h)
        else:
            h = ROOT.TH1D(self.hname, self.hname, self.nbins, self.xmin, self.xmax)
            h.Sumw2()
            # FIX: the original message used a bare '%' (invalid format spec)
            assert h, "ERROR: histogram %s not initialised!!!" % self.hname
            if self.num_selection:
                # efficiency mode: numerator/denominator ratio with binomial errors
                if self.selection:
                    self.num_selection = " && ".join([self.num_selection, self.selection])
                h_num = ROOT.TH1D(self.hname+"_num", self.hname+"_num", self.nbins, self.xmin, self.xmax)
                h_num.Sumw2()
                self.chain.Draw(self.var_fill+">>"+self.hname+"_num", self.num_selection)
                h_den = ROOT.TH1D(self.hname+"_den", self.hname+"_den", self.nbins, self.xmin, self.xmax)
                h_den.Sumw2()
                self.chain.Draw(self.var_fill+">>"+self.hname+"_den", self.selection)
                h.Divide(h_num, h_den, 1., 1., "b")
            else:
                """
                mean = 0.
                n = 1.
                nentries = self.chain.GetEntries()
                for i in xrange(nentries):
                    self.chain.GetEntry(i+1)
                    for s in xrange(getattr(self.chain,self.var_fill).size()):
                        h.Fill(getattr(self.chain,self.var_fill).at(s))
                        mean += getattr(self.chain,self.var_fill).at(s)
                        n += 1
                print "hist: ", self.hname, " mean: ", mean/n
                """
                self.chain.Draw(self.var_fill+">>"+self.hname, self.selection)
            self.instance = self.set_style(h)
        #self.instance.Print("all")
        return self.instance
#------------------------------------------------------------
class Hist2D(object):
    '''
    class to hold histogram info for plotting
    two-dimensional histograms

    Plain mode draws "vary_fill:varx_fill" (supplied via **kw) from the
    chain; efficiency mode (non-empty 'num_selection') divides a numerator
    by a denominator TH2D with binomial errors.
    '''
    #________________________________________________________
    def __init__(self,
                 hname         = None,   # histogram / ROOT object name
                 leg_entry     = None,   # legend label
                 xtitle        = None,
                 ytitle        = None,
                 nbinsx        = None,
                 nbinsy        = None,
                 xmin          = None,
                 xmax          = None,
                 ymin          = None,
                 ymax          = None,
                 var_fill      = None,
                 instance      = None,   # cached ROOT histogram, filled by create_hist()
                 selection     = "",
                 num_selection = "",     # numerator selection (efficiency mode)
                 style_dict    = None,
                 chain         = None,   # TTree/TChain to draw from
                 **kw):
        self.hname         = hname
        self.leg_entry     = leg_entry
        self.xtitle        = xtitle
        self.ytitle        = ytitle
        self.nbinsx        = nbinsx
        self.nbinsy        = nbinsy
        self.xmin          = xmin
        self.xmax          = xmax
        self.ymin          = ymin
        self.ymax          = ymax
        self.var_fill      = var_fill
        self.instance      = instance
        self.selection     = selection
        self.num_selection = num_selection
        self.style_dict    = style_dict
        self.chain         = chain
        ## set additional key-word args
        # ----------------------------------------------------
        # FIX: dict.items() instead of the Python2-only iteritems(); the
        # fill expressions 'varx_fill'/'vary_fill' are expected to arrive here.
        for k, w in kw.items():
            setattr(self, k, w)
    #________________________________________________________
    def get_name(self, chain=None):
        """Return the class name ('chain' is accepted but unused)."""
        return self.__class__.__name__
    #________________________________________________________
    def set_style(self, h=None):
        """
        set style of histogram

        Scales axis title/label offsets and sizes for 2D readability.
        """
        h.GetXaxis().SetTitle(self.xtitle)
        h.GetXaxis().SetTitleOffset( 1.3 * h.GetXaxis().GetTitleOffset())
        h.GetXaxis().SetLabelOffset( 0.7 * h.GetXaxis().GetLabelOffset())
        h.GetXaxis().SetTitleSize(   1.3 * h.GetXaxis().GetTitleSize())
        h.GetXaxis().SetLabelSize(   1.1 * h.GetXaxis().GetLabelSize())
        h.GetYaxis().SetTitle(self.ytitle)
        h.GetYaxis().SetTitleOffset( 1.3 * h.GetYaxis().GetTitleOffset())
        h.GetYaxis().SetLabelOffset( 0.7 * h.GetYaxis().GetLabelOffset())
        h.GetYaxis().SetTitleSize(   1.3 * h.GetYaxis().GetTitleSize())
        h.GetYaxis().SetLabelSize(   1.1 * h.GetYaxis().GetLabelSize())
        return h
    #________________________________________________________
    def create_hist(self, chain=None):
        """Build, fill and style the 2D histogram from self.chain."""
        if chain: self.chain = chain
        assert self.chain, "ERROR: chain not initialised for %s" % self.hname
        h = ROOT.TH2D(self.hname, self.hname, self.nbinsx, self.xmin, self.xmax, self.nbinsy, self.ymin, self.ymax)
        h.Sumw2()
        # FIX: the original message used a bare '%' (invalid format spec)
        assert h, "ERROR: histogram %s not initialised!!!" % self.hname
        if self.num_selection:
            if self.selection:
                self.num_selection = " && ".join([self.num_selection, self.selection])
            h_num = ROOT.TH2D(self.hname+"_num", self.hname+"_num", self.nbinsx, self.xmin, self.xmax, self.nbinsy, self.ymin, self.ymax)
            h_num.Sumw2()
            self.chain.Draw(":".join([self.vary_fill, self.varx_fill])+">>"+self.hname+"_num", self.num_selection)
            h_den = ROOT.TH2D(self.hname+"_den", self.hname+"_den", self.nbinsx, self.xmin, self.xmax, self.nbinsy, self.ymin, self.ymax)
            h_den.Sumw2()
            self.chain.Draw(":".join([self.vary_fill, self.varx_fill])+">>"+self.hname+"_den", self.selection)
            #self.chain.Draw(self.var_fill+">>"+self.hname+"_den",self.selection)
            h.Divide(h_num, h_den, 1., 1., "b")
        else:
            self.chain.Draw(":".join([self.vary_fill, self.varx_fill])+">>"+self.hname, self.selection)
        self.instance = self.set_style(h)
        return self.instance
## EOF
| [
"federicoscutti@gmail.com"
] | federicoscutti@gmail.com |
f6ff32c2fbc6e0dd788eb9b46df526c2ed159555 | e902c4dfca00539b4907e5d7cb497ed5276d9b63 | /s3filter/op/join_expression.py | 7ccff8c0a2047f36ba9416662670e12b26dbb06d | [] | no_license | jorgermurillo/s3filter | 8584413bc1bfb57d439dcb7ff4596bcfc01be23d | 068c6758d796d30d34198a0a3d2dd6748b5c0697 | refs/heads/master | 2020-06-18T09:58:16.785816 | 2020-02-20T16:57:19 | 2020-02-20T16:57:19 | 196,262,900 | 0 | 0 | null | 2019-07-10T19:20:39 | 2019-07-10T19:20:39 | null | UTF-8 | Python | false | false | 535 | py | class JoinExpression(object):
"""Represents a join expression, as in the column name (field) to join on.
"""
def __init__(self, l_field, r_field):
"""Creates a new join expression.
:param l_field: Field of the left tuple to join on
:param r_field: Field of the right tuple to join on
"""
self.l_field = l_field
self.r_field = r_field
def __repr__(self):
return {
'l_field': self.l_field,
'r_field': self.r_field
}.__repr__()
| [
"matt.youill@burnian.com"
] | matt.youill@burnian.com |
e70f03225989cdb95aa90466d2d58693e5923a6e | dc2d0aff11ac347b6079d339f04cbe0276968973 | /TORTUGA/problema7.py | 83e9cf3e7b4a5f92b532e238d632e676c8c9837d | [] | no_license | CertifiedErickBaps/Python36 | 418f6b4815cb9fb329957d2fdb3a297ff2d1c3c1 | 447fdf6d1779ef73e4910ac94b5732553c0bb7dd | refs/heads/master | 2021-09-24T00:00:09.207766 | 2017-05-20T23:25:19 | 2017-05-20T23:25:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #Práctica 1: Gráficas de tortuga
# Fecha: 24-Ago-2016
# Autor: A01379896 Erick Bautista Pérez
# A01378568 Leonardo Valencia Benitez
#-------------------------------------------------------------------------------
from turtle import fd, lt, done, home, rt
def repeticion(lado, pequeño):
    """Draw ten "teeth": each one is left-forward(lado), right-forward(pequeño),
    right-forward(lado), left-forward(pequeño)."""
    pasos = ((lt, lado), (rt, pequeño), (rt, lado), (lt, pequeño))
    for _ in range(10):
        for giro, distancia in pasos:
            giro(90)
            fd(distancia)
def figura_7(lado, pequeño, largo):
    """Draw the toothed figure, close it with two right-turn segments
    (pequeño then largo), turn right once more and return home."""
    repeticion(lado, pequeño)
    for distancia in (pequeño, largo):
        rt(90)
        fd(distancia)
    rt(90)
    home()
# Draw the exercise figure (tooth size 100/20, closing segment 400)
# and keep the turtle window open until it is clicked/closed.
figura_7(100, 20, 400)
done()
| [
"erickburn01@hotmail.com"
] | erickburn01@hotmail.com |
a8a992e957e28a9e6fc026939435cbf906993323 | 14d54067ff5bf1672349e4281b1da2cdd075e3ba | /scrapper/scrapper_clima_tempo/modulos/scrapper.py | 0312ae726d1f3718d87f2d1cfa1a60e4ff6a6e1d | [] | no_license | rafaelaraujobsb/clima-tempo | 17588b6e7623b9f03bff3b8e30f0433b1f75ffea | e3e93f09725ec812d55523ed63e02abd94b8c473 | refs/heads/main | 2023-04-06T02:07:45.108630 | 2021-05-11T13:06:26 | 2021-05-11T13:06:26 | 355,996,664 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | import traceback
from loguru import logger
from scrapper_clima_tempo import celery
from scrapper_clima_tempo.exceptions import Falha
from scrapper_clima_tempo.servicos.scrapper import ScrapperClimaTempo
from scrapper_clima_tempo.servicos.database import CollectionTempos, CollectionExecucoes
@celery.task(name="scrapper.task_buscar_clima")
def task_buscar_clima(estado: str, municipio: str):
    """Celery task: scrape the weather for (estado, municipio) and persist it.

    Flow: register an execution record, run the scraper, always update the
    record with the error outcome (None on success), and only store the
    scraped weather when no error occurred.
    """
    logger.info(f"TASK {task_buscar_clima.name} INICIADA")
    # Register this run; the returned id is updated in the finally block.
    with CollectionExecucoes() as collection_execucoes:
        id_execucao = collection_execucoes.cadastar(estado=estado, municipio=municipio)
    erro = None
    try:
        clima = ScrapperClimaTempo().buscar_clima(estado=estado, municipio=municipio)
    except Falha as error:
        # Expected scraper failure: keep its message/stacktrace for the record.
        erro = {"mensagem": error.mensagem, "stacktrace": error.stacktrace}
    except Exception:
        # Unexpected failure: capture the traceback and log loudly.
        erro = {"mensagem": "Ocorreu um erro inesperado!", "stacktrace": traceback.format_exc()}
        logger.critical(f"TASK {task_buscar_clima.name} ERRO INESPERADO: {estado} - {municipio}")
    finally:
        # Always record the outcome (erro is None on success).
        with CollectionExecucoes() as collection_execucoes:
            collection_execucoes.atualizar(id_execucao, erro=erro)
    if not erro:
        # Success path: persist the scraped weather data.
        with CollectionTempos() as collection_climas:
            collection_climas.cadastar(estado=estado, municipio=municipio, **clima)
    logger.info(f"TASK {task_buscar_clima.name} FINALIZADA")
| [
"bsb.rafaelaraujo@gmail.com"
] | bsb.rafaelaraujo@gmail.com |
dd3846c36379f2ba6f577ad7a9d6873817778d0e | 1f399edf85d995443d01f66d77eca0723886d0ff | /misc/config_tools/scenario_config/schema_slicer.py | 26f29fed8aeda8fe07e61514b852614b50b3c4c8 | [
"BSD-3-Clause"
] | permissive | projectacrn/acrn-hypervisor | f9c5864d54929a5d2fa36b5e78c08f19b46b8f98 | 390740aa1b1e9d62c51f8e3afa0c29e07e43fa23 | refs/heads/master | 2023-08-18T05:07:01.310327 | 2023-08-11T07:49:36 | 2023-08-16T13:20:27 | 123,983,554 | 1,059 | 686 | BSD-3-Clause | 2023-09-14T09:51:10 | 2018-03-05T21:52:25 | C | UTF-8 | Python | false | false | 9,635 | py | #!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import argparse
from copy import deepcopy
from pipeline import PipelineObject, PipelineStage, PipelineEngine
class SchemaTypeSlicer:
    """Base class that slices XSD type definitions.

    A "slice" of a type is a copy (or in-place reduction) that keeps only
    the elements for which is_element_needed() returns True; referenced
    named types are sliced recursively and appended under new names.
    Subclasses override is_element_needed() / get_name_of_slice().
    """

    # XML namespaces used by all XPath lookups.
    xpath_ns = {
        "xs": "http://www.w3.org/2001/XMLSchema",
        "acrn": "https://projectacrn.org",
    }

    @classmethod
    def get_node(cls, element, xpath):
        # First match (or None) under the xs/acrn namespaces.
        return element.find(xpath, namespaces=cls.xpath_ns)

    @classmethod
    def get_nodes(cls, element, xpath):
        # All matches under the xs/acrn namespaces.
        return element.findall(xpath, namespaces=cls.xpath_ns)

    def __init__(self, etree):
        # etree: the (lxml) ElementTree of the whole schema document.
        self.etree = etree

    def get_type_definition(self, type_name):
        """Return the complexType or simpleType node named *type_name*, or None."""
        type_node = self.get_node(self.etree, f"//xs:complexType[@name='{type_name}']")
        if type_node is None:
            type_node = self.get_node(self.etree, f"//xs:simpleType[@name='{type_name}']")
        return type_node

    def slice_element_list(self, element_list_node, new_nodes):
        """Slice every xs:element under *element_list_node* in place.

        Newly created sliced type definitions are appended to *new_nodes*.
        Returns True if anything was removed or re-typed.
        """
        sliced = False
        for element_node in self.get_nodes(element_list_node, "xs:element"):
            if not self.is_element_needed(element_node):
                element_list_node.remove(element_node)
                sliced = True
                continue
            # For embedded complex type definition, also slice in place. If the sliced type contains no sub-element,
            # remove the element itself, too.
            element_type_node = self.get_node(element_node, "xs:complexType")
            if element_type_node is not None:
                new_sub_nodes = self.slice(element_type_node, in_place=True)
                if len(self.get_nodes(element_type_node, ".//xs:element")) > 0:
                    new_nodes.extend(new_sub_nodes)
                else:
                    element_list_node.remove(element_node)
                continue
            # For external type definition, create a copy to slice. If the sliced type contains no sub-element, remove
            # the element itself.
            element_type_name = element_node.get("type")
            if element_type_name:
                element_type_node = self.get_type_definition(element_type_name)
                if element_type_node is not None:
                    sliced_type_name = self.get_name_of_slice(element_type_name)
                    # If a sliced type already exists, do not duplicate the effort
                    type_node = self.get_type_definition(sliced_type_name)
                    if type_node is not None:
                        element_node.set("type", sliced_type_name)
                        sliced = True
                    else:
                        new_sub_nodes = self.slice(element_type_node)
                        if len(new_sub_nodes) == 0:
                            # Nothing changed in the referenced type: keep as is.
                            continue
                        elif new_sub_nodes[-1].tag.endswith("simpleType") or len(self.get_nodes(new_sub_nodes[-1], ".//xs:element")) > 0:
                            new_nodes.extend(new_sub_nodes)
                            element_node.set("type", sliced_type_name)
                            sliced = True
                        else:
                            # Sliced complex type ended up empty: drop the element.
                            element_list_node.remove(element_node)
        return sliced

    def slice_restriction(self, restriction_node):
        """Drop xs:enumeration values that are not needed; True if any removed."""
        sliced = False
        for restriction in self.get_nodes(restriction_node, "xs:enumeration"):
            if not self.is_element_needed(restriction):
                restriction_node.remove(restriction)
                sliced = True
        return sliced

    def slice(self, type_node, in_place=False, force_copy=False):
        """Slice *type_node*; return the list of new type nodes produced.

        With in_place=True the node itself is reduced (used for anonymous,
        embedded types); otherwise a deep copy is sliced and renamed, and
        appended to the result only if something changed or force_copy is set.
        """
        new_nodes = []
        sliced = False
        if in_place:
            new_type_node = type_node
        else:
            new_type_node = deepcopy(type_node)
        # Anonymous (embedded) types have no name attribute, so no rename happens.
        type_name = type_node.get("name")
        if type_name != None:
            sliced_type_name = self.get_name_of_slice(type_name)
            new_type_node.set("name", sliced_type_name)
        element_list_node = self.get_node(new_type_node, "xs:all")
        if element_list_node is not None:
            sliced = self.slice_element_list(element_list_node, new_nodes)
        restriction_node = self.get_node(new_type_node, "xs:restriction")
        if restriction_node is not None:
            sliced = self.slice_restriction(restriction_node)
        if not in_place and (sliced or force_copy):
            new_nodes.append(new_type_node)
        return new_nodes

    def is_element_needed(self, element_node):
        # Default: keep everything; subclasses filter by annotation.
        return True

    def get_name_of_slice(self, name):
        # Default naming scheme for sliced types.
        return f"Sliced{name}"
class SlicingSchemaByVMTypeStage(PipelineStage):
    """Pipeline stage that specializes VMConfigType per VM type.

    Produces PreLaunched/Service/PostLaunched copies of VMConfigType,
    keeping only the elements annotated as applicable to that VM type,
    and points the vm element's xs:alternative nodes at the new types.
    """

    uses = {"schema_etree"}
    provides = {"schema_etree"}

    class VMTypeSlicer(SchemaTypeSlicer):
        def is_element_needed(self, element_node):
            # Keep the element unless its annotation restricts it to other VM types.
            annot_node = self.get_node(element_node, "xs:annotation")
            if annot_node is None:
                return True
            applicable_vms = annot_node.get("{https://projectacrn.org}applicable-vms")
            return applicable_vms is None or applicable_vms.find(self.vm_type_indicator) >= 0

        def get_name_of_slice(self, name):
            return f"{self.type_prefix}{name}"

    class PreLaunchedTypeSlicer(VMTypeSlicer):
        vm_type_indicator = "pre-launched"
        type_prefix = "PreLaunched"

    class ServiceVMTypeSlicer(VMTypeSlicer):
        vm_type_indicator = "service-vm"
        type_prefix = "Service"

    class PostLaunchedTypeSlicer(VMTypeSlicer):
        vm_type_indicator = "post-launched"
        type_prefix = "PostLaunched"

    def run(self, obj):
        schema_etree = obj.get("schema_etree")
        vm_type_name = "VMConfigType"
        vm_type_node = SchemaTypeSlicer.get_node(schema_etree, f"//xs:complexType[@name='{vm_type_name}']")
        slicers = [
            self.PreLaunchedTypeSlicer(schema_etree),
            self.ServiceVMTypeSlicer(schema_etree),
            self.PostLaunchedTypeSlicer(schema_etree)
        ]
        # Create one sliced copy of VMConfigType per VM type and append them
        # to the schema root (force_copy keeps the copy even if unchanged).
        for slicer in slicers:
            new_nodes = slicer.slice(vm_type_node, force_copy=True)
            for n in new_nodes:
                schema_etree.getroot().append(n)
        # Re-point each vm alternative (selected by its load_order test) at
        # the matching sliced type.
        for node in SchemaTypeSlicer.get_nodes(schema_etree, "//xs:complexType[@name='ACRNConfigType']//xs:element[@name='vm']//xs:alternative"):
            test = node.get("test")
            if test.find("PRE_LAUNCHED_VM") >= 0:
                node.set("type", slicers[0].get_name_of_slice(vm_type_name))
            elif test.find("SERVICE_VM") >= 0:
                node.set("type", slicers[1].get_name_of_slice(vm_type_name))
            elif test.find("POST_LAUNCHED_VM") >= 0:
                node.set("type", slicers[2].get_name_of_slice(vm_type_name))
        obj.set("schema_etree", schema_etree)
class SlicingSchemaByViewStage(PipelineStage):
    """Pipeline stage that splits config types into Basic/Advanced views.

    For every *VMConfigType (including the per-VM-type slices created by
    SlicingSchemaByVMTypeStage) and for HVConfigType, it appends a copy
    keeping only the elements annotated for that view.
    """

    uses = {"schema_etree"}
    provides = {"schema_etree"}

    class ViewSlicer(SchemaTypeSlicer):
        def is_element_needed(self, element_node):
            # Keep the element unless its annotation limits it to other views.
            annot_node = self.get_node(element_node, "xs:annotation")
            if annot_node is None:
                return True
            views = annot_node.get("{https://projectacrn.org}views")
            return views is None or views.find(self.view_indicator) >= 0

        def get_name_of_slice(self, name):
            # "FooConfigType" -> "FooBasicConfigType" (prefix inserted before
            # "ConfigType"); other names just get the prefix prepended.
            if name.find("ConfigType") >= 0:
                return name.replace("ConfigType", f"{self.type_prefix}ConfigType")
            else:
                return f"{self.type_prefix}{name}"

    class BasicViewSlicer(ViewSlicer):
        view_indicator = "basic"
        type_prefix = "Basic"

    class AdvancedViewSlicer(ViewSlicer):
        view_indicator = "advanced"
        type_prefix = "Advanced"

    def run(self, obj):
        schema_etree = obj.get("schema_etree")
        # All named *VMConfigType definitions, plus HVConfigType.
        type_nodes = list(filter(lambda x: x.get("name") and x.get("name").endswith("VMConfigType"), SchemaTypeSlicer.get_nodes(schema_etree, "//xs:complexType")))
        type_nodes.append(SchemaTypeSlicer.get_node(schema_etree, "//xs:complexType[@name = 'HVConfigType']"))
        slicers = [
            self.BasicViewSlicer(schema_etree),
            self.AdvancedViewSlicer(schema_etree),
        ]
        # Append one view-specific copy of every type per slicer.
        for slicer in slicers:
            for type_node in type_nodes:
                new_nodes = slicer.slice(type_node, force_copy=True)
                for n in new_nodes:
                    schema_etree.getroot().append(n)
        obj.set("schema_etree", schema_etree)
def main(args):
    """Run the load -> VM-type slicing -> view slicing pipeline and write
    the sliced schema to args.out."""
    from lxml_loader import LXMLLoadStage
    engine = PipelineEngine(["schema_path"])
    engine.add_stages([
        LXMLLoadStage("schema"),
        SlicingSchemaByVMTypeStage(),
        SlicingSchemaByViewStage(),
    ])
    payload = PipelineObject(schema_path = args.schema)
    engine.run(payload)
    payload.get("schema_etree").write(args.out)
    print(f"Sliced schema written to {args.out}")
if __name__ == "__main__":
    # abs __file__ path to ignore `__file__ == 'schema_slicer.py'` issue
    config_tools_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
    schema_dir = os.path.join(config_tools_dir, "schema")
    parser = argparse.ArgumentParser(description="Slice a given scenario schema by VM types and views")
    # Both arguments default to files under <config_tools>/schema.
    parser.add_argument("out", nargs="?", default=os.path.join(schema_dir, "sliced.xsd"), help="Path where the output is placed")
    parser.add_argument("--schema", default=os.path.join(schema_dir, "config.xsd"), help="the XML schema that defines the syntax of scenario XMLs")
    args = parser.parse_args()
    main(args)
| [
"63571455+acrnsi-robot@users.noreply.github.com"
] | 63571455+acrnsi-robot@users.noreply.github.com |
3012a6bf4e8b1775a832b551f85b72760fb45ddc | 8e44c510f2fde463d345c85bfe34f7fc044681f2 | /brat_multitask/dataset_readers/span_field.py | f3b50ddde8865825349cbcf2b70544aaa2f6016c | [
"BSD-3-Clause"
] | permissive | nlpersECJTU/cmu-multinlp | 87f35d945eee7a56b20b8abd2d72d4fe8b3c5582 | f98f45d021358147171b9aba559b223dcad6a616 | refs/heads/master | 2023-02-21T06:55:24.098429 | 2021-01-12T23:15:32 | 2021-01-12T23:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,797 | py | from typing import Dict
from overrides import overrides
import torch
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
class SpanField(Field[torch.Tensor]):
    """
    A ``SpanField`` is a pair of inclusive, zero-indexed (start, end) indices into a
    :class:`~allennlp.data.fields.sequence_field.SequenceField`, used to represent a span of text.
    Because it's a pair of indices into a :class:`SequenceField`, we take one of those as input
    to make the span's dependence explicit and to validate that the span is well defined.

    Parameters
    ----------
    span_start : ``int``, required.
        The index of the start of the span in the :class:`SequenceField`.
    span_end : ``int``, required.
        The inclusive index of the end of the span in the :class:`SequenceField`.
    sequence_field : ``SequenceField``, required.
        A field containing the sequence that this ``SpanField`` is a span inside.
    check_sentence : ``bool``, optional (default = ``True``).
        If True, also validate that ``span_end`` lies inside the sequence field.
    """
    def __init__(self, span_start: int, span_end: int, sequence_field: SequenceField, check_sentence: bool=True) -> None:
        self.span_start = span_start
        self.span_end = span_end
        self.sequence_field = sequence_field
        # Validate types first, then ordering, then (optionally) the bounds.
        if not isinstance(span_start, int) or not isinstance(span_end, int):
            raise TypeError(f"SpanFields must be passed integer indices. Found span indices: "
                            f"({span_start}, {span_end}) with types "
                            f"({type(span_start)} {type(span_end)})")
        if span_start > span_end:
            raise ValueError(f"span_start must be less than span_end, "
                             f"but found ({span_start}, {span_end}).")
        if check_sentence:
            # span_end is inclusive, so it must be at most sequence_length() - 1.
            if span_end > self.sequence_field.sequence_length() - 1:
                raise ValueError(f"span_end must be < len(sequence_length) - 1, but found "
                                 f"{span_end} and {self.sequence_field.sequence_length() - 1} respectively.")

    @overrides
    def get_padding_lengths(self) -> Dict[str, int]:
        # A span has fixed size, so nothing needs padding.
        # pylint: disable=no-self-use
        return {}

    @overrides
    def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
        # Returns a LongTensor of shape (2,): [span_start, span_end].
        # pylint: disable=unused-argument
        tensor = torch.LongTensor([self.span_start, self.span_end])
        return tensor

    @overrides
    def empty_field(self):
        # (-1, -1) is the conventional "no span" sentinel.
        return SpanField(-1, -1, self.sequence_field.empty_field())

    def __str__(self) -> str:
        return f"SpanField with spans: ({self.span_start}, {self.span_end})."

    def __eq__(self, other) -> bool:
        # Compares equal to a plain (start, end) tuple; any other comparison
        # falls back to identity.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm no caller puts SpanFields in sets/dicts.
        if isinstance(other, tuple) and len(other) == 2:
            return other == (self.span_start, self.span_end)
        else:
            return id(self) == id(other)
| [
"zhengbaj@cs.cmu.edu"
] | zhengbaj@cs.cmu.edu |
23beb603408e812540ec6088aba8d12c6bfbe746 | e5502f889db8f23d571eed21b6786208e375eaca | /peogen/peogen/wsgi.py | b637d51265fa3f33f93a29c4132d6a80a52b7c32 | [] | no_license | kurosh-wss/chat_django3 | 1e497bb335b41834acccb69805307fc70f7aa088 | 5fe92cad8df467a1e8d38c54344b5528192aacdb | refs/heads/master | 2023-02-14T20:35:47.835401 | 2021-01-17T09:23:36 | 2021-01-17T09:23:36 | 328,913,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module (unless already set) and
# expose the WSGI callable that application servers look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'peogen.settings')
application = get_wsgi_application()
| [
"kurosh7899@gmail.com"
] | kurosh7899@gmail.com |
dc07e4c5023b62bbac3b5ed25bf1cbde99182341 | 54516826a15e4588decd4a040c3f3ae73b1f49df | /supplier/admin.py | d4cd50319ed56dfbd6c7cc180afdbbb36f403d02 | [] | no_license | boyombo/shylock | 9454b53ef285af692675be4fe7a176d1aa29ced1 | c63ac02b3ee18160ec94c9e8462165eaf7e0f3b5 | refs/heads/master | 2021-05-05T11:10:13.523616 | 2018-02-06T08:10:47 | 2018-02-06T08:10:47 | 118,116,949 | 0 | 1 | null | 2018-02-06T08:10:48 | 2018-01-19T11:24:14 | JavaScript | UTF-8 | Python | false | false | 147 | py | from django.contrib import admin
from supplier.models import Supplier
# Register Supplier with the Django admin using the default ModelAdmin options.
@admin.register(Supplier)
class SupplierAdmin(admin.ModelAdmin):
    pass
| [
"bayokrapht@gmail.com"
] | bayokrapht@gmail.com |
fc2c228b13be87058b739109ebd181f664a0efde | f1011ea46e0447cb2bb82137fb53c4673cf307be | /Day2/day2Presentation/crawlerAutismStudents2.py | 70dfc62bd933a56c6439e7afff6cb85101ee5d2e | [] | no_license | RiptideStar/Python | 4659eecfc1ec9186f3d694bd4764364205a498e6 | 71a914b69a9b31c68c0a924b86e253d4b7843d3b | refs/heads/master | 2023-06-16T06:48:36.969699 | 2021-07-09T16:56:41 | 2021-07-09T16:56:41 | 349,828,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,500 | py | import requests
import sys
from bs4 import BeautifulSoup
import sqlite3
# Log the invocation context and the page this scraper targets.
print("--- Command Line:", sys.argv)
# Source page: a ranked list of colleges for students with autism.
api_url = "https://www.greatvaluecolleges.net/best-colleges-for-students-with-autism/"
print("--- api_url:", api_url)
def retrieveData(api_url):
    """Scrape the rankings page and return a list of rows, each
    [univ_name, location, ranking, url, description, source_url].

    The parser walks the flat list of child tags and manually advances the
    loop index ``i`` within one iteration to read the tags belonging to a
    single entry; entries are recognized by a leading ranking number.
    NOTE(review): the outer ``for`` resets ``i`` on each iteration, so the
    manual increments only take effect inside one pass — duplicates are
    filtered out by the leading-digit check.
    """
    try:
        response = requests.get(api_url)
    except requests.exceptions.ConnectionError as e:
        # No network / DNS failure: report and abort the whole script.
        print('Error', e.args)
        exit(1)
    html = response.content
    # parsing html with BS
    soup = BeautifulSoup(html, 'html.parser')
    parentClass = soup.findChild("div", class_="entry-content clearfix")
    children = parentClass.findChildren()
    # print(children)
    datalist = []
    for i in range(0, len(children)):
        # print(type(children[i]))
        # Only tags starting with a digit (e.g. "12. University...") begin an entry.
        try:
            int(children[i].getText()[0:1])
        except ValueError:
            continue
        # print("Child we are on: ", children[i])
        row = []
        try:
            # "12. University Name" -> ranking and name
            ranking_name = children[i].getText().split(". ")
            ranking = ranking_name[0]
            univ_name = ranking_name[1]
            i += 1
        except IndexError:
            break
        row.append(univ_name)
        location = children[i].getText()
        row.append(location)
        row.append(ranking)
        i += 1
        # The next tag holds the school's link.
        url = children[i].findChild('a')["href"]
        row.append(url)
        i += 2
        # Accumulate description paragraphs until the next ranked entry.
        desc = ""
        desc += children[i].getText()
        i += 1
        while True:
            try: #no error, we need to break
                int(children[i].getText()[0:1])
                #might need to decrement i because it will increment after because of for loop
                i -= 1
                break
            except ValueError:
                #hard coded to make Drexel University not have the related rankings description
                if (children[i].getText()[0:3] == 'Rel'):
                    break
                desc += children[i].getText()
                i += 1
        row.append(desc)
        row.append(api_url)
        # print(row)
        # insert(0, ...) keeps the final list in reverse scrape order.
        datalist.insert(0, row)
    # print(datalist)
    return datalist
# Scrape the rankings page once when the script runs.
datalist = retrieveData(api_url)
# print(datalist)
def saveToDataBase(datalist, dbpath):
    """Insert one row per university into the autism_universities2 table.

    Ensures the table exists (via init_db), then inserts each row with a
    parameterized statement and commits after every insert.
    """
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    for data in datalist:
        # print(data)
        # NOTE(review): wrapping the values in literal double quotes is
        # redundant with the parameterized insert below and stores the
        # quotes in the database — kept for output compatibility, but
        # consider removing it. Index 2 (ranking) is intentionally skipped.
        for index in range(len(data)):
            if index == 2:
                continue
            data[index] = '"'+data[index]+'"'
            print(data[index])
        sql = '''
        insert into autism_universities2(
        univ_name,location,ranking,description,url,source_url)
        values (?,?,?,?,?,?)'''
        # print(sql)
        cur.execute(sql, (data[0], data[1], data[2], data[3], data[4], data[5]))
        conn.commit()
    # FIX: the original 'cur.close' was a bare attribute access (a no-op);
    # the cursor is now actually closed.
    cur.close()
    conn.close()
def init_db(dbpath):
    """Create the autism_universities2 table in the SQLite file at *dbpath*.

    FIX: uses CREATE TABLE IF NOT EXISTS so the script can be re-run
    without raising sqlite3.OperationalError ("table ... already exists").
    """
    sql = '''
    create table if not exists autism_universities2
    (
    id integer primary key autoincrement,
    univ_name varchar,
    location varchar,
    ranking integer,
    description text,
    url text,
    source_url text,
    misc text
    );
    '''
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    c.execute(sql)
    conn.commit()
    conn.close()
# Persist the scraped rows into a local SQLite database file.
saveToDataBase(datalist, "autismUniversitiesDB.db")
##### VERSION IDEAS THAT DIDN'T MAKE THE CUT ######
# h3Children = children.findChildren("h3") #error since children is a list, find child can't be done on a list since it isn't aggregate
# try if children[i].getText()[0:1] contains number in front using int(children[i]) and except ValueError (if value error, just "continue" on the loop)
# do a cycle (get name [i], location[i+1]...)
# post check if children[i] in h3Children for descripion
# ranking_AND_names = parentClass.findChildren("h3")
# ranking_AND_names.pop(0)
# ranking_AND_names.pop(len(ranking_AND_names)-1)
# # print(ranking_AND_names[0])
# locationList = parentClass.findChildren("h4")
# # print(location)
# pList_Link_Desc = parentClass.findChildren('p')
# print("Length of pList:", len(pList_Link_Desc))
# datalist = []
# for i in range(0, 17):
# pList_Link_Desc.pop(0)
# # print(pList_Link_Desc)
# # 34, 35, 36
# j = 0
# for i in range(0, len(ranking_AND_names)):
# row = []
# ranking_name = ranking_AND_names[i].getText().split(". ")
# ranking = ranking_name[0]
# univ_name = ranking_name[1]
# row.append(univ_name)
# location = locationList[i].getText()
# row.append(location)
# row.append(ranking)
# url = pList_Link_Desc[j].findChild('a')["href"]
# desc = ""
# desc += pList_Link_Desc[j + 1].getText()
# while (pList_Link_Desc[j + 2].findChild('a') is None or len(pList_Link_Desc[j + 2]) > 30):
# desc += pList_Link_Desc[j + 2].getText()
# j += 1
# j += 2
# row.append(desc)
# row.append(url)
# row.append(api_url)
# print(row)
# ### the initial way of thought for extracting univ_name and ranking
# # if (i < 10):
# # # "8. UniversityName"
# # univ_name = ranking_AND_names[i][3:]
# # ranking = ranking_AND_names[i][0:1]
# # else:
# # # "12. UniversityName"
# # univ_name = ranking_AND_names[i][4:]
# # ranking = ranking_AND_names[i][0:2] | [
"kyle1001001@gmail.com"
] | kyle1001001@gmail.com |
c774de124f79f545af197812c70eacb83585d451 | 51709f4859383274aa72e8609e2e39bca565f050 | /src/mini_psp/utils/metric_utils.py | e864696355ecbafb5ff1ba61c06c9a10671fa83e | [] | no_license | bochuxt/mini_psp | ba8adfb3c3bba88c6412b76d94a63d40f5741022 | 8c8796aaaac4b4e909a4c68a69adc01969626966 | refs/heads/master | 2023-03-19T11:43:33.041446 | 2020-07-30T10:22:41 | 2020-07-30T10:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,026 | py | import numpy as np
from sklearn import metrics
def get_iou(target, prediction):
    '''Return the Intersection over Union between two binary masks.'''
    overlap = np.logical_and(target, prediction).sum()
    combined = np.logical_or(target, prediction).sum()
    return overlap / combined
def get_class_iou(target, prediction, n_classes):
    '''Per-class IoU plus the mean over all classes.

    Expects one-hot masks of rank 4 (..., n_classes); keys are 1-based
    class indices, with the average stored under 'mean'.'''
    assert len(target.shape) == 4
    assert len(prediction.shape) == 4
    IoU = {}
    running = 0
    for cls in range(n_classes):
        score = get_iou(prediction[:, :, :, cls], target[:, :, :, cls])
        running += score
        IoU[cls + 1] = score
    IoU['mean'] = running / n_classes
    return IoU
def get_class_f1(target, prediction, n_classes):
    '''Per-class F1 score plus the mean over all classes.

    Expects one-hot masks of rank 4 (..., n_classes); keys are 1-based
    class indices, with the average stored under 'mean'.'''
    assert len(target.shape) == 4
    assert len(prediction.shape) == 4
    f1 = {}
    running = 0
    for cls in range(n_classes):
        score = metrics.f1_score(prediction[:, :, :, cls].reshape(-1, 1),
                                 target[:, :, :, cls].reshape(-1, 1))
        running += score
        f1[cls + 1] = score
    f1['mean'] = running / n_classes
    return f1
def evaluate(target, prediction, n_classes):
    '''Bundle the per-class IoU and F1 dictionaries for a prediction.'''
    # Accuracy computation is currently disabled upstream.
    #acc = get_class_accuracies(target,prediction,n_classes)
    #return acc,iou,f1
    return (get_class_iou(target, prediction, n_classes),
            get_class_f1(target, prediction, n_classes))
def conf_matrix(target, prediction, n_classes):
    '''Confusion matrix over argmax-decoded one-hot masks.

    Note: predictions are passed as the first sklearn argument, so rows
    index predicted classes and columns the target classes.'''
    # Need to remove the 0 values in the target mask if any.
    flat_pred = np.reshape(prediction, (-1, n_classes)).argmax(axis=1)
    flat_true = np.reshape(target, (-1, n_classes)).argmax(axis=1)
    return metrics.confusion_matrix(flat_pred, flat_true)
def eval_conf_matrix(cm, n_classes):
    '''Derive overall accuracy, Cohen's kappa and the producer/user
    accuracies from a (prediction-rows x target-columns) confusion matrix.'''
    cm = np.array(cm)
    # Row sums = per-class prediction totals; column sums = per-class target totals.
    total_pred = [cm[i, :].sum() for i in range(n_classes)]
    total_test = [cm[:, j].sum() for j in range(n_classes)]
    correct = sum(cm[i, i] for i in range(n_classes))
    total = cm.sum()
    # Chance-agreement term used by Cohen's kappa.
    gc = sum(total_pred[i] * total_test[i] for i in range(n_classes))
    # User and Producer Accuracies
    prod_acc = [cm[i, i] / total_test[i] for i in range(n_classes)]
    user_acc = [cm[i, i] / total_pred[i] for i in range(n_classes)]
    # Overall Accuracy
    ovAc = correct / total
    # Kappa coefficient
    kappa = (total * correct - gc) / (total * total - gc)
    print("Total pred :", total_pred)
    print("Total target :", total_test)
    print("Total :", total)
    return ovAc, kappa, prod_acc, user_acc
if __name__=='__main__':
    ######################################################################
    #### TESTING
    ######################################################################
    # Ad-hoc check: load saved model outputs and print the metrics.
    n_classes = 5
    prediction = np.load('prediction.npy')
    target = np.load('target.npy')
    iou, f1 = evaluate(target, prediction, n_classes)
    print("IoU : ", iou)
    print("F1 : ", f1)
    #cm = conf_matrix(target,prediction,n_classes)
    #Combined1
    # cm = [ [119397,540,304,12182,7327],
    # [243,7169,43,4319,1737],
    # [134,0,5776,721,200],
    # [827,2,28,7655,811],
    # [793,0,57,278,31494]
    # ]
    #Combined2
    # Hard-coded confusion matrix from a previous run ("Combined2").
    cm = [ [119320,540,372,12259,7327],
           [243,7169,43,4319,1737],
           [266,0,6445,1636,248],
           [827,2,28,7655,811],
           [793,0,57,278,31494]
         ]
    ovAc, kappa, prod_acc, user_acc = eval_conf_matrix(cm, n_classes)
    print("Overall Accuracy : ", ovAc)
    print("Kappa coeff : ", kappa)
    print("Producer Accuracy : ", prod_acc)
    print("User Accuracy : ", user_acc)
    # Kappa checks
    # prediction = np.reshape(prediction,(-1,n_classes))
    # target = np.reshape(target,(-1,n_classes))
    # print("Kappa score : ",metrics.cohen_kappa_score(target.argmax(axis=1),prediction.argmax(axis=1)))
| [
"Surya.dheeshjith@gmail.com"
] | Surya.dheeshjith@gmail.com |
d947e01525da7cc192a8dec795755db3aa8b2b1b | 7f3e27fd94a79ce8c21c1e3a488eb38a1ce614da | /clase_12/leer.py | 3d7cfbceddb31af2cfd4720cbba52453b0c72629 | [] | no_license | peligro/taller_practico_python | 282262aa4030c132396a5c54b3d3731600fee51d | 85b4bf78474246090b1206cc3ef124bd1e93ec70 | refs/heads/master | 2021-07-06T20:49:08.060073 | 2020-10-19T13:05:49 | 2020-10-19T13:05:49 | 197,222,488 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | from xml.dom import minidom
# Directory containing the sample XML document to parse.
ruta= "/var/www/html/clientes/tamila/videotutoriales/python/clase_12/"
xml = minidom.parse(ruta+"ejemplo.xml")
# For every <doc> element, print the text of its first <nodo1>/<nodo2> children.
docs = xml.getElementsByTagName("doc")
for doc in docs:
    nodo1 = doc.getElementsByTagName("nodo1")[0]
    nodo2 = doc.getElementsByTagName("nodo2")[0]
    print(f"nodo1={nodo1.firstChild.data} | nodo2={nodo2.firstChild.data}")
# Expected layout of ejemplo.xml:
"""
<root>
<doc>
<nodo1 name="nodo">Texto nodo1</nodo1>
<nodo2 atributo="manzana">Texto nodo2</nodo2>
</doc>
</root>
"""
"yo@cesarcancino.com"
] | yo@cesarcancino.com |
23828eded591399cd9e25c17718e2bad16a679b9 | 4e67c4120a76085bf0810c4d44265262ce480fee | /text-based_adventure_game.py | e9714745cced134574b020157de39f2bc997c0d0 | [] | no_license | jmuhlenberg/text_based_adventure_game | 43c392a8f59d55fc39ef2faa9b61480780a0abd3 | 1cc42b480191327d539c293073d9bd6bac2326c3 | refs/heads/master | 2022-01-16T23:33:15.010648 | 2019-08-02T01:33:25 | 2019-08-02T01:33:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | #Text-based Adventure Game
'''The Goal: Remember Adventure? Well, we’re going to build a more basic version of that.
A complete text game, the program will let users move through rooms based on user input
and get descriptions of each room. To create this, you’ll need to establish the directions
in which the user can move, a way to track how far the user has moved (and therefore
which room he/she is in), and to print out a description. You’ll also need to set limits
for how far the user can move. In other words, create “walls” around the rooms that tell
the user, “You can’t move further in this direction.”
Concepts to keep in mind:
Strings
Variables
Input/Output
If/Else Statements
Print
List
Integers
The tricky parts here will involve setting up the directions and keeping track of just
how far the user has “walked” in the game. I suggest sticking to just a few basic
descriptions or rooms, perhaps 6 at most. This project also continues to build on using
user-inputted data. It can be a relatively basic game, but if you want to build this into
a vast, complex world, the coding will get substantially harder, especially if you want
your user to start interacting with actual objects within the game. That complexity could
be great, if you’d like to make this into a long-term project. *Hint hint.
'''
| [
"noreply@github.com"
] | jmuhlenberg.noreply@github.com |
591449b125d681f0d0437280802fbedb02c911c8 | ffdc77394c5b5532b243cf3c33bd584cbdc65cb7 | /tests/ut/python/train/summary/test_summary_ops_params_valid_check.py | cac9f18eeffc2b9351bd6aebc7b0a0aa6db27a80 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-f... | permissive | mindspore-ai/mindspore | ca7d5bb51a3451c2705ff2e583a740589d80393b | 54acb15d435533c815ee1bd9f6dc0b56b4d4cf83 | refs/heads/master | 2023-07-29T09:17:11.051569 | 2023-07-17T13:14:15 | 2023-07-17T13:14:15 | 239,714,835 | 4,178 | 768 | Apache-2.0 | 2023-07-26T22:31:11 | 2020-02-11T08:43:48 | C++ | UTF-8 | Python | false | false | 5,741 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test summary function of ops params valid check."""
import os
import tempfile
import shutil
from enum import Enum
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.train.summary.summary_record import SummaryRecord
from tests.security_utils import security_off_wrap
class SummaryEnum(Enum):
    """Maps each summary kind to the class name of its MindSpore primitive op."""
    IMAGE = P.ImageSummary.__name__
    SCALAR = P.ScalarSummary.__name__
    TENSOR = P.TensorSummary.__name__
    HISTOGRAM = P.HistogramSummary.__name__
class SummaryNet(nn.Cell):
    """Minimal network that records one summary (tag, data) pair, then returns 1 + 1."""
    def __init__(self, summary_type, tag, data):
        super(SummaryNet, self).__init__()
        self.tag = tag
        self.data = data
        # Instantiate the summary primitive (e.g. P.ScalarSummary) by its class name.
        self.summary_fn = getattr(P, summary_type)()
        self.one = Tensor(np.array([1]).astype(np.float32))
        self.add = P.Add()
    def construct(self):
        # Record the tag/data pair; argument validation happens inside the op.
        self.summary_fn(self.tag, self.data)
        # Dummy computation so the graph has a concrete output.
        return self.add(self.one, self.one)
class TestSummaryOps:
    """Test summary operators' tag/value parameter validation."""
    # Temporary directory for summary files; created in setup_class.
    summary_dir = ''
    @classmethod
    def run_case(cls, net):
        """Run the net for a few steps while recording summaries."""
        net.set_train()
        steps = 10
        with SummaryRecord(cls.summary_dir) as test_writer:
            for i in range(1, steps):
                net()
                test_writer.record(i)
    @classmethod
    def setup_class(cls):
        """Run before class: create the temporary summary directory."""
        if not os.path.exists(cls.summary_dir):
            cls.summary_dir = tempfile.mkdtemp(suffix='_summary')
    @classmethod
    def teardown_class(cls):
        """Run after class: remove the temporary summary directory."""
        if os.path.exists(cls.summary_dir):
            shutil.rmtree(cls.summary_dir)
    @security_off_wrap
    @pytest.mark.parametrize(
        "summary_type, value",
        [
            (SummaryEnum.SCALAR.value, Tensor(1)),
            (SummaryEnum.SCALAR.value, Tensor(np.array([1]))),
            (SummaryEnum.IMAGE.value, Tensor(np.array([[[[1], [2], [3], [4]]]]))),
            (SummaryEnum.TENSOR.value, Tensor(np.array([[1], [2], [3], [4]]))),
            (SummaryEnum.HISTOGRAM.value, Tensor(np.array([[1], [2], [3], [4]]))),
        ])
    def test_summary_success(self, summary_type, value):
        """Test summary success with valid tag and valid data."""
        net = SummaryNet(summary_type, tag='tag', data=value)
        TestSummaryOps.run_case(net)
    @security_off_wrap
    @pytest.mark.parametrize(
        "summary_type",
        [
            SummaryEnum.SCALAR.value,
            SummaryEnum.IMAGE.value,
            SummaryEnum.HISTOGRAM.value,
            SummaryEnum.TENSOR.value
        ])
    def test_summary_tag_is_none(self, summary_type):
        """A None tag must raise TypeError; all summary ops validate consistently."""
        net = SummaryNet(summary_type, tag=None, data=Tensor(0))
        with pytest.raises(TypeError):
            TestSummaryOps.run_case(net)
    @security_off_wrap
    @pytest.mark.parametrize(
        "summary_type",
        [
            SummaryEnum.SCALAR.value,
            SummaryEnum.IMAGE.value,
            SummaryEnum.HISTOGRAM.value,
            SummaryEnum.TENSOR.value
        ])
    def test_summary_tag_is_empty_string(self, summary_type):
        """An empty-string tag must raise ValueError; all summary ops validate consistently."""
        net = SummaryNet(summary_type, tag='', data=Tensor(0))
        with pytest.raises(ValueError):
            TestSummaryOps.run_case(net)
    @security_off_wrap
    @pytest.mark.parametrize("tag", [123, True, Tensor(0)])
    def test_summary_tag_is_not_string(self, tag):
        """A non-string tag must raise TypeError."""
        # All summary operator validation rules are consistent, so we only test scalar summary.
        net = SummaryNet(SummaryEnum.SCALAR.value, tag=tag, data=Tensor(0))
        with pytest.raises(TypeError):
            TestSummaryOps.run_case(net)
    @security_off_wrap
    @pytest.mark.parametrize("value", [123, True, 'data'])
    def test_summary_value_type_invalid(self, value):
        """A non-Tensor value must raise TypeError."""
        # All summary operator validation rules are consistent, so we only test scalar summary.
        net = SummaryNet(SummaryEnum.SCALAR.value, tag='tag', data=value)
        with pytest.raises(TypeError):
            TestSummaryOps.run_case(net)
    @security_off_wrap
    @pytest.mark.parametrize(
        "summary_type, value",
        [
            (SummaryEnum.IMAGE.value, Tensor(np.array([1, 2]))),
            (SummaryEnum.TENSOR.value, Tensor(0)),
            (SummaryEnum.HISTOGRAM.value, Tensor(0))
        ])
    def test_value_shape_invalid(self, summary_type, value):
        """A value whose shape the op cannot accept must raise ValueError."""
        net = SummaryNet(summary_type, tag='tag', data=value)
        with pytest.raises(ValueError):
            TestSummaryOps.run_case(net)
| [
"zhujianfeng@huawei.com"
] | zhujianfeng@huawei.com |
39ab273dae34141056fb99b2a557a0c095a9ee09 | 8cd90c5b92fe85158226de32b1fbb4c34ebd658b | /oscar_docdata/models.py | f3295ad74437b13549e68019e34d3e7aedc771ad | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | mvantellingen/django-oscar-docdata | 772ec3db372f9571cf62932ad2fe945c65fd2d7f | 983d3f8144e1feb67d4a2c5bb98b499e69e4ad44 | refs/heads/master | 2023-08-25T06:33:59.105290 | 2016-06-14T12:41:37 | 2016-06-14T12:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,043 | py | from decimal import Decimal as D
from django.db import models
from django.utils.translation import ugettext_lazy as _
from oscar_docdata.managers import DocdataOrderManager
from . import appsettings
try:
from polymorphic.models import PolymorphicModel # django-polymorphic 0.8
except ImportError:
from polymorphic import PolymorphicModel
class DocdataOrder(models.Model):
    """
    Tracking of the order which is sent to docdata.

    Stores both what was sent (gross amount, currency) and the running
    totals reported back by Docdata's status API.
    """
    # Simplified internal status codes.
    # Lowercased on purpose to avoid mixing the statuses together.
    STATUS_NEW = 'new' # Initial state
    STATUS_IN_PROGRESS = 'in_progress' # In the redirect phase
    STATUS_PENDING = 'pending' # Waiting for user to complete payment (e.g. credit cards)
    STATUS_PAID = 'paid' # End of story, paid!
    STATUS_PAID_REFUNDED = 'paid_refunded' # Paid, and performed a partial refund
    STATUS_CANCELLED = 'cancelled' # End of story, cancelled
    STATUS_CHARGED_BACK = 'charged_back' # End of story, consumer asked for charge back
    STATUS_REFUNDED = 'refunded' # End of story, refunded, merchant refunded
    STATUS_EXPIRED = 'expired' # No results of customer, order was closed.
    STATUS_UNKNOWN = 'unknown' # Help!
    STATUS_CHOICES = (
        (STATUS_NEW, _("New")),
        (STATUS_IN_PROGRESS, _("In Progress")),
        (STATUS_PENDING, _("Pending")),
        (STATUS_PAID, _("Paid")),
        (STATUS_PAID_REFUNDED, _("Paid, part refunded")),
        (STATUS_CANCELLED, _("Cancelled")),
        (STATUS_CHARGED_BACK, _("Charged back")),
        (STATUS_REFUNDED, _("Refunded")),
        (STATUS_EXPIRED, _("Expired")),
        (STATUS_UNKNOWN, _("Unknown")),
    )
    merchant_name = models.CharField(_("Docdata account"), max_length=100, default=appsettings.DOCDATA_MERCHANT_NAME)
    merchant_order_id = models.CharField(_("Order ID"), max_length=100, default='')
    # Docdata's payment cluster key; unique handle for this order at Docdata.
    order_key = models.CharField(_("Payment cluster ID"), max_length=200, default='', unique=True)
    status = models.CharField(_("Status"), max_length=50, choices=STATUS_CHOICES, default=STATUS_NEW)
    language = models.CharField(_("Language"), max_length=5, blank=True, default='en')
    # Track sent information
    total_gross_amount = models.DecimalField(_("Total gross amount"), max_digits=15, decimal_places=2)
    currency = models.CharField(_("Currency"), max_length=10)
    country = models.CharField(_("Country_code"), max_length=2, null=True, blank=True)
    # Track received information (running totals from the Docdata status API).
    total_registered = models.DecimalField(_("Total registered"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_shopper_pending = models.DecimalField(_("Total shopper pending"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_acquirer_pending = models.DecimalField(_("Total acquirer pending"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_acquirer_approved = models.DecimalField(_("Total acquirer approved"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_captured = models.DecimalField(_("Total captured"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_refunded = models.DecimalField(_("Total refunded"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_charged_back = models.DecimalField(_("Total changed back"), max_digits=15, decimal_places=2, default=D('0.00'))
    # Internal info.
    created = models.DateTimeField(_("created"), auto_now_add=True)
    updated = models.DateTimeField(_("updated"), auto_now=True)
    objects = DocdataOrderManager()
    class Meta:
        ordering = ('-created', '-updated')
        verbose_name = _("Docdata Order")
        verbose_name_plural = _("Docdata Orders")
    def __unicode__(self):
        return self.order_key
    def __repr__(self):
        return "<DocdataOrder: {0}, {1} status={2}>".format(self.order_key, self.merchant_order_id, self.status)
    @property
    def latest_payment(self):
        # Most recent payment by Docdata payment id, or None if there are none yet.
        try:
            return self.payments.order_by('-payment_id').all()[0]
        except IndexError:
            return None
    def cancel(self):
        """
        Cancel an order in Docdata.
        """
        from .facade import get_facade
        facade = get_facade()
        facade.cancel_order(self)
    cancel.alters_data = True
class DocdataPayment(PolymorphicModel):
    """
    A reported Docdata payment.
    This is a summarized version of a Docdata payment transaction,
    as returned by the status API call.
    Some payment types have additional fields, which are stored as subclass.
    """
    docdata_order = models.ForeignKey(DocdataOrder, related_name='payments')
    # Docdata's transaction identifier doubles as the primary key.
    payment_id = models.CharField(_("Payment id"), max_length=100, default='', blank=True, primary_key=True)
    # Note: We're not using choices here so that we can write unknown statuses if they are presented by Docdata.
    status = models.CharField(_("status"), max_length=30, default='NEW')
    # The payment method id from Docdata (e.g. IDEAL, MASTERCARD, etc)
    payment_method = models.CharField(max_length=60, default='', blank=True)
    # Track the various amounts associated with this source
    confidence_level = models.CharField(_("Confidence level"), max_length=30, default='', editable=False)
    amount_allocated = models.DecimalField(_("Amount Allocated"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
    amount_debited = models.DecimalField(_("Amount Debited"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
    amount_refunded = models.DecimalField(_("Amount Refunded"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
    amount_chargeback = models.DecimalField(_("Amount Changed back"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
    # Internal info.
    created = models.DateTimeField(_("created"), auto_now_add=True)
    updated = models.DateTimeField(_("updated"), auto_now=True)
    def __unicode__(self):
        return self.payment_id
    class Meta:
        ordering = ('payment_id',)
        verbose_name = _("Payment")
        verbose_name_plural = _("Payments")
# NOTE: currently unused.
# DirectDebit is used for periodic transfers (e.g. "Automatische incasso" in The Netherlands)
class DocdataDirectDebitPayment(DocdataPayment):
    """
    Web direct debit direct payment.

    Extends DocdataPayment with the account-holder and bank details
    required for direct-debit transactions.
    """
    holder_name = models.CharField(max_length=35) # max_length from Docdata
    holder_city = models.CharField(max_length=35) # max_length from Docdata
    holder_country_code = models.CharField(_("Country_code"), max_length=2, null=True, blank=True)
    # Note: there is django-iban for validated versions of these fields.
    # Not needed here.
    iban = models.CharField(max_length=34)
    bic = models.CharField(max_length=11)
    class Meta:
        ordering = ('-created', '-updated')
        verbose_name = _("Direct Debit Payment")
        verbose_name_plural = _("Derect Debit Payments")
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
98a2dca4914597a422b4febb17cf8b7424bda615 | 549887e0f791306267e2d5fc4892c4c59296ffcb | /Python/Simulation/Numerical_Methods/test_gauss_elimination.py | 9d2a87f314c93e4e26878d12b8e7bb2b196b2c2b | [
"MIT"
] | permissive | MattMarti/Lambda-Trajectory-Sim | 5083a5f22c2773475afc23ba2502bb41708d53f0 | 4155f103120bd49221776cc3b825b104f36817f2 | refs/heads/master | 2020-05-24T03:25:18.952071 | 2020-03-21T02:49:39 | 2020-03-21T02:49:39 | 187,071,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,954 | py | import unittest;
import numpy as np;
from gauss_elimination import gauss_elimination;
class Test_gauss_elimination(unittest.TestCase):
    '''
    Test_gauss_elimination

    Test case for the Gauss Elimination function, which solves linear
    systems via Gaussian elimination with partial pivoting.

    @dependencies
    python 3.6.0
    unittest
    numpy
    @author: Matt Marti
    @date: 2019-06-05
    '''

    def test_nominal_01(self):
        """Nominal case: 5x5 system with two right-hand-side columns."""
        # Given
        A = np.array([[3, 2, -3, 1, 6],
                      [6, 2, 4, 0, 5],
                      [-3, 1, 0, 2, 3],
                      [5, -8, 1, 2, 6],
                      [5, -8, 1, 4, 6]],
                     dtype=np.float64)
        B = np.array([[-24, 5],
                      [-6, 3],
                      [-9, 8],
                      [24, 2],
                      [36, 12]],
                     dtype=np.float64)
        # True solution from numpy's reference solver
        xtru = np.linalg.solve(A, B)
        # Computed solution
        x_soln, A_aug = gauss_elimination(A, B, True)
        # Check x solution element-wise
        precision = 1e-12
        for i in range(xtru.shape[0]):
            for j in range(xtru.shape[1]):
                assert abs(xtru[i, j] - x_soln[i, j]) < precision, 'Wrong solution'
            # Check that the triangular matrix is returned correctly
            for j in range(i):
                assert not A_aug[i, j], 'Non-zero element in lower triangular area'

    def test_nominal_02(self):
        """A matrix that actually needs partial pivoting; plain int arrays."""
        # Given
        A = np.array([[1, 1, 1], [2, 2, 1], [3, 4, 2]])
        B = np.array([[1], [2], [2]])
        # True solution
        xtru = np.linalg.solve(A, B)
        # Computed solution
        x_soln, A_aug = gauss_elimination(A, B, True)
        # Check x solution element-wise
        precision = 1e-12
        for i in range(xtru.shape[0]):
            for j in range(xtru.shape[1]):
                assert abs(xtru[i, j] - x_soln[i, j]) < precision, 'Wrong solution'
            # Check that the triangular matrix is returned correctly
            for j in range(i):
                assert not A_aug[i, j], 'Non-zero element in lower triangular area'

    def test_single_values(self):
        """Scalar (1x1) systems, including a non-integer solution."""
        # Test with ints
        a = np.ndarray((1, 1))
        a[0, 0] = 2
        b = np.ndarray((1, 1))
        b[0, 0] = 10
        x = gauss_elimination(a, b)
        # BUGFIX: the failure message must be passed as msg=...; as the third
        # positional argument it would be interpreted as the 'places' parameter
        # and raise a TypeError whenever the values are not exactly equal.
        self.assertAlmostEqual(x[0, 0], 5, msg='Failed for scalar values')
        # Test with float result
        a[0, 0] = 2
        b[0, 0] = 3
        x = gauss_elimination(a, b)
        self.assertAlmostEqual(x[0, 0], 1.5, msg='Failed for scalar values')

    def test_types(self):
        """Only numpy.ndarray arguments are accepted; others raise TypeError."""
        # Valid inputs from test_nominal_02
        A = np.array([[1, 1, 1], [2, 2, 1], [3, 4, 2]])
        B = np.array([[1], [2], [2]])
        # Invalid values of several non-ndarray types
        x = 'Five'           # String type
        y = False            # Bool type
        z = [5, 5, 1]        # List type
        w = [[1], [2], [2]]  # List type
        # Each invalid value must be rejected in either argument position
        for bad in (x, y, z, w):
            self.assertRaises(TypeError, gauss_elimination, A, bad)
            self.assertRaises(TypeError, gauss_elimination, bad, B)
| [
"43075587+MattMarti@users.noreply.github.com"
] | 43075587+MattMarti@users.noreply.github.com |
1e20830d7fb509ea67ad56afcd3cbb32e10e3c89 | 5fefcd078a421ddd00bc2cdd0f205a0942e983e4 | /yencap-manager-3/DeviceStatusManager.py | b2be9a994eb494414e10562167dd890ed0ea7684 | [] | no_license | backtosource/EnSuite | 0ae2b6935d3aaf6ba7c7ef21761a1e0209e345ab | e82f0694b4f6d5f64b5449fbc37dfc9864b2dce7 | refs/heads/master | 2020-07-18T22:51:33.533130 | 2019-09-05T14:47:18 | 2019-09-05T14:47:18 | 206,328,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from threading import Thread
import time
class DeviceStatusManager(Thread):
    """Background thread that refreshes the status of every agent once per second."""

    def __init__(self, agents):
        """Remember the agents to poll and mark the polling loop as active."""
        Thread.__init__(self)
        self.agents = agents      # objects exposing a refreshStatus() method
        self.running = True       # loop flag; cleared by stop()

    def run(self):
        """Poll each agent's status every second until stop() is called."""
        while self.running:
            time.sleep(1)
            for current_agent in self.agents:
                current_agent.refreshStatus()

    def stop(self):
        """Request the polling loop to terminate after the current cycle."""
        self.running = False
| [
"maximilian.huetter@gft.com"
] | maximilian.huetter@gft.com |
966756ed8885d24a5f4e6cb78421769df901fd18 | a0d5d9fa31f0cf5972a233dc53989992627f37e1 | /mfscrm/wsgi.py | 5280d389e72d9233c0cd57c3c7348a0505fe9406 | [] | no_license | snaddimulla/assign2 | c4aa8f0602e9eb5dc9efb3b49cfe633c5eaa806f | fdee5a5ebf85b99343fdd774581684b2bf636aa0 | refs/heads/master | 2020-04-25T17:20:49.193330 | 2019-02-28T16:34:39 | 2019-02-28T16:34:39 | 172,944,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to this project's settings module when none is configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mfscrm.settings")
# Module-level WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"snaddimulla@gmail.com"
] | snaddimulla@gmail.com |
57c224a601154950df2e7940ebaf32c20361fd1a | fe89f98d94127d971c6a70bf28317a4c742f40ef | /model/AlexNet/AlexNet_Test.py | a904d545ea4295a32681afa93f73a94ad51b1b3d | [] | no_license | cyhgxu/pytorch_model_zoo | 74f3c386407405b0764d85ba9aaf5cc269a1655f | be3522d99abfa65d5787d9134b53c43f62d83464 | refs/heads/master | 2020-06-13T20:47:24.057781 | 2019-03-27T15:19:20 | 2019-03-27T15:19:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | import torch
import AlexNet
import numpy as np
import cv2
from torch.autograd import Variable
resizeH = 227
resizeW = 227
pth_file = "model.pth" # model_file
image_path = "313.jpg" #test_image
classes = ["dog", "cat"]
def AlexNet_Test(pth_file, image_path):
    """Load an AlexNet checkpoint and print the predicted class of one image."""
    model = AlexNet.AlexNet()
    model.load_state_dict(torch.load(pth_file))
    model.eval()
    # Read and resize the image to the network's expected input size.
    image = cv2.imread(image_path)
    image = cv2.resize(image, (resizeH, resizeW), interpolation=cv2.INTER_CUBIC)
    image = image.astype(np.float32)
    # NOTE(review): (2, 1, 0) swaps height and width in addition to moving
    # channels first; HWC -> CHW would normally be (2, 0, 1). Confirm this
    # matches the preprocessing used at training time.
    image = np.transpose(image, (2, 1, 0))
    # Add a batch dimension: (C, H, W) -> (1, C, H, W).
    image = torch.from_numpy(image).unsqueeze(0)
    print(image.size())
    if torch.cuda.is_available():
        model = model.cuda()
        image = image.cuda()
    # out = model(Variable(image))
    out = model(image)
    # Index of the highest-scoring class, moved back to the CPU.
    pre = torch.max(out, 1)[1].cpu()
    pre = pre.numpy()
    pre_class = int(pre[0])
    print(classes[pre_class])
    # print("prdict is {:.s}".format(classes[pre[0]]))
if __name__ == "__main__":
    AlexNet_Test(pth_file, image_path)
| [
"noreply@github.com"
] | cyhgxu.noreply@github.com |
beca7bfe6544a571637927d1528faa1a57eff77f | edd4430ecc9fb5d92e5875ac701acce8ae773eb3 | /algorithms.master/array : list/2Sum.py | 1654b4d38794b531367e69014e7d822c50084faa | [] | no_license | apk2129/_personal | 1a1ba8c38bb367d6b37d847546cbf7e3a0df5b12 | 83f95caa3f7a487e8561e69133772d5add4484e1 | refs/heads/master | 2021-01-18T03:17:05.180550 | 2017-10-23T01:04:29 | 2017-10-23T01:04:29 | 53,992,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | nums = [2, 7, 11, 15]
target = 9
def twoSum(nums, target):
    """Return indices [i, j] of two entries summing to target.

    Returns False for inputs shorter than two elements, and None
    (implicitly) when no such pair exists.
    """
    if len(nums) <= 1:
        return False
    seen = {}  # value -> index of its first occurrence
    for idx, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], idx]
        seen[value] = idx
print(twoSum(nums, target))
| [
"iAnish@Anishs-MacBook-Pro.local"
] | iAnish@Anishs-MacBook-Pro.local |
264aa98cdced1e3a3b21e731910d92a4f81a7489 | 5db3d51ff9a0bd7647c2315a358cb4ec9299d9d5 | /analyzeBusReportFnv2.py | f24d495ec4b04167e7b50dce7763a807fe53f163 | [] | no_license | bikiranguha/Thesis_project | 866385f51bd476448730c8169eb0b3c1dacba84e | 1a52ba0fed86afb522bda067b8011b6940b4088d | refs/heads/master | 2020-03-31T06:52:16.627848 | 2018-12-28T02:59:33 | 2018-12-28T02:59:33 | 151,997,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | """
Function which generates a bus flow report of comed buses
"""
def BusReport(flowReportFile,Raw):
    """Parse a PSS/E bus flow report into a dict of per-bus flow records.

    Args:
        flowReportFile: path to the text bus flow report; bus sections are
            recognized by lines starting with 'BUS'.
        Raw: path to the raw case file, passed to getBusData.

    Returns:
        dict mapping each from-bus id (str) to a flowReport instance holding
        parallel lists of to-buses, MW, MVAR and circuit ids.
    """
    from getBusDataFn import getBusData
    BusDataDict = getBusData(Raw)
    ComedPlusBoundarySet = set()
    flowDict = {}
    #FromBusLines = []
    #ToBusLines = []
    # Simple record of all branch flows leaving one bus (parallel lists).
    class flowReport(object):
        def __init__(self):
            self.toBusList = []
            self.MWList = []
            self.MVARList = []
            self.MVAList = []
            self.cktID = []
    """
    with open(Raw,'r') as f:
        filecontent = f.read()
        fileLines = filecontent.split('\n')
    branchStartIndex = fileLines.index('0 / END OF GENERATOR DATA, BEGIN BRANCH DATA') + 1
    branchEndIndex = fileLines.index('0 / END OF BRANCH DATA, BEGIN TRANSFORMER DATA')
    for i in range(branchStartIndex, branchEndIndex):
        line = fileLines[i]
        words = line.split(',')
        Bus1 = words[0].strip()
        Bus2 = words[1].strip()
        try:
            Bus1Area = BusDataDict[Bus1].area
            Bus2Area = BusDataDict[Bus2].area
        except: # for buses '243083' and '638082'
            continue
        if Bus1Area == '222' and Bus2Area == '222':
            ComedPlusBoundarySet.add(Bus1)
            ComedPlusBoundarySet.add(Bus2)
        if Bus1Area == '222' and Bus2Area != '222':
            ComedPlusBoundarySet.add(Bus1)
            ComedPlusBoundarySet.add(Bus2)
        if Bus1Area != '222' and Bus2Area == '222':
            ComedPlusBoundarySet.add(Bus1)
            ComedPlusBoundarySet.add(Bus2)
    for Bus in BusDataDict:
        area = BusDataDict[Bus].area
        if area == '222':
            ComedPlusBoundarySet.add(Bus)
    """
    with open(flowReportFile,'r') as f:
        filecontent = f.read()
        fileLines = filecontent.split('\n')
    # Every 'BUS' header line starts a new from-bus section in the report.
    indices = [i for i, line in enumerate(fileLines) if line.startswith('BUS')]
    for i in indices:
        #print i
        line = fileLines[i]
        # Bus id lives in fixed columns 4-10 of the header line.
        FromBus = line[4:10].strip()
        """
        if FromBus not in ComedPlusBoundarySet:
            continue
        """
        flowDict[FromBus] = flowReport()
        # Skip the header and the column-title line that follows it.
        i+=2
        line = fileLines[i]
        # Consume branch rows until the section's mismatch summary line.
        while not 'M I S M A T C H' in line:
            if 'RATING' in line:
                break
            # Skip non-branch rows (generation / load / shunt summaries).
            if 'GENERATION' in line or 'LOAD' in line or 'SHUNT' in line:
                i+=1
                line = fileLines[i]
                continue
            # Fixed-column fields of one branch flow row.
            toBus = line[4:10].strip()
            MW=float(line[34:42].strip())
            MVAR=float(line[42:50].strip())
            cktID = line[31:34]
            #print toBus
            flowDict[FromBus].toBusList.append(toBus)
            flowDict[FromBus].MWList.append(MW)
            flowDict[FromBus].MVARList.append(MVAR)
            flowDict[FromBus].cktID.append(cktID)
            #ToBusLines.append(toBus)
            i+=1
            if i >=len(fileLines):
                break
            line = fileLines[i]
    return flowDict
"""
with open('tmp.txt','w') as f:
for Bus in ToBusLines:
f.write(Bus)
f.write('\n')
"""
if __name__ == '__main__':
    # Example usage with a cropped July-23 case and its bus flow report.
    flowReportFile = 'BusReportsRawCropped_0723.txt'
    Raw = 'RawCropped_0723v2.raw'
    flowDict = BusReport(flowReportFile,Raw)
"bikiranguha@gmail.com"
] | bikiranguha@gmail.com |
be91b22431f62bbaccbbdb30b2dc7e2c7c2b81f8 | f683e147b543a939da6240051bdb38c3a018ab80 | /videotext/apps/core/migrations/0005_auto__add_field_note_type.py | af35dae9709293fc603520f9672bbed8006429b4 | [
"LicenseRef-scancode-other-permissive"
] | permissive | ReportersLab/TheVideoNotebook_Public | ffae912262805470af1e0b6097a3ba3c53a015a5 | 77314c417c0e6b97a4b210e438dcac0cf202a27c | refs/heads/master | 2020-12-24T15:14:28.244768 | 2012-07-31T13:33:23 | 2012-07-31T13:33:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,105 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable 'type' column to core_note."""
        # Adding field 'Note.type'
        db.add_column('core_note', 'type', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the 'type' column from core_note."""
        # Deleting field 'Note.type'
        db.delete_column('core_note', 'type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.customtag': {
'Meta': {'object_name': 'CustomTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'core.customtagitem': {
'Meta': {'object_name': 'CustomTagItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'core_customtagitem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tagged_items'", 'to': "orm['core.CustomTag']"})
},
'core.note': {
'Meta': {'ordering': "['-creation_time']", 'object_name': 'Note'},
'creation_time': ('django.db.models.fields.DateTimeField', [], {}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'icon_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Video']", 'null': 'True'})
},
'core.video': {
'Meta': {'ordering': "['-creation_time']", 'object_name': 'Video'},
'creation_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'icon_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'teaser': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'mp4'", 'max_length': '32', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'video_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '256'})
}
}
complete_apps = ['core']
| [
"charlie.is@duke.edu"
] | charlie.is@duke.edu |
14ac05a13edaa90016691a783c85acc89dab2949 | 547e4724d80b9be67ada4d54e205c5b5182f484f | /symbol/detector.py | 3ea97c49a85cdc5d7ef54580aa560f4f67348156 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | v-qjqs/simpledet-1 | 62d205b8d418ed7caf86325cb6e22cffec0e0dbe | 06250f35419e94ff730400bd2ff79765608587fe | refs/heads/master | 2022-11-14T18:17:37.170799 | 2020-07-04T06:18:25 | 2020-07-04T06:18:25 | 255,297,227 | 1 | 0 | Apache-2.0 | 2020-04-13T10:36:11 | 2020-04-13T10:36:10 | null | UTF-8 | Python | false | false | 3,043 | py | from __future__ import print_function
import mxnet as mx
import mxnext as X
from utils.patch_config import patch_config_as_nothrow
class Rpn(object):
    """Detector builder for an RPN-only (region-proposal) network."""
    # Class-level cache of the test-time symbol.  NOTE(review): being a class
    # attribute, it is shared across every call in the process -- presumably
    # one detector configuration per process is assumed; confirm.
    _rpn_output = None
    def __init__(self):
        pass
    @classmethod
    def get_train_symbol(cls, backbone, neck, rpn_head):
        """Assemble the training graph: backbone -> neck -> RPN loss group."""
        rpn_feat = backbone.get_rpn_feature()
        rpn_feat = neck.get_rpn_feature(rpn_feat)
        # NOTE(review): gt_bbox/im_info are passed as None here -- presumably
        # the head declares its own label variables; confirm against rpn_head.
        rpn_loss = rpn_head.get_loss(rpn_feat, None, None)
        return X.group(rpn_loss)
    @classmethod
    def get_rpn_test_symbol(cls, backbone, neck, rpn_head):
        """Assemble (and memoize in `_rpn_output`) the inference graph.

        Returns a symbol group of (rec_id, im_id, im_info, proposal,
        proposal_score).
        """
        if cls._rpn_output is not None:
            return cls._rpn_output
        im_info = X.var("im_info")
        im_id = X.var("im_id")
        rec_id = X.var("rec_id")
        rpn_feat = backbone.get_rpn_feature()
        rpn_feat = neck.get_rpn_feature(rpn_feat)
        (proposal, proposal_score) = rpn_head.get_all_proposal(rpn_feat, im_info)
        cls._rpn_output = X.group([rec_id, im_id, im_info, proposal, proposal_score])
        return cls._rpn_output
class FasterRcnn(object):
    """Detector builder for a two-stage Faster R-CNN (RPN + RoI head)."""
    # Class-level cache of the proposal-only test symbol (shared process-wide).
    _rpn_output = None
    def __init__(self):
        pass
    @classmethod
    def get_train_symbol(cls, backbone, neck, rpn_head, roi_extractor, bbox_head):
        """Build the joint training graph grouping RPN and bbox-head losses."""
        gt_bbox = X.var("gt_bbox")
        im_info = X.var("im_info")
        rpn_feat = backbone.get_rpn_feature()
        rcnn_feat = backbone.get_rcnn_feature()
        rpn_feat = neck.get_rpn_feature(rpn_feat)
        rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
        rpn_head.get_anchor()
        rpn_loss = rpn_head.get_loss(rpn_feat, gt_bbox, im_info)
        # Second stage trains on proposals sampled against the ground truth.
        proposal, bbox_cls, bbox_target, bbox_weight = rpn_head.get_sampled_proposal(rpn_feat, gt_bbox, im_info)
        roi_feat = roi_extractor.get_roi_feature(rcnn_feat, proposal)
        bbox_loss = bbox_head.get_loss(roi_feat, bbox_cls, bbox_target, bbox_weight)
        return X.group(rpn_loss + bbox_loss)
    @classmethod
    def get_test_symbol(cls, backbone, neck, rpn_head, roi_extractor, bbox_head):
        """Build the full inference graph: proposals -> RoI features -> boxes."""
        rec_id, im_id, im_info, proposal, proposal_score = \
            FasterRcnn.get_rpn_test_symbol(backbone, neck, rpn_head)
        rcnn_feat = backbone.get_rcnn_feature()
        rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
        roi_feat = roi_extractor.get_roi_feature_test(rcnn_feat, proposal)
        cls_score, bbox_xyxy = bbox_head.get_prediction(roi_feat, im_info, proposal)
        return X.group([rec_id, im_id, im_info, cls_score, bbox_xyxy])
    @classmethod
    def get_rpn_test_symbol(cls, backbone, neck, rpn_head):
        """Build (and memoize in `_rpn_output`) the proposal-only graph.

        Unlike Rpn.get_rpn_test_symbol, this variant calls
        rpn_head.get_anchor() before extracting features.
        """
        if cls._rpn_output is not None:
            return cls._rpn_output
        im_info = X.var("im_info")
        im_id = X.var("im_id")
        rec_id = X.var("rec_id")
        rpn_head.get_anchor()
        rpn_feat = backbone.get_rpn_feature()
        rpn_feat = neck.get_rpn_feature(rpn_feat)
        (proposal, proposal_score) = rpn_head.get_all_proposal(rpn_feat, im_info)
        cls._rpn_output = X.group([rec_id, im_id, im_info, proposal, proposal_score])
        return cls._rpn_output
"noreply@github.com"
] | v-qjqs.noreply@github.com |
386b1efce4d6fc612c355b0fef509a117895acef | 493dcbc5b1c98b44d4a5d8e8c099e61840971c4c | /infogan.py | 594d7c2c60fc0cb284669d4618095ec33608abca | [] | no_license | prabowst/infogan | 90208fc59b7107fd18cfcdb1399f158d530a92f5 | f98dae144d4f8743a3e476a01bcc4c65fafd1a18 | refs/heads/master | 2023-06-08T12:53:26.964162 | 2021-06-26T12:23:47 | 2021-06-26T12:23:47 | 380,393,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,829 | py | import tensorflow as tf
import tensorflow.keras as keras
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import time
from model import generator, discriminator
from utils import sample_generator_input, plot_interval
import warnings
warnings.filterwarnings('ignore')
def train():
    '''
    Training loop of InfoGAN. This includes the declaration of the networks, specifications of optimizers, learning rate, batch size, and gradient calculations.
    Params:
        None
    Return:
        None
    '''
    # MNIST images, reshaped to NHWC and scaled to [-1, 1] for tanh output.
    (X_img, y_img), (_, _) = tf.keras.datasets.mnist.load_data()
    X_img = X_img.reshape((X_img.shape[0], 28, 28, 1))
    X_img = (X_img/127.5) - 1
    # Separate optimizers: generator learns faster than disc/aux heads.
    gen_optim = keras.optimizers.Adam(1e-3)
    disc_optim = keras.optimizers.Adam(2e-4)
    aux_optim = keras.optimizers.Adam(2e-4)
    gen_model = generator()
    disc_model, aux_model = discriminator()
    # Latent layout: 10-way categorical + 2 continuous codes + 62 noise dims.
    batch = 128
    con_size = 62
    num_class = 10
    epochs = 100
    disc_losses = []
    gen_losses = []
    aux_losses = []
    for epoch in range(epochs):
        temp_disc, temp_gen, temp_aux = [], [], []
        start = time.time()
        X_dataset = tf.data.Dataset.from_tensor_slices(X_img) \
            .shuffle(X_img.shape[0]).batch(batch)
        num_step = 0
        for X_batch in X_dataset:
            '==================TRAIN_STEP==================='
            losses = [
                keras.losses.BinaryCrossentropy(),
                keras.losses.CategoricalCrossentropy()
            ]
            batch_size = X_batch.shape[0]
            gen_cat, gen_c1, gen_c2, gen_con = sample_generator_input(batch_size, con_size, num_class)
            gen_input = np.concatenate((gen_cat, gen_c1, gen_c2, gen_con), axis=1)
            # --- Discriminator step: real vs fake binary loss. ---
            with tf.GradientTape() as discriminator_tape:
                disc_model.trainable = True
                discriminator_tape.watch(disc_model.trainable_variables)
                disc_real_out = disc_model(X_batch, training=True)
                disc_real_loss = losses[0](tf.ones((batch_size, 1)), disc_real_out)
                image_fake = gen_model(gen_input, training=True)
                disc_fake_out = disc_model(image_fake, training=True)
                disc_fake_loss = losses[0](tf.zeros((batch_size, 1)), disc_fake_out)
                disc_loss = disc_real_loss + disc_fake_loss
            disc_grad = discriminator_tape.gradient(disc_loss, disc_model.trainable_variables)
            disc_optim.apply_gradients(zip(disc_grad, disc_model.trainable_variables))
            # Generator/aux step uses a doubled batch of fresh latent samples.
            batch_size = batch_size * 2
            with tf.GradientTape() as generator_tape, tf.GradientTape() as aux_tape:
                generator_tape.watch(gen_model.trainable_variables)
                aux_tape.watch(aux_model.trainable_variables)
                gen_cat, gen_c1, gen_c2, gen_con = sample_generator_input(batch_size, con_size, num_class)
                gen_input = np.concatenate((gen_cat, gen_c1, gen_c2, gen_con), axis=1)
                image_fake = gen_model(gen_input, training=True)
                disc_fake_out = disc_model(image_fake, training=True)
                # NOTE(review): tf.ones(batch_size, 1) passes 1 as the dtype
                # argument (shape [batch_size]), unlike tf.ones((batch_size, 1))
                # above -- confirm the label shape is intended to differ here.
                gen_image_loss = losses[0](tf.ones(batch_size, 1), disc_fake_out)
                # Mutual-information terms: categorical CE + Gaussian NLL of
                # the two continuous codes under the aux head's (mu, sigma).
                cat, mu, sigma = aux_model(image_fake, training=True)
                cat_loss = losses[1](gen_cat, cat)
                gauss_dist = tfp.distributions.Normal(mu, sigma)
                c1_loss = tf.reduce_mean(-gauss_dist.log_prob(gen_c1))
                c2_loss = tf.reduce_mean(-gauss_dist.log_prob(gen_c2))
                gen_loss = gen_image_loss + cat_loss + c1_loss + c2_loss
                aux_loss = cat_loss + c1_loss + c2_loss
            disc_model.trainable = False
            gen_grad = generator_tape.gradient(gen_loss, gen_model.trainable_variables)
            aux_grad = aux_tape.gradient(aux_loss, aux_model.trainable_variables)
            gen_optim.apply_gradients(zip(gen_grad, gen_model.trainable_variables))
            aux_optim.apply_gradients(zip(aux_grad, aux_model.trainable_variables))
            temp_disc.append(disc_loss)
            temp_gen.append(gen_loss)
            temp_aux.append(aux_loss)
            num_step += 1
            # Cap each epoch at 100 optimizer steps.
            if num_step >= 100:
                break
        # Periodic sample grids and generator checkpoints.
        if ((epoch+1) % 10 == 0) or (epoch == 0):
            plot_interval(epoch+1, gen_model)
        if (epoch+1) % 25 == 0:
            gen_model.save('model/infogan_model_generator.tf')
        disc_losses.append(np.mean(temp_disc))
        gen_losses.append(np.mean(temp_gen))
        aux_losses.append(np.mean(temp_aux))
        print('Epoch [{:3d}/{:3d}] | disc_loss: {:6.4f} | gen_loss: {:6.4f} | aux_loss: {:6.4f} | runtime: {:.2f}s' \
              .format(epoch+1, epochs, np.mean(temp_disc), np.mean(temp_gen), np.mean(temp_aux), time.time()-start))
    # Melt the per-epoch losses to long form and plot all three curves.
    epoch_axis = np.arange(1, (epochs)+1, dtype=np.int32)
    df = pd.DataFrame(index=epoch_axis)
    df['epoch'] = df.index
    df['disc_loss'] = disc_losses
    df['gen_loss'] = gen_losses
    df['aux_loss'] = aux_losses
    df = pd.melt(df, id_vars=['epoch'], value_vars=['disc_loss', 'gen_loss', 'aux_loss'],
                 var_name='loss_type', value_name='loss')
    sns.set_style('white')
    plt.figure(figsize=(8,6))
    ax = sns.lineplot(data=df, x='epoch', y='loss', hue='loss_type')
    ax.set_title('Network Losses')
    plt.savefig('figures/network_losses.png', dpi=300, bbox_inches='tight')
    plt.close()
"prabowo@pop-os.localdomain"
] | prabowo@pop-os.localdomain |
9b26142c32ecf8204f45e8bb32268a07bcd98909 | bf33a9beac10eb1b1d26f606723cef86f74a81ce | /DjangoCv/asgi.py | 97bc02d131aa9508a38ee473d22d8008ff0a3f6a | [] | no_license | Siabdel/DjangoCV | 12c1f7722df712336f037aeb3c3a52d901c208a6 | 5b92ade3f320a1b28321ab8916e8545b6daea518 | refs/heads/main | 2023-09-05T06:23:09.118526 | 2021-11-19T18:02:02 | 2021-11-19T18:02:02 | 429,713,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for djangoCv project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the app registry is touched.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoCv.settings')
# Module-level ASGI callable that servers (daphne/uvicorn) look up.
application = get_asgi_application()
| [
"siabdel58@gmail.com"
] | siabdel58@gmail.com |
735ef7f0d53d9a005ed4ebdd72d9109150d2565a | 0e950ad10240b79d5abf062ce1bd2550b2f07c5e | /projet/projetindividuel/urls.py | 3514f138f2bb85031c268b64b62fbe2e92a269d2 | [] | no_license | DesireBourdicGirard/ProjetIndividuel | a81e20499c4c41a8794bf31f0ede966f6777a511 | 8bcda9c5a0845ddf41a1b083734ebfd05e332269 | refs/heads/master | 2023-05-08T01:34:57.744815 | 2021-05-27T06:28:02 | 2021-05-27T06:28:02 | 368,464,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | """projetindividuel URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL table: admin site, stock auth views, and the app's own routes.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Login/logout/password-reset views shipped with django.contrib.auth.
    path('accounts/', include('django.contrib.auth.urls')),
    path('communitymanager/', include('communitymanager.urls'))
]
| [
"desire.bourdic--girard@student.isae-supaero.fr"
] | desire.bourdic--girard@student.isae-supaero.fr |
fb49db779e31377f1be78a9603a577f749e5e3eb | 220deee0fa2de6e5eca47f0234ebf242b45bdc4a | /model.py | 3a25e014100c2240fac2d3f71740e468cba4a61e | [] | no_license | driptaRC/pairwise-pseudo-labeling | cdfbdd7005114e34553f690d7fd0cb6b78bb8e0a | afe6b9b45bcd44d6af2c339ac5b77d48575669ba | refs/heads/master | 2020-09-13T12:48:25.121448 | 2019-11-19T20:43:59 | 2019-11-19T20:43:59 | 222,785,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,331 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution whose padding preserves spatial size."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """Standard two-conv residual block with an optional projected shortcut."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may downsample spatially via `stride`.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second conv always keeps the resolution.
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Skip path: the raw input, or its projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """ResNet backbone with a classification head returning (logits, probas).

    `block` is the residual block class (e.g. BasicBlock), `layers` the
    number of blocks per stage, `grayscale` selects 1 vs 3 input channels.
    """
    def __init__(self, block, layers, num_classes, grayscale):
        # Running channel count consumed/updated by _make_layer.
        self.inplanes = 64
        if grayscale:
            in_dim = 1
        else:
            in_dim = 3
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        #self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts at identity scale.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n)**.5)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first may stride/project."""
        downsample = None
        # Projection shortcut when resolution or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # because MNIST is already 1x1 here:
        # disable avg pooling
        #x = self.avgpool(x)
        # NOTE(review): flatten assumes the feature map is 1x1 at this point
        # (true for 28x28 inputs); other sizes would break self.fc -- confirm.
        x = x.view(x.size(0), -1)
        logits = self.fc(x)
        probas = F.softmax(logits, dim=1)
        return logits, probas
def resnet18(num_classes,grayscale):
    """Build a ResNet-18: four stages of two BasicBlocks each."""
    return ResNet(
        block=BasicBlock,
        layers=[2, 2, 2, 2],
        num_classes=num_classes,
        grayscale=grayscale,
    )
class SiameseMNISTnet(nn.Module):
    """Siamese comparator: shared ResNet-18 features, |diff| -> MLP -> logit."""
    def __init__(self, num_classes, grayscale):
        super(SiameseMNISTnet, self).__init__()
        # Backbone whose final fc layer is stripped off below.
        self.base_model = ResNet(block=BasicBlock,
                                 layers=[2, 2, 2, 2],
                                 num_classes=num_classes,
                                 grayscale=grayscale)
        self.feature_extract = nn.Sequential(*list(self.base_model.children())[:-1])
        self.fc1 = nn.Linear(512,1024)
        self.fc2 = nn.Linear(1024,1)
    def _embed(self, x):
        # Flattened backbone features for one branch (weights are shared).
        feat = self.feature_extract(x)
        return feat.view(feat.size(0), -1)
    def forward(self, x_1, x_2):
        diff = torch.abs(self._embed(x_1) - self._embed(x_2))
        return self.fc2(self.fc1(diff))
class SiameseNet(nn.Module):
    """Siamese comparator for larger inputs (4096-d flattened features)."""
    def __init__(self, num_classes, grayscale):
        super(SiameseNet, self).__init__()
        # Backbone whose final fc layer is stripped off below.
        self.base_model = ResNet(block=BasicBlock,
                                 layers=[2, 2, 2, 2],
                                 num_classes=num_classes,
                                 grayscale=grayscale)
        self.feature_extract = nn.Sequential(*list(self.base_model.children())[:-1])
        self.fc1 = nn.Linear(4096,512)
        self.fc2 = nn.Linear(512,1)
    def _embed(self, x):
        # Flattened backbone features for one branch (weights are shared).
        feat = self.feature_extract(x)
        return feat.view(feat.size(0), -1)
    def forward(self, x_1, x_2):
        diff = torch.abs(self._embed(x_1) - self._embed(x_2))
        return self.fc2(self.fc1(diff))
| [
"noreply@github.com"
] | driptaRC.noreply@github.com |
11968ac6828cd24eb472205d533c8b9f5c40a035 | 191ad1fc949d5e669fa9619a03525548c04f37ad | /Udemy_100_days_of_python/projects/Snake_Project_Advance/main.py | 7cb34098eabf07d0c7ae284d726860ff6ef841ab | [
"Apache-2.0"
] | permissive | rubix-coder/python-basic-to-professional | 6d466c99f361064db415c185c144f39ca6742b4b | c92ea38f04e4a6130c440b11a872664227af9c93 | refs/heads/main | 2023-08-18T01:42:06.159052 | 2021-10-16T07:11:41 | 2021-10-16T07:11:41 | 376,836,120 | 3 | 1 | Apache-2.0 | 2021-09-10T11:03:10 | 2021-06-14T13:39:14 | Jupyter Notebook | UTF-8 | Python | false | false | 1,144 | py | from turtle import Screen
from snake import Snake
from food import Food
from scoreboard import Scoreboard
import time
# --- Window setup: 600x600 black playfield with manual frame refresh. ---
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor("black")
screen.title("My Snake Game")
# tracer(0) disables automatic redraws; we call screen.update() per frame.
screen.tracer(0)
snake = Snake()
food = Food()
scoreboard = Scoreboard()
# Arrow keys steer the snake.
screen.listen()
screen.onkey(snake.up, "Up")
screen.onkey(snake.down, "Down")
screen.onkey(snake.left, "Left")
screen.onkey(snake.right, "Right")
# NOTE(review): game_is_on is never set to False, so the loop only ends
# when the window is closed.
game_is_on = True
while game_is_on:
    # Redraw, then advance one step (~10 FPS).
    screen.update()
    time.sleep(0.1)
    snake.move()
    # Detect collision with food.
    if snake.head.distance(food) < 15:
        food.refresh()
        snake.extend()
        scoreboard.increase_score()
    # Detect collision with wall.
    if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
        scoreboard.reset()
        snake.reset()
    # Detect collision with tail.
    for segment in snake.segments:
        if segment == snake.head:
            pass
        elif snake.head.distance(segment) < 10:
            scoreboard.reset()
            snake.reset()
screen.exitonclick()
| [
"p.jesal.work@gmail.com"
] | p.jesal.work@gmail.com |
050cb07932aec46d401eabeded891b2a897ef2f5 | d1c8378cdb0cfee06175ba65b4f74e08c77d5b56 | /exp3/Titanic.py | e404d2c3302f15f18bd56df62438c742dc4a5eb2 | [] | no_license | CroesusChen/ML_course | 773118c6c017dbc20cd265b654a4632ff0f6b53f | 77f4c65d6f995248f1d0f73863f99c7017660aa1 | refs/heads/master | 2023-06-18T17:35:02.546811 | 2021-07-16T10:45:19 | 2021-07-16T10:45:19 | 366,622,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,677 | py | # 数据分析
import pandas as pd
import numpy as np
# 绘图
import matplotlib.pyplot as plt
import seaborn as sns
# Raw Kaggle Titanic splits (train has 'Survived'; test does not).
df_train = pd.read_csv('./data/titanic/train.csv')
df_test = pd.read_csv('./data/titanic/test.csv')
# 填充数据值
def fillna_data(df_train, df_test):
    """Impute missing values in place and return both frames.

    Age   -> each frame's own mean.
    Cabin -> new 'Missing' category.
    Train Embarked / test Fare -> most frequent value of that column.
    """
    for frame in (df_train, df_test):
        frame['Age'] = frame['Age'].fillna(frame['Age'].mean())
        frame['Cabin'] = frame['Cabin'].fillna('Missing')
    # mode() drops NaN, so [0] is the most common observed category/value.
    df_train['Embarked'] = df_train['Embarked'].fillna(df_train['Embarked'].mode()[0])
    df_test['Fare'] = df_test['Fare'].fillna(df_test['Fare'].mode()[0])
    return df_train, df_test
# Get the imputed datasets df_train, df_test.
df_train, df_test = fillna_data(df_train, df_test)
# sns.barplot(x='Pclass', y='Survived', data=df_train,
#             palette="Set1",
#             errwidth=1.2,
#             errcolor="0.1",
#             capsize=0.05,
#             alpha=0.6)
# plt.show()
# Keep passenger ids aside for the submission file before dropping them.
id_test = df_test.loc[:, 'PassengerId']
# First pass:
# dropped the features below,
# i.e. analyzed Pclass Sex Age SibSp Parch Embarked.
# df_train = df_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare'])
# df_test = df_test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare'])
# # Second pass:
# # on top of the first, added the normalized Fare feature,
# # i.e. analyzed Pclass Sex Age SibSp Parch Fare Embarked.
# df_train = df_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
# df_test = df_test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
# Third pass (active): on top of the second, also drop SibSp and Parch.
df_train = df_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp', 'Parch'])
df_test = df_test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp', 'Parch'])
# 对数据集中的字符串数据进行编码处理
def preprocess_data(train, test):
    """One-hot encode Embarked, min-max scale Age/Fare, binarize Sex.

    Returns new (train, test) frames. Sex maps female->0, male->1.
    NOTE(review): each frame is scaled with its OWN min/max -- the test set
    is not normalized with training statistics; confirm this is intended.
    """
    sex_codes = {'female': 0, 'male': 1}
    def _transform(frame):
        # Replace 'Embarked' with one-hot 'Embarked_*' columns.
        dummies = pd.get_dummies(frame['Embarked'], prefix='Embarked')
        out = pd.concat([frame, dummies], axis=1)
        out.drop(columns=['Embarked'], inplace=True)
        # Min-max scale Age into [0, 1].
        lo, hi = out['Age'].min(), out['Age'].max()
        out['Age'] = (out['Age'] - lo) / (hi - lo)
        # Fare may have been dropped upstream (first feature pass).
        if 'Fare' in out.columns:
            lo, hi = out['Fare'].min(), out['Fare'].max()
            out['Fare'] = (out['Fare'] - lo) / (hi - lo)
        out['Sex'] = out['Sex'].map(sex_codes)
        return out
    return _transform(train), _transform(test)
data_train, data_test = preprocess_data(df_train, df_test)
# Separate the target column from the training features.
label_train = data_train.loc[:, 'Survived']
data_train = data_train.drop(columns=['Survived'])
# NOTE(review): the Kaggle test split normally has no 'Survived' column;
# this drop only works if this copy of test.csv carries one -- confirm.
data_test = data_test.drop(columns=['Survived'])
from sklearn.model_selection import train_test_split
'''
从原始数据集(source)中拆分出训练数据集(用于模型训练train),测试数据集(用于模型评估test)
train_test_split是交叉验证中常用的函数,功能是从样本中随机的按比例选取train data和test data
train_data:所要划分的样本特征集
train_target:所要划分的样本结果
test_size:样本占比,如果是整数的话就是样本的数量
'''
# Train/validation split used to build and evaluate the models (80/20).
train_X, test_X, train_y, test_y = train_test_split(data_train,
                                                    label_train,
                                                    train_size=.8)
def SVM():
    """Fit an RBF-kernel SVC, print metrics, and write a submission CSV.

    Reads module-level globals train_X/train_y, test_X/test_y, data_test,
    id_test; writes ./output/submission_SVM.csv.
    """
    from sklearn import svm
    from sklearn.metrics import confusion_matrix, classification_report
    from sklearn.model_selection import cross_val_score
    # C: inverse regularization strength; gamma: RBF kernel width.
    model = svm.SVC(C=2, gamma=0.4, kernel='rbf')
    model.fit(train_X, train_y)
    # Confusion matrix and per-class precision/recall/F1 on the held-out split.
    predictions = model.predict(test_X)
    print(confusion_matrix(test_y, predictions))
    print(classification_report(test_y, predictions))
    # 10-fold cross-validated accuracy on each split.
    train_acc = cross_val_score(model, train_X, train_y, cv=10, scoring='accuracy')
    test_acc = cross_val_score(model, test_X, test_y, cv=10, scoring='accuracy')
    print('SVM Model on Train Data Accuracy: %f' %(train_acc.mean()))
    print('SVM Model on Test Data Accuracy: %f' %(test_acc.mean()))
    # Predict the real test set and dump a Kaggle-style submission file.
    submission = pd.DataFrame({'PassengerId': id_test,'Survived': model.predict(data_test)})
    submission.to_csv('./output/submission_SVM.csv',index = False)
    print('submission_SVM.csv生成完毕!')
def RandomForest():
    """Fit a default RandomForestClassifier, print metrics, write submission.

    Reads module-level globals train_X/train_y, test_X/test_y, data_test,
    id_test; writes ./output/submission_RFC.csv. Hyperparameters are left
    at their defaults (untuned).
    """
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import confusion_matrix, classification_report
    from sklearn.model_selection import cross_val_score
    model = RandomForestClassifier()
    model.fit(train_X, train_y)
    # Held-out confusion matrix and per-class report.
    predictions = model.predict(test_X)
    print(confusion_matrix(test_y, predictions))
    print(classification_report(test_y, predictions))
    # 10-fold cross-validated accuracy on each split.
    train_acc = cross_val_score(model, train_X, train_y, cv = 10, scoring = 'accuracy')
    test_acc = cross_val_score(model, test_X, test_y, cv = 10, scoring = 'accuracy')
    print('Random Forest Classifier Model on Train Data Accuracy: %f' % (train_acc.mean()))
    print('Random Forest Classifier Model on Test Data Accuracy: %f' % (test_acc.mean()))
    # Kaggle-style submission file.
    submission = pd.DataFrame({'PassengerId': id_test,'Survived': model.predict(data_test)})
    submission.to_csv('./output/submission_RFC.csv',index = False)
    print('submission_RFC.csv生成完毕!')
def BPNetwork():
    """Fit a two-hidden-layer MLP (64, 32), print metrics, write submission.

    Reads module-level globals train_X/train_y, test_X/test_y, data_test,
    id_test; writes ./output/submission_BP.csv. Unlike SVM/RandomForest,
    accuracy here is plain .score(), not cross-validated.
    """
    from sklearn.neural_network import MLPClassifier
    from sklearn.metrics import confusion_matrix, classification_report
    model = MLPClassifier(hidden_layer_sizes = (64,32), activation = 'relu',
                          solver = 'adam', max_iter = 800)
    model.fit(train_X, train_y)
    # Held-out confusion matrix and per-class report.
    predictions = model.predict(test_X)
    print(confusion_matrix(test_y, predictions))
    print(classification_report(test_y, predictions))
    print('MLP Classifier Model on Train Data Accuracy: %f' % (model.score(train_X, train_y)))
    print('MLP Classifier Model on Test Data Accuracy: %f' % (model.score(test_X, test_y)))
    # Kaggle-style submission file.
    submission = pd.DataFrame({'PassengerId': id_test,'Survived': model.predict(data_test)})
    submission.to_csv('./output/submission_BP.csv',index = False)
    print('submission_BP.csv生成完毕!')
# Entry point: only the MLP experiment runs; the other two models are
# toggled by uncommenting their calls.
# SVM()
# RandomForest()
BPNetwork()
| [
"1142257739@qq.com"
] | 1142257739@qq.com |
7d4a0dec4df51a61effdee4d6fd9465caf9543be | 31fee9dff7edc3b319df4a31025a31f5030dbea9 | /visualizations/ws_2d/stimulus.py | da0d969cd7e331f537d6928359068e039b311342 | [] | no_license | bshimanuki/6.888 | 3dd81cc55accce7c3db55bde8aaee65d118fe158 | d4b53725600694e0bb3025b6da2391177328ed21 | refs/heads/master | 2021-08-28T08:33:58.460647 | 2017-12-11T18:42:43 | 2017-12-11T18:43:01 | 110,302,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,932 | py | from scipy.signal import correlate2d
import numpy as np
from nnsim.module import Module
from .serdes import InputSerializer, OutputDeserializer
def conv(x, W, b):
    """Reference same-padded 2-D cross-correlation plus per-channel bias.

    x: (H, W, Cin) integer feature map; W: (kH, kW, Cin, Cout) weights;
    b: (Cout,) biases. Returns an int64 array of shape (H, W, Cout).
    """
    height, width = x.shape[0], x.shape[1]
    n_in, n_out = W.shape[2], W.shape[3]
    y = np.zeros([height, width, n_out]).astype(np.int64)
    for oc in range(n_out):
        # Start from the bias, then accumulate each input channel's response.
        total = np.full((height, width), b[oc], dtype=np.int64)
        for ic in range(n_in):
            total += correlate2d(x[:, :, ic], W[:, :, ic, oc], mode="same")
        y[:, :, oc] = total
    return y
class Stimulus(Module):
    """Test-bench module feeding random conv inputs in and checking outputs.

    Wires an InputSerializer onto `input_chn` and an OutputDeserializer onto
    `output_chn`; the deserializer compares hardware output against the
    software reference produced by conv().
    """
    def instantiate(self, arr_x, arr_y, chn_per_word, input_chn, output_chn):
        # PE static configuration (immutable)
        self.arr_x = arr_x
        self.arr_y = arr_y
        self.chn_per_word = chn_per_word
        self.input_chn = input_chn
        self.output_chn = output_chn
        self.serializer = InputSerializer(self.input_chn, self.arr_x,
                                          self.arr_y, self.chn_per_word)
        self.deserializer = OutputDeserializer(self.output_chn, self.arr_x,
                                               self.arr_y, self.chn_per_word)
    def configure(self, image_size, filter_size, in_chn, out_chn):
        """Generate random int64 ifmap/weights/bias and the golden output.

        NOTE(review): the three np.random draws happen in ifmap, weights,
        bias order -- reordering them would change a seeded run.
        """
        # Test data
        # ifmap = np.zeros((image_size[0], image_size[1],
        #                   in_chn)).astype(np.int64)
        ifmap = np.random.normal(0, 10, (image_size[0], image_size[1],
                                         in_chn)).astype(np.int64)
        weights = np.random.normal(0, 10, (filter_size[0], filter_size[1], in_chn,
                                           out_chn)).astype(np.int64)
        bias = np.random.normal(0, 10, out_chn).astype(np.int64)
        # ofmap is the zero-initialized buffer the deserializer fills.
        ofmap = np.zeros((image_size[0], image_size[1],
                          out_chn)).astype(np.int64)
        # Reference Output
        reference = conv(ifmap, weights, bias)
        self.serializer.configure(ifmap, weights, bias, image_size, filter_size)
        self.deserializer.configure(ofmap, reference, image_size)
| [
"robertverkuil@31-34-139.wireless.csail.mit.edu"
] | robertverkuil@31-34-139.wireless.csail.mit.edu |
ba0be7176dc3d517c5dd3bdcb5cbbeee96a7aa3a | db241b5457dd8df4239214bdc31a221d7ffa7e25 | /All_Code/Books/DeepLearningBasis/Chp06/overfit_weight_decay.py | 1ef9b9c9bfbc49a15fae9240c710eb9249ccee9c | [] | no_license | TaoistQu/AI | cd8c26bf73aef20e5c48bd928a2067fdfef6241f | 1f14dd5c202d3291a8867c53110f78f33c5ce316 | refs/heads/main | 2023-06-01T08:15:04.435754 | 2023-05-16T13:16:18 | 2023-05-16T13:16:18 | 263,840,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2022 #
# @Time : 2022/11/8 23:28
# @Author : TaoistQu
# @Email : qulei_20180331@163.com
# @File : overfit_weight_decay.py
# @Software: PyCharm
import os
import sys
from dataset.mnist import load_mnist
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
import numpy as np
import matplotlib.pyplot as plt
# Deliberately small training set (300 samples) to provoke overfitting,
# countered here by L2 weight decay (lambda = 0.1).
(x_train,t_train),(x_test,t_test) = load_mnist(normalize=True)
x_train = x_train[:300]
t_train = t_train[:300]
weight_decay_lambda = 0.1
# Five hidden layers of 100 units each, 10-way output.
network = MultiLayerNet(input_size=784,hidden_size_list=[100,100,100,100,100],output_size=10,
                        weight_decay_lambda=weight_decay_lambda)
optimizer = SGD(lr=0.01)
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size,1)
epoch_cnt = 0
# Effectively "loop forever"; the break below ends training at max_epochs.
for i in range(1000000000):
    # One SGD step on a random mini-batch.
    batch_mask = np.random.choice(train_size,batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    grads = network.gradient(x_batch,t_batch)
    optimizer.update(network.params,grads)
    # Once per epoch, record train/test accuracy.
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train,t_train)
        test_acc = network.accuracy(x_test,t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("epoch:"+str(epoch_cnt)+", train acc:"+str(train_acc)+", test acc:"+str(test_acc))
        epoch_cnt += 1
        if epoch_cnt >= max_epochs:
            break
# Plot the accuracy gap between train and test curves.
# NOTE(review): `markers` is never used below -- the styles are hard-coded
# in the plot calls.
markers = {'train':'o','test':'s'}
x = np.arange(max_epochs)
plt.plot(x,train_acc_list,marker='o',label='train',markevery=10)
plt.plot(x,test_acc_list,marker='s',label='test',markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0,1.0)
plt.legend(loc='lower right')
plt.show()
| [
"qulei_20180331@163.com"
] | qulei_20180331@163.com |
a6fb2197fbf80b1c53e59f37f84370f5749ed5e1 | b5dd8d1b798c94731a84c02d98aafb9147200a85 | /sequence_labeling/SLBaselineSYNLinear/data/Instance.py | 6ed47e34116c6f3ff8176de9230c270b70bc070a | [] | no_license | zhangmeishan/DepSAWR | 1ae348dd04ec5e46bc5a75c8972b4bc4008528fe | 104f44fd962a42fdee9b1a9332997d35e8461ff4 | refs/heads/master | 2021-07-09T20:56:56.897774 | 2020-10-27T05:41:08 | 2020-10-27T05:41:08 | 206,974,879 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,126 | py | class Word:
def __init__(self, id, form, label):
self.id = id
self.org_form = form
self.form = form.lower()
self.label = label
# 1 indicates word, 0 indicates syn
self.wtype = 0 if label == "###" else 1
def __str__(self):
values = [str(self.id), self.org_form, self.label]
return '\t'.join(values)
class Sentence:
    """A token sequence possibly interleaved with syntax nodes.

    Word-level views (forms/labels/wposis) skip syntax nodes; the key_*
    fields locate the key span whose head label ends with "-*".
    `wposis` maps word index -> token index; `r_wposis` maps token index ->
    word index (-1 for syntax nodes).
    """
    def __init__(self, words):
        self.words = list(words)
        self.length = len(self.words)
        # Defaults: no key span found yet.
        self.key_head, self.key_start, self.key_end = -1, -1, -1
        self.key_label = ""
        self.span = False
        self.wkey_head, self.wkey_start, self.wkey_end = -1, -1, -1
        self.wlength = 0
        self.forms = []
        self.labels = []
        self.wposis = []
        self.r_wposis = []
        for pos, tok in enumerate(self.words):
            if tok.wtype != 1:
                self.r_wposis.append(-1)
                continue
            self.wlength += 1
            self.forms.append(tok.org_form)
            self.labels.append(tok.label)
            self.r_wposis.append(len(self.wposis))
            self.wposis.append(pos)
        self.sentence = ' '.join(self.forms)
        # The key head is the first token whose label carries the "-*" mark.
        for pos, tok in enumerate(self.words):
            if tok.label.endswith("-*"):
                self.key_head = pos
                self.wkey_head = self.r_wposis[pos]
                self.key_label = tok.label[2:-2]
                break
        if self.key_head == -1:
            # No key span: start sits past the end, end before the beginning.
            self.key_start, self.wkey_start = self.length, self.wlength
            self.key_end, self.wkey_end = -1, -1
            return
        self.span = True
        begin_pat = "B-" + self.key_label
        end_pat = "E-" + self.key_label
        single_pat = "S-" + self.key_label
        for pos, tok in enumerate(self.words):
            lab = tok.label
            # The last matching B-/S- wins as the span start...
            if lab.startswith(begin_pat) or lab.startswith(single_pat):
                self.key_start = pos
                self.wkey_start = self.r_wposis[pos]
            # ...and the last matching E-/S- as the span end.
            if lab.startswith(end_pat) or lab.startswith(single_pat):
                self.key_end = pos
                self.wkey_end = self.r_wposis[pos]
def label_to_entity(labels):
    """Decode a BMES-style label list into a set of "[start,end]type" strings.

    Labels carrying the "-*" key marker are recognized but excluded from the
    result. Ill-formed labels are repaired IN PLACE (with a log line) and
    re-interpreted: a lone B- becomes S-, an orphan M- is promoted to B-,
    and anything else collapses to S-.
    """
    entities = set()
    n = len(labels)
    pos = 0
    while pos < n:
        cur = labels[pos]
        if cur == "O":
            pos += 1
            continue
        if cur.startswith("B-"):
            etype = cur[2:]
            starred = etype.endswith("-*")
            if starred:
                etype = etype[0:-2]
            # Extend the span over M-/E- continuations of the same type.
            last = pos
            probe = pos + 1
            while probe < n:
                nxt = labels[probe]
                if nxt == "O" or nxt.startswith("B-") or nxt.startswith("S-"):
                    break
                ntype = nxt[2:]
                if ntype.endswith("-*"):
                    ntype = ntype[0:-2]
                    starred = True
                if ntype != etype:
                    break
                last = probe
                probe += 1
            if last == pos:
                # A lone B- is really a singleton: rewrite it as S-.
                fixed = "S-" + labels[pos][2:]
                print("Change %s to %s" % (labels[pos], fixed))
                labels[pos] = fixed
            if not starred:
                entities.add("[%d,%d]%s" % (pos, last, etype))
            pos = last + 1
        elif cur.startswith("S-"):
            etype = cur[2:]
            starred = etype.endswith("-*")
            if starred:
                etype = etype[0:-2]
            if not starred:
                entities.add("[%d,%d]%s" % (pos, pos, etype))
            pos += 1
        elif cur.startswith("M-"):
            # Orphan M-: promote to B- and reprocess this position.
            fixed = "B-" + cur[2:]
            print("Change %s to %s" % (cur, fixed))
            labels[pos] = fixed
        else:
            # Anything else (e.g. orphan E-): treat as a singleton S-.
            fixed = "S-" + cur[2:]
            print("Change %s to %s" % (cur, fixed))
            labels[pos] = fixed
    return entities
def normalize_labels(labels):
    """Repair a BMES label sequence in place and return a normalized copy.

    Returns (normed_labels, change) where *change* counts the rewritten
    positions.  The same repairs as label_to_entity are applied, but
    silently: a one-token "B-" span becomes "S-", a dangling "M-" becomes
    "B-" (and is re-processed), and any other malformed label becomes
    "S-" (and is re-processed).  NOTE: the input list *labels* is mutated
    as well as the returned copy.
    """
    length = len(labels)
    change = 0
    normed_labels = []
    # Start from a positional copy of the input labels.
    for idx in range(length):
        normed_labels.append(labels[idx])
    idx = 0
    while idx < length:
        if labels[idx] == "O":
            idx = idx + 1
        elif labels[idx].startswith("B-"):
            label = labels[idx][2:]
            if label.endswith("-*"):
                label = label[0:-2]
            next_idx = idx + 1
            end_idx = idx
            # Walk forward while following labels continue the same type.
            while next_idx < length:
                if labels[next_idx] == "O" or labels[next_idx].startswith("B-") \
                        or labels[next_idx].startswith("S-"):
                    break
                next_label = labels[next_idx][2:]
                if next_label.endswith("-*"):
                    next_label = next_label[0:-2]
                if next_label != label:
                    break
                end_idx = next_idx
                next_idx = next_idx + 1
            if end_idx == idx:
                # Single-token "B-" span: rewrite as "S-".
                new_label = "S-" + labels[idx][2:]
                # print("Change %s to %s" % (labels[idx], new_label))
                labels[idx] = new_label
                normed_labels[idx] = new_label
                change = change + 1
            idx = end_idx + 1
        elif labels[idx].startswith("S-"):
            idx = idx + 1
        elif labels[idx].startswith("M-"):
            # Dangling "M-": promote to "B-"; idx is intentionally not
            # advanced so the rewritten label is processed again.
            new_label = "B-" + labels[idx][2:]
            # print("Change %s to %s" % (labels[idx], new_label))
            normed_labels[idx] = new_label
            labels[idx] = new_label
            change = change + 1
        else:
            # Any other malformed label: demote to "S-" and re-process.
            new_label = "S-" + labels[idx][2:]
            # print("Change %s to %s" % (labels[idx], new_label))
            normed_labels[idx] = new_label
            labels[idx] = new_label
            change = change + 1
    return normed_labels, change
def evalInstance(gold, predict):
    """Count (gold, predicted, correct) units for one sentence pair.

    In span mode the units are whole entities extracted with
    label_to_entity; otherwise each position's label is compared directly.

    Raises:
        Exception: when the two sentences differ in length.
    """
    if gold.length != predict.length:
        raise Exception('gold length does not match predict length.')
    gold_labels, predict_labels = gold.labels, predict.labels
    if gold.span:
        gold_entities = label_to_entity(gold_labels)
        predict_entities = label_to_entity(predict_labels)
        matched = sum(1 for entity in gold_entities if entity in predict_entities)
        return len(gold_entities), len(predict_entities), matched
    matched = sum(
        1 for pos in range(gold.length) if gold_labels[pos] == predict_labels[pos]
    )
    return len(gold_labels), len(predict_labels), matched
def readInstance(file):
    """Yield Sentence objects parsed from a tab-separated CoNLL-style stream.

    A blank line or a line starting with '#' terminates the current
    sentence.  Sentences with fewer than two words are silently dropped,
    and rows whose first column is not an integer are skipped
    (best-effort parsing).  Prints the total sentence count at the end.
    """
    min_count = 1
    total = 0
    pending = []
    for line in file:
        stripped = line.strip()
        columns = stripped.split('\t')
        if not columns or stripped == '' or stripped.startswith('#'):
            if len(pending) > min_count:
                total += 1
                yield Sentence(pending)
            pending = []
        elif len(columns) == 3:
            try:
                pending.append(Word(int(columns[0]), columns[1], columns[2]))
            except Exception:
                pass  # malformed id column -- skip this row
    if len(pending) > min_count:
        total += 1
        yield Sentence(pending)
    print("Total num: ", total)
def writeInstance(filename, sentences):
    """Write *sentences* to *filename*: one str(word) per line, with a
    blank line terminating each sentence (the readInstance format)."""
    with open(filename, 'w') as out:
        for sentence in sentences:
            for word in sentence.words:
                out.write('%s\n' % (word,))
            out.write('\n')
def printInstance(output, sentence):
    """Write one sentence to the *output* stream, one str(word) per line,
    followed by a terminating blank line."""
    for word in sentence.words:
        output.write('%s\n' % (word,))
    output.write('\n')
"mason.zms@gmail.com"
] | mason.zms@gmail.com |
a50a5b14d076ee204bff5cbec2ad62033c1a5479 | eec76ff305838ebc64e2a5199a53c9cdf934e4c0 | /venv/Scripts/pip3-script.py | 61faeb6409f603884ab8c1e08170273b968eac2a | [] | no_license | abbbhardwaj/BeBot | 0011084f57aa5a20605dce79228e0ff22dcf023e | e3296013630b6c7481093bdfa26d9ae61de70587 | refs/heads/master | 2020-03-31T06:12:31.538064 | 2018-10-07T18:30:46 | 2018-10-07T18:30:46 | 151,972,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #!C:\Users\divya\PycharmProjects\Bbot\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script launcher for "pip3" (pip 9.0.1).
if __name__ == '__main__':
    # Strip the "-script.py"/"-script.pyw"/".exe" suffix that Windows
    # launchers append, so pip reports a clean program name in usage text.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
    )
| [
"abhinav.bhardwaj05@gmail.com"
] | abhinav.bhardwaj05@gmail.com |
ae6e3f2fa193e2f24cc46236bc77b714f5a12766 | 4ff2c34eaadab04217ee1b919c2d8f144f2be0d7 | /学习/test9.py | 1b3adbef4c3126b4ec89bc9fbca09124cd74dec8 | [] | no_license | DCDCBigBig/DianFall2021 | 485db3ac6e48d7b96db4481d332146f044e3447e | 4cc8c8fa34a4be38ee0ee62bb043129af0de6f65 | refs/heads/main | 2023-09-04T03:12:40.534878 | 2021-10-21T16:20:30 | 2021-10-21T16:20:30 | 419,790,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | import numpy as np
import cv2

# Print the integer value of OpenCV's 16-bit unsigned depth constant.
# NOTE(review): modern cv2 bindings may not expose CV_16U at the top
# level -- confirm the installed OpenCV version supports this attribute.
print(cv2.CV_16U)
"ctdingchang23@163.com"
] | ctdingchang23@163.com |
4d59b6d7525d2424cccd3c6215409bdfb7e78f33 | 171a89102edf10901e18a2c0f41c3313608d2324 | /src/rogerthat/bizz/job/unschedule_service_api_callback_records.py | 0a04dd74317213aea6716a58732b45ec57e5498c | [
"Apache-2.0"
] | permissive | gitter-badger/rogerthat-backend | 7e9c12cdd236ef59c76a62ac644fcd0a7a712baf | ab92dc9334c24d1b166972b55f1c3a88abe2f00b | refs/heads/master | 2021-01-18T06:08:11.435313 | 2016-05-11T08:50:20 | 2016-05-11T08:50:20 | 58,615,985 | 0 | 0 | null | 2016-05-12T06:54:07 | 2016-05-12T06:54:07 | null | UTF-8 | Python | false | false | 1,170 | py | # -*- coding: utf-8 -*-
# Copyright 2016 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.1@@
from rogerthat.dal.service import get_service_api_callback_records_query
from google.appengine.ext import db, deferred
def run(service_user, cursor=None):
    """Flip the timestamp of a service's API callback records to negative.

    Processes the query in batches of 100; when a full/non-empty batch was
    fetched, re-schedules itself via deferred.defer with the query cursor
    to continue from where this batch stopped.
    """
    query = get_service_api_callback_records_query(service_user)
    query.with_cursor(cursor)
    records = query.fetch(100)
    updated = []
    for record in records:
        # Negate unconditionally: already-negative timestamps stay negative.
        record.timestamp = -abs(record.timestamp)
        updated.append(record)
    db.put(updated)
    if records:
        return deferred.defer(run, service_user, query.cursor(), _transactional=db.is_in_transaction())
| [
"bart@mobicage.com"
] | bart@mobicage.com |
9e7c824bb664b478189cd3de400a4248b0012973 | bcb83cd597ac6a6ff06f1594750fb0a1db5024b6 | /opinion_mining/AMCBoot.py | c3778d895f257d33f38f8e8c700cc0c932e3f98a | [] | no_license | sherrylml/Opinion-Mining | 7cd95135ff900ca1f63f9c2056775777983efa9e | 81f15c5744258bc9699af6b688e27d34ba09a7ca | refs/heads/master | 2021-01-25T07:39:46.657978 | 2015-05-31T07:39:04 | 2015-05-31T07:39:04 | 35,430,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,946 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'LML_CH'
__mtime__ = '2015/5/9'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
from __future__ import division
import math
from numpy.ma import sort,log
import re
import numpy
from scipy import spatial
from numpy import savetxt, loadtxt
import nltk
# from opinion_mining.AMC_preprocess import domain_preprocess
from Opinion_Mining.opinion_mining.AMC_preprocess import domain_preprocess
# Module-level setup: load the English stopword list once at import time.
f = open(r'E:\python_workplace\Opinion_Mining\Data\English_stopwords.txt', encoding='utf-8')
stopwords = set(line.strip() for line in f.readlines())  # read in the stopwords
lemitaion = nltk.WordNetLemmatizer()  # shared WordNet lemmatizer ("lemitaion" [sic])
f.close()
# Characters meant to be stripped from tokens (see pre_proc / lemitate).
ignorechars = ''',:'.;!()#-./1234567890'''
def pre_proc(C):
    """Clean a token list: strip punctuation/digit characters, drop
    stopwords and tokens shorter than 3 characters, and lemmatize
    (noun pass, then verb pass), filtering again after each pass.

    Bug fix: the original called w.replace(ignorechars, ""), which only
    removes the whole 21-character ignorechars string when it appears as
    one literal substring -- i.e. essentially never.  str.translate with
    a deletion table removes each character individually, which is what
    the cleanup was intended to do.
    """
    strip_table = str.maketrans('', '', ignorechars)
    C = [w.translate(strip_table) for w in C]
    C = [lemitaion.lemmatize(w) for w in C if w not in stopwords and len(w) >= 3]
    C = [lemitaion.lemmatize(w, pos='v') for w in C if w not in stopwords and len(w) >= 3]
    C = [w for w in C if w not in stopwords and len(w) >= 3]
    return C
def KL_Measure(i, j):
    """Symmetrised KL divergence mapped into a similarity score.

    Averages KL(i||j) and KL(j||i) and squashes the result through
    1 / (1 + e^D): identical distributions score 0.5, very different
    ones approach 0.  `log` here is numpy.ma.log, so `.data` unwraps
    the masked array before the element-wise product.
    """
    forward = sum(i*(log(i/j).data))
    backward = sum(j*(log(j/i).data))
    mean_divergence = (forward + backward)/2
    return 1/(1 + math.e ** mean_divergence)
    # return sum(kl_div(i,j))
def lemitate(w):
    """Strip punctuation/digit characters from *w*, then lemmatize it as
    a noun and again as a verb.  (Function name keeps the original
    "lemitate" spelling for caller compatibility.)

    Bug fix: w.replace(ignorechars, "") removed only the entire
    ignorechars string as a single literal substring (a near no-op);
    str.translate drops each listed character, as intended.
    """
    w = w.translate(str.maketrans('', '', ignorechars))
    w = lemitaion.lemmatize(w)
    w = lemitaion.lemmatize(w, pos='v')
    return w
def getVocabulary():
    """Extract a token vocabulary and noun phrases from the parser output.

    Reads parse_result.txt and writes two files: Nokia6610.txt (cleaned,
    lemmatized unigrams/compounds, comma-separated per review) and
    noun_prase.txt (noun-noun compounds from "#nn" dependency lines).
    Finally runs domain_preprocess on the generated vocabulary file.
    """
    f1 = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\Nokia6610.txt', 'w')
    f2 = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\noun_prase.txt', 'w')
    f3 = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\parse_result.txt', encoding='utf-8')
    CF = []
    NP = []
    flag = 1
    w1 = ''
    w2 = ''
    for line in f3:
        line = line.replace("*'", "")
        if line.startswith("result:"):
            # "result:" marks the start of a new parsed sentence/review.
            NP = []
            temp = []
            f2.write('\n') #note: remove the first one \n
            f1.write('\n')
        elif line.startswith("#"):
            # Dependency lines such as "#nn(head-3, modifier-2)".
            if line.startswith("#nn"):
                line = re.match(r'.*\((.*)-\d*\'*,\s(.*)-\d*\'*\)$', line).groups()
                word = ' '.join([lemitate(line[1]),lemitate(line[0])])
                # NOTE(review): this second assignment overwrites the
                # lemmatized compound with the raw one -- confirm intended.
                word = ' '.join([line[1],line[0]])
                NP.append(word)
                f2.write(word + ',')
        else:
            # CoNLL-style token row: column 1 is the word form,
            # column 7 the dependency relation.
            if line.split("\t")[7] == 'nn':
                # First half of a noun-noun compound; join with next token.
                w1 = line.split("\t")[1]
                flag = 0
            else:
                if flag == 0:
                    w2 = line.split("\t")[1]
                    w = ' '.join([w1,w2])
                    flag = 1
                else:
                    w = line.split("\t")[1]
                # NOTE(review): replace() with the whole ignorechars string
                # is effectively a no-op; a per-character strip was likely
                # intended (see pre_proc/lemitate).
                w = w.replace(ignorechars, "")
                if len(w)>2 and w not in stopwords:
                    w = lemitate(w)
                    if len(w)>2 and w not in stopwords:
                        f1.write(w + ',')
    f1.close()
    f2.close()
    f3.close()
    domain_preprocess(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\Nokia6610.txt',r'E:\eclipse_workplace\AMC\Data\Input\100Reviews\Electronics')
def get_CF():
    """Collect candidate feature (CF) and opinion (CO) word lists.

    Scans the dependency parse output: noun-phrase tokens related to each
    sentence's root verb become candidate features, NN-tagged tokens feed
    the noun pool, and JJ/VB-tagged tokens become candidate opinion words.
    A hand-curated `addition` list of known features is appended, and both
    lists are returned after pre_proc cleanup as (CO, CF).
    """
    CF = []
    CO = []
    CF_N = []
    N = []
    NN = []
    temp = []
    root = ''
    f = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\parse_result.txt', encoding='utf-8')
    p = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\noun_phrase.txt',encoding='utf-8')
    NP = [line.strip().split(',') for line in p.readlines()]
    p.close()
    index = 0
    for line in f:
        line = line.replace("*'", "")
        if line.startswith("result:"):
            # New sentence: reset the per-sentence noun-phrase list.
            temp = []
            N = NP[index]
            index += 1
        elif line.startswith("#"):
            # Dependency line: extract (head, dependent) word pair.
            if re.match(r'.*\((.*)-\d*\'*,\s(.*)-\d*\'*\)$', line):
                line = re.match(r'.*\((.*)-\d*\'*,\s(.*)-\d*\'*\)$', line).groups()
            else:
                print(line)
            if line[0] == root:
                w = line[1]
                # NOTE(review): replace() with the whole ignorechars string
                # is effectively a no-op (see pre_proc for the fixed idiom).
                w = w.replace(ignorechars, "")
                # if w in N:
                # NOTE(review): `flag` only reflects the LAST token tested
                # below, not "no token matched" -- confirm this is intended.
                flag = 0
                for token in N:
                    if token.__contains__(w) and token not in temp:
                        temp.append(token)
                        CF.append(token)
                        flag = 0
                    else:
                        flag = 1
                if flag and w in NN:
                    CF.append(w)
        elif line.split("\t")[7]=='root':
            # Remember the sentence's root word for the dependency checks.
            root = line.split("\t")[1]
            # w = ''
            # if line.startswith("#nsubj") or line.startswith("#pobj") or line.startswith("#dobj"):
            # line = re.match(r'.*\((.*)-\d*\'*,\s(.*)-\d*\'*\)$', line).groups()
            # w = line[1]
            # elif line.startswith("#det"):
            # line = re.match(r'.*\((.*)-\d*\'*,\s(.*)-\d*\'*\)$', line).groups()
            # w = line[0]
            # w = w.replace(ignorechars, "")
            # flag = 0
            # for token in N:
            # if token.__contains__(w) and token not in temp:
            # temp.append(token)
            # CF.append(token)
            # flag = 0
            # else:
            # flag = 1
            # if flag and w in NN and w not in temp:
            # temp.append(w)
            # CF.append(w)
        elif line.__contains__("NN"):
            # Noun token: candidate feature noun.
            word = line.split("\t")[1]
            NN.append(word)
            CF_N.append(word)
        elif line.__contains__("JJ") or line.__contains__("VB") :
            # Adjective/verb token: candidate opinion word.
            word = line.split("\t")[1]
            CO.append(word)
    # f = open(r'E:\python_workplace\hai2012\corpus\truefeature.txt', encoding='utf-8')
    # TF = []
    # for line in f.readlines():
    # line.replace(', ',',')
    # if ',' in line:
    # tmp = line.split(',')
    # for t in tmp:
    # TF.append(t.strip())
    # else:
    # TF.append(line.strip())
    # CF = TF
    # CF = CF_N
    # Hand-curated list of known Nokia 6610 features appended to the
    # automatically extracted candidates.
    addition = ['at&t customer service', 'infrared', 'infrared', 'sprint plan', 'sprint customer service', 'sturdy', 'ringtone', 'background', 'screensaver', 'memory', 'menu options', 't-mobile reception', 't-zone', 't-zone', 't-mobile', 'customer rep', 'call', 'phone performance', 'look', 't-mobile', 'voice dialing', 'message', 'fm', 'operate', 'button', 'key', 'volume', 't-mobile', 'high speed internet', 'ringing tone', 'ring tone', 'game', 'button', 'size', 'size', 'key', 'vibrate setting', 'vibrate setting', 'voice dialing', 'voice dialing', 'picture', 'ringtone', 'key lock', 'ring tone', 'fm radio', 'weight', 'wallpaper', 'tune', 'size', 'size', 'key', 'pc cable', 'loud phone', 'size', 'application', 'pc suite', 'size', 'game', 'ringtone', 'ergonomics', 'size', 'size', 'volume', 'volume', 'size', 'weight', 'ringtone', 'volume', 'weight', 'pc sync', 'tone', 'wallpaper', 'application', 'message', 'picture sharing', 'mms', 'size', 'voice dialing', 'key', 'application', 'size', 'speakerphone', 'look', 'default ringtone', 't-mobile', 'ringtone', 'speakerphone', 'size', 'look', 'weight', 'browsing', 'game', 'battery life', 'voice dialing', 'command', 'button', 'key', 't-mobile', 't-mobile', 'size', 'earpiece', 'voice dialing', 'ringtone', 'gprs', 't-zone', 't-zone', 't-mobile service', 'rate plan', 'weight', 'signal']
    CF += addition
    return pre_proc(CO),pre_proc(CF)
def seed_mustlinks():
    """Grow the seed feature set from the must-link knowledge file.

    The file contains whitespace-separated word pairs (two consecutive
    tokens form one must-link).  Starting from {'phone', 'headphone'},
    repeatedly add the partner of any token already in the set until no
    new word can be added (transitive closure over the must-links).
    """
    f = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\Nokia6610.knowl_mustlinks', encoding='utf-8')
    links = []
    for line in f:
        links.extend(re.split("\s", line.strip()))
    f.close()
    S = ['phone', 'headphone']
    grew = True
    while grew:
        grew = False
        for idx, token in enumerate(links):
            if token not in S:
                continue
            # Tokens pair up positionally: even index links to idx+1,
            # odd index links back to idx-1.
            partner = links[idx + 1] if idx % 2 == 0 else links[idx - 1]
            if partner not in S:
                S.append(partner)
                grew = True
    return S
def get_pairwise():
    """Build a word-by-topic weight matrix from the AMC topic-model output
    and return (sorted vocabulary, pairwise cosine-distance matrix).

    Reads <model>.twords (one topic's words per line, tab-separated) and
    <model>.twdist (per-topic weight distributions).  The weight rows are
    sorted descending and truncated to 100 columns before transposing, so
    w_t[word, topic] receives the weight at that word's rank within its
    topic (assumes the .twords ordering matches the sorted weights --
    TODO confirm against the AMC output format).  Also writes the distance
    matrix to ../Data/pairwise.txt as a side effect.
    """
    ntopic = 100
    # f = open(r'E:\python_workplace\hai2012\corpus\corpus_NP\corpus_NP.twords', encoding='utf-8')
    # tword_array = loadtxt(r'E:\python_workplace\hai2012\corpus\corpus_NP\corpus_NP.twdist')
    f = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\Nokia6610.twords', encoding='utf-8')
    tword_array = loadtxt(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\Nokia6610.twdist')
    tword_array = -sort(-tword_array,axis=1)
    tword_array = tword_array[:,0:100].transpose()
    wdict = {}
    for num, line in enumerate(f):
        if num == 0:
            pass # skip the header line
        else:
            words = re.split("\t",line.strip())
            dcount = 0
            # Record every (topic, rank) position each word occurs at.
            for w in words:
                if w in wdict:
                    wdict[w].append((num-1,dcount))
                elif len(w)>1:
                    wdict[w] = [(num-1,dcount)]
                dcount += 1
    f.close()
    print (wdict)
    keys = [k for k in wdict.keys()]
    keys.sort()
    print (keys)
    # w_t = numpy.zeros([len(keys), ntopic])
    # Tiny epsilon default avoids zero rows for the cosine distance below.
    w_t = numpy.ones([len(keys), ntopic]) * 0.000001
    for i, k in enumerate(keys):
        for d in wdict[k]:
            w_t[i,d[1]] = tword_array[d[0]][d[1]]
    print(w_t)
    print(w_t.size)
    pairwise = spatial.distance.squareform(spatial.distance.pdist(w_t, metric = "cosine"))
    # pairwise = spatial.distance.squareform(spatial.distance.pdist(w_t, lambda i,j: KL_Measure(i, j)))
    pairwise_filename = r'../Data/pairwise.txt'
    savetxt(pairwise_filename, pairwise, fmt='%.8f')
    print (pairwise)
    print (pairwise.size)
    return keys, pairwise
# Computed once at import time; A() below reads these module globals.
keys,pairwise = get_pairwise()
def A(x, y):
    """Look up the pairwise cosine distance between words *x* and *y*.

    Returns 1 (maximum distance, i.e. "unrelated") when either word is
    missing from the topic-model vocabulary `keys`.
    """
    try:
        row = keys.index(x)
        col = keys.index(y)
    except ValueError:
        # One of the words is not in the vocabulary.
        return 1
    return pairwise[row, col]
def getCommonWords():
    """Return the hand-curated list of domain-irrelevant ("common") words.

    These would ideally be computed with DomainRelevace.py over an
    out-of-domain corpus, but that corpus is too large, so the result is
    hard-coded here (duplicates are preserved from the original list).
    """
    return [
        'people', 'thing', 'year', 'hour', 'minute', 'time',
        'motorola', 'samsung', 's105', 'number', 'house', 'cell',
        'night', 'number',
    ]
def main():
    """Run bootstrapped feature/opinion extraction over a range of
    similarity thresholds and print precision/recall/F1 per threshold.

    For each threshold: seed S with {'phone', 'headphone'}, then absorb
    candidate features (CF) and opinion words (CO) whose A() distance to
    a word already accepted is within the threshold, iterating to a
    fixpoint.  Results are compared against the gold feature/opinion
    files.  NOTE(review): the inner loops remove items from CF/CO while
    iterating over them, which skips the element after each removal; the
    outer fixpoint loop partially compensates -- confirm intended.
    """
    print ("result***********")
    threth_list = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99,1,2]
    # threth_list = [0]
    for threth in threth_list:
        print ("threth=", threth)
        CO,CF = get_CF()
        # CF = sum(C,[])
        print("##### CF,CO #####")
        print(CF)
        print(CO)
        # S = seed_mustlinks()
        S = ['phone','headphone']
        # print("##### S #####")
        # print(S)
        F = []
        O = []
        # Separate thresholds for feature-feature, feature-opinion and
        # opinion-opinion links (all set to the same value here).
        ffth = threth
        foth = threth
        ooth = threth
        flag = 0
        # Fixpoint loop: repeat until a full pass absorbs nothing new.
        while (flag == 0):
            flag = 1
            for f in S:
                for cf in CF:
                    if A(f, cf) <= ffth:
                        S.append(cf)
                        F.append(cf)
                        CF.remove(cf)
                        flag = 0
                for co in CO:
                    if A(f, co) <= foth:
                        O.append(co)
                        CO.remove(co)
                        flag = 0
            for o in O:
                for co in CO:
                    if A(o, co) <= ooth:
                        O.append(co)
                        CO.remove(co)
                        flag = 0
                for cf in CF:
                    if A(o, cf) <= foth:
                        S.append(cf)
                        F.append(cf)
                        CF.remove(cf)
                        flag = 0
        # Filter out domain-irrelevant words from the extracted features.
        CommonWords = getCommonWords()
        F = [item for item in F if item not in CommonWords]
        print (F)
        print (O)
        f1 = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\feature_amc.txt', 'w')
        for feature in F:
            f1.writelines(feature + '\n')
        f1.close()
        # Load the gold feature list (comma-separated or one per line).
        f = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\true_feature.txt', encoding='utf-8')
        TF = []
        for line in f.readlines():
            # NOTE(review): the replace() result is discarded (strings are
            # immutable), so this line is a no-op -- confirm intended.
            line.replace(', ',',')
            if ',' in line:
                tmp = line.split(',')
                for t in tmp:
                    TF.append(t.strip())
            else:
                TF.append(line.strip())
        # print (TF)
        print (len(TF))
        print (len(F))
        # Feature extraction scoring: TP/FP over F, FN = unmatched gold.
        TP = 0
        FP = 0
        # FN = 0
        test = []
        for cf in F:
            if cf in TF:
                TP += 1
                test.append(cf)
                TF.remove(cf)
            else:
                FP += 1
        FN = len(TF)
        print (test)
        print (TF)
        # for tf in TF:
        #     if tf not in F:
        #         FN += 1
        precision = TP/(TP+FP)
        recall = TP/(TP + FN)
        print (TP,FP,FN)
        print ('p=%f'% precision)
        print ('r=%f'% recall)
        f=(2*precision*recall)/(precision+recall)
        print ('F=%f' % f)
        # opinion word's extraction result
        print("opinion word:")
        p = open(r'E:\python_workplace\Opinion_Mining\Data\Nokia 6610\true_opinion.txt', encoding='utf-8')
        TO = []
        for line in p.readlines():
            line.replace(', ',',')
            if ',' in line:
                tmp = line.split(',')
                for t in tmp:
                    TO.append(t.strip())
            else:
                TO.append(line.strip())
        print (len(TO))
        print (len(O))
        TP = 0
        FP = 0
        test = []
        for co in O:
            if co in TO:
                TP += 1
                test.append(co)
                TO.remove(co)
            else:
                FP += 1
        FN = len(TO)
        # Guard against division by zero when nothing was matched.
        if(TP):
            precision = TP/(TP+FP)
            recall = TP/(TP + FN)
            print (TP,FP,FN)
            print ('p=%f'% precision)
            print ('r=%f'% recall)
            f=(2*precision*recall)/(precision+recall)
            print ('F=%f' % f)
if __name__ == "__main__":
    # One-off corpus-preparation steps, kept commented for reference:
    # getVocabulary()
    # domain_preprocess(r'E:\python_workplace\Opinion Mining (LML)\Data\Nokia 6610\Nokia6610.txt',r'E:\eclipse_workplace\AMC\Data\Input\100Reviews\Electronics')
    main()
"sherrylml@126.com"
] | sherrylml@126.com |
7e72fb11137d1cc82500a43c590445b6d4222f54 | 11334e46d3575968de5062c7b0e8578af228265b | /Projects/subsumption_lewis/test_escape_behavior.py | 4a60ca86e27a79e1aadad1e7cc150c9a55c47a09 | [] | no_license | slowrunner/Carl | 99262f16eaf6d53423778448dee5e5186c2aaa1e | 1a3cfb16701b9a3798cd950e653506774c2df25e | refs/heads/master | 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 145,750,624 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 | Roff | UTF-8 | Python | false | false | 1,653 | py | #!/usr/bin/env python3
"""
FILE: test_escape_behavior.py
PURPOSE: Test an subsumption architecture escape behavior
REFERENCES:
"Mobile Robots: Inspiration To Implementation", Jones, Flynn, Seiger p318
"""
import subsumption
import time
import logging
# Configure the subsumption architecture for this test: keep both the
# scan and drive layers enabled, and turn the TALK flag off.
subsumption.inhibit_scan = False
subsumption.inhibit_drive = False
subsumption.TALK = False
def stop():
    """Zero both motor command channels, then pause 3 s while the robot
    settles.  (Currently unused by the test flow.)"""
    for channel in ("mot_trans", "mot_rot"):
        setattr(subsumption, channel, 0)
    time.sleep(3)
def test_escape_behavior():
    """Announce the escape-behavior test, then idle forever.

    The escape behavior itself runs inside the subsumption module
    (presumably in background threads -- see subsumption.setup); this
    function only keeps the main thread alive.  Ctrl-C ends the test.
    """
    logging.info("==== TEST ESCAPE BEHAVIOR ====")
    subsumption.say("Escape Behavior Test Will Begin In 5 seconds")
    time.sleep(5)
    try:
        # Idle loop: all the work happens in the subsumption layers.
        while True:
            time.sleep(1.0)
    except KeyboardInterrupt:
        logging.info("==== ESCAPE BEHAVIOR TEST COMPLETE ====")
        subsumption.say("Escape Behavior Test Complete")
# MAIN
def main():
    """Set up the subsumption system, run the escape-behavior test until
    interrupted, and always tear the robot down afterwards."""
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(funcName)s: %(message)s')
    logging.info("==== TEST SUBSUMPTION ====")
    subsumption.say("Test subsumption.")
    try:
        subsumption.setup()
        # while True:
        # do main things
        test_escape_behavior()
    except KeyboardInterrupt:
        print("")
        msg="Ctrl-C Detected in Main"
        logging.info(msg)
        subsumption.say(msg)
    except Exception as e:
        logging.info("Handling main exception: %s",e)
    finally:
        # Teardown runs no matter how the test ended, so the robot's
        # motors/threads are always released.
        subsumption.teardown()
        logging.info("==== Subsumption Test Done ====")
        subsumption.say("Subsumption test done")
# Run the test only when executed directly (not on import).
if __name__ == "__main__":
    main()
| [
"slowrunner@users.noreply.github.com"
] | slowrunner@users.noreply.github.com |
1c26b5ba9c27247a493c7583c13c134bfa1408ee | bf36ea2c2175745bcb1cbfaba3afe77332f2ee3e | /src/scripts/stock_db_test.py | a1ec4272a69e3eb14666a491f699d38711bb6217 | [] | no_license | SymPiracha/Stocks-Dashboard | 80783f531b37d1b22234582efc561b43f2ce941d | 8b73668bfa27d5dce05d9919a2126f18880a2bc1 | refs/heads/main | 2023-02-28T06:18:50.373216 | 2021-01-31T10:48:44 | 2021-01-31T10:48:44 | 334,278,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | from iexfinance.stocks import get_historical_data
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
# Fetch ~12 days of TSLA history from IEX Cloud, keep the last 6 trading
# days, and derive day-over-day %price/%volume change for the last 5.
# SECURITY(review): a live-looking IEX Cloud token is hard-coded and
# committed here; it should be revoked and read from an env var instead.
api_key = 'pk_495c80fadacc450e8d8912f83b9d4053' #got the token from the iexfinance account.
today = datetime.now().strftime('%Y-%m-%d') #stored the date when the function is called.
temp = (datetime.now() - timedelta(12)).strftime('%Y-%m-%d') #stores the last 12 days of change.
df = get_historical_data("TSLA", temp, today,token=api_key) #using the api to access the data for a particular stock e.g TSLA
df = df .iloc[3:] # removed the first 3 rows because we are dealing with 5 previous days. (Used 6 to calculate the %change for the 5th day)
df1 = df[['label','close','volume']]
df1.columns = ['date','close','volume']
price_change = []
volume_change = []
#running the forloop to add new columns after computation of these columns namely : %volume_change and %price_change.
for i in range(5):
    # NOTE(review): int() truncates the close price before computing the
    # percentage change, discarding sub-dollar precision -- confirm intended.
    old_price = int(df1.iat[i,1])
    new_price = int(df1.iat[i+1,1])
    old_volume = int(df1.iat[i,2])
    new_volume = int(df1.iat[i+1,2])
    volume_change.append((new_volume-old_volume)/(old_volume)*100)
    price_change.append((new_price-old_price)/(old_price) * 100)
# Drop the first row: it has no previous day to compare against.
df1 = df1.iloc[1:]
df1['%volume_change'] = volume_change
df1['%price_change'] = price_change
# NOTE(review): plot() treats the 3rd positional argument as a format
# string, so passing a Series here likely misbehaves -- verify the call.
print(plt.plot(df1['date'],df1['%price_change'],df1['%volume_change']))
#print(df1)
| [
"ibrahimnaveed@Ibrahims-MacBook-Air.local"
] | ibrahimnaveed@Ibrahims-MacBook-Air.local |
dee1ec3736ca68ab8e05ad1f1f8b5418a5663a9a | 5a333195fbed1201864ba08beb3582c954518590 | /sort/quick_sort.py | e7476688fe6011fada2513b0d9d95cfe60758c80 | [] | no_license | ljke/algorithm-py | b2bba2020dd3e058fb3b17e20bdc19c3e30fa12a | 12bd7ad8778188dc4184e57711a7d21c566f42cd | refs/heads/master | 2020-04-12T04:01:33.984769 | 2019-04-25T08:23:27 | 2019-04-25T08:23:27 | 162,282,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # coding=utf-8
# 快速排序
# 选取基准值,根据与基准值的大小关系对数据分区
# 递归调用,对分区也进行快速排序
# 非原地快排
def quick_sort(arr):
if len(arr) < 2:
return arr
else:
pivot = arr[0]
less = [i for i in arr[1:] if i <= pivot]
greater = [i for i in arr[1:] if i > pivot]
return quick_sort(less) + [pivot] + quick_sort(greater)
# 原地快排
def quick_sort_opti(arr):
if len(arr) < 2:
return arr
else:
pivot = arr[-1]
i = 0
for j in range(len(arr) - 1):
if arr[j] < pivot:
arr[i], arr[j] = arr[j], arr[i] # 交换元素
i += 1
arr[i], arr[-1] = arr[-1], arr[i]
return quick_sort_opti(arr[0:i]) + [pivot] + quick_sort_opti(arr[i+1:])
if __name__ == '__main__':
test = [1, 4, 5, 3, -2, 10, 9]
print quick_sort(test)
test = [1, 4, 5, 3, -2, 10, 9]
print quick_sort_opti(test)
| [
"ljke1995@gmail.com"
] | ljke1995@gmail.com |
c42ee65059fd84127b788c9f61f22f6091572c64 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/hoework01/gettop10frommaoyam01_20200625172155.py | 6673c7bd655b35c14f885d7566123eee9d12b9b9 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 1,024 | py | # 使用requests,bs4库,爬取猫眼电影top10的电影名称、电影类型、上映时间,并以utf-8的字符集保存到csv文件中
import requests
# Fetch the Maoyan "now showing" film listing page and dump its HTML.
# (Original task comment: scrape the top-10 movie name/type/release date
# with requests + bs4 and save as UTF-8 CSV -- the parsing part is not
# implemented yet; this only downloads the page.)
maoyanUrl = "https://maoyan.com/films?showType=3"
# NOTE(review): user_agent is defined but never used -- the header dict
# below hard-codes its own User-Agent string.
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
# Browser-like request headers so the site serves the normal page.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
          'Accept': "*/*",
          'Accept-Encoding': 'gazip, deflate, br',
          'Accept-Language': 'en-AU,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,la;q=0.6',
          'Content-Type': 'text/plain',
          'Connection': 'keep-alive',
          # 'Host': 'wreport1.meituan.net',
          'Origin': 'https://maoyan.com',
          'Referer': 'https://maoyan.com/films?showType=3',
          'Sec-Fetch-Dest': 'empty',
          'Sec-Fetch-Mode': 'cors',
          'Sec-Fetch-Site': 'cross-site',
          }
response = requests.get(maoyanUrl, headers=header)
response.encoding = 'utf-8'
print(response.text)
"31039587+ydbB@users.noreply.github.com"
] | 31039587+ydbB@users.noreply.github.com |
58fa54923fd2634f3278314421dd4b2484103e88 | 13f4dba586d98b872d2b815fe74026a852a657e3 | /python/server/backends/twitter_helper.py | 0cbb5d0665b65fd760fca0fd5d4e0f803018d837 | [
"Apache-2.0"
] | permissive | henrypan/searchhub | 6514bb0386605efa0e8046816c0bcc171565af04 | aba1418a1016b00b6f4ba366c3c3d6553e64965c | refs/heads/master | 2023-02-21T05:07:56.325515 | 2016-07-08T22:36:57 | 2016-07-08T22:36:57 | 63,719,853 | 0 | 0 | NOASSERTION | 2023-02-15T04:22:41 | 2016-07-19T18:53:11 | Python | UTF-8 | Python | false | false | 3,350 | py | from server import app
from schedule_helper import create_schedule
import twitter
import json
'''
Helper class for creating Twitter data sources
'''
def create_twitter_datasource_configs(project):
    """
    Generate the Fusion Twitter-stream data source config for a project.

    Returns None (and logs to stdout) when the Twitter credentials are
    not set in the Flask app config, or when the Twitter API client
    cannot be created; otherwise returns the connector config dict.

    :param project: the project dict (expects "name", "label",
        "twitter_pipeline" and a "twitter"/"follows" list)
    :returns: the configuration dictionary, or None
    """
    if app.config.get('TWITTER_CONSUMER_KEY') is None:
        print "No Twitter config set, skipping"
        return None
    try:
        twitter_api = twitter.Api(consumer_key=app.config.get('TWITTER_CONSUMER_KEY'),
                                  consumer_secret=app.config.get('TWITTER_CONSUMER_SECRET'),
                                  access_token_key=app.config.get('TWITTER_ACCESS_TOKEN'),
                                  access_token_secret=app.config.get('TWITTER_TOKEN_SECRET'))
    except:
        # NOTE(review): bare except hides the real failure (bad credentials
        # vs. network error); consider catching the specific Twitter error.
        print "Unable to connect to Twitter, skipping"
        return None
    # Connector config: field mappings normalise tweet metadata into the
    # collection's shared schema (person_*, author_*, tags_*, content_t).
    config = {
        'id': "twitter-{0}".format(project["name"]),
        'connector': "lucid.twitter.stream",
        'pipeline': project["twitter_pipeline"],
        'type': "twitter_stream",
        'properties': {
            'collection': app.config.get('FUSION_COLLECTION'),
            'consumer_key': app.config.get('TWITTER_CONSUMER_KEY'),
            'consumer_secret': app.config.get('TWITTER_CONSUMER_SECRET'),
            'access_token': app.config.get('TWITTER_ACCESS_TOKEN'),
            'token_secret': app.config.get('TWITTER_TOKEN_SECRET'),
            'initial_mapping': {
                'mappings': [
                    # Add fields
                    {"source": "project", "target": project["name"], "operation": "set"},
                    {"source": "project_label", "target": project["label"], "operation": "set"},
                    {"source": "datasource_label", "target": project["label"] + " Twitter", "operation": "set"},
                    {"source": "source_s", "target": "twitter", "operation": "set"},
                    {"source": "isBot", "target": "false", "operation": "set"},
                    # People names
                    {"source": "userName", "target": "person_ss", "operation": "copy"},
                    {"source": "userMentionName", "target": "person_ss", "operation": "copy"},
                    {"source": "person_ss", "target": "person_t", "operation": "copy"},
                    {"source": "userMentionScreenName", "target": "person_t", "operation": "copy"},
                    {"source": "userScreenName", "target": "person_t", "operation": "copy"},
                    # Author
                    {"source": "userName", "target": "author_s", "operation": "move"},
                    {"source": "author_s", "target": "author_t", "operation": "copy"},
                    {"source": "userScreenName", "target": "author_t", "operation": "copy"},
                    # Other stuff
                    {"source": "createdAt", "target": "publishedOnDate", "operation": "move"},
                    {"source": "tweet", "target": "content_t", "operation": "move"},
                    {"source": "tagText", "target": "tags_ss", "operation": "move"},
                    {"source": "tags_ss", "target": "tags_t", "operation": "copy"}
                ]
            },
            'filter_follow': [],
            'filter_track': [],
            'filter_locations':[]
        }
    }
    # "follows" entries starting with '@' are resolved to numeric Twitter
    # user ids for filter_follow; anything else is a tracked keyword.
    for follow in project["twitter"]["follows"]:
        print follow
        if follow[0] == '@':
            user = twitter_api.GetUser(screen_name=follow)
            #print user.id
            config['properties']['filter_follow'].append("" + str(user.id))
        else:
            config['properties']['filter_track'].append(follow)
    return config
| [
"gsingers@apache.org"
] | gsingers@apache.org |
51d3ea922f240f9e7f32578863efe5c191e052d6 | 4bdb59c52bc98a94dd191d06f3a4db9ef9b640be | /Py/Web/scrap4.py | 88fa14a7ae8ad6708094027a12dfb38e7775f85c | [] | no_license | aksaba/MyCodes | 860ea121a87c49361bca3b80e5a10e04f9399053 | ebad5b6cdcde903c4be1107b32f4bca2a5e5a0e8 | refs/heads/master | 2023-03-19T11:17:44.438495 | 2023-03-07T09:21:36 | 2023-03-07T09:21:36 | 126,673,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import requests
# POST a DuckDuckGo HTML search for 'x' and save the response page.
url = "http://duckduckgo.com/html"
payload = {'q':'x'}
r = requests.post(url, payload)
# Bug fix: r.content is a byte string, so the output file must be opened
# in binary mode; text mode raises TypeError on Python 3.
with open("requests_results.html", "wb") as f:
    f.write(r.content)
| [
"aksabapathy@gmail.com"
] | aksabapathy@gmail.com |
dc342444308955de1ce5c6fc868ec96bd56f7d1a | 8d05ef4a66e89508ecb42297fb02aae3ba79d3e5 | /Black_Jack.py | eed7b40d3e5e09631cf116700abdda1a5fd9d476 | [] | no_license | OxyKerad/Black-Jack | 62859135b029cc6c1caf1e2b5c23275ee737125d | 3428f03366600c8aafcf4155739426018fc6203a | refs/heads/master | 2020-03-19T18:13:39.257062 | 2018-06-10T11:24:30 | 2018-06-10T11:24:30 | 136,800,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,601 | py | import random
import os
class Cards(object):
    """A standard 52-card deck and random draws from it.

    Card names are "<rank>_<suit>" with Polish suit names
    (pik/kier/trefl/karo).  The deck is generated rank-major so its
    ordering is identical to the original hand-written tuple.
    """

    cards = tuple(
        rank + '_' + suit
        for rank in ('2', '3', '4', '5', '6', '7', '8', '9', '10',
                     'J', 'D', 'K', 'AS')
        for suit in ('pik', 'kier', 'trefl', 'karo')
    )

    def pick_card(self):
        """Draw one card uniformly at random (with replacement)."""
        return random.choice(self.cards)

    def pick_cards(self):
        """Draw two distinct cards uniformly at random."""
        return random.sample(self.cards, 2)
class Player(Cards):
    """State for one blackjack participant: hand, score, bankroll and bet."""

    def __init__(self, bankroll=100, score=0, player_cards=None, bet=0):
        # Bug fix: the original default was a mutable [] literal, which is
        # evaluated once and shared between every Player constructed
        # without an explicit hand.  None + per-instance list avoids that.
        self.player_cards = [] if player_cards is None else player_cards
        self.score = score
        self.bankroll = bankroll
        self.bet = bet

    def count_score(self, used_card):
        """Recompute and return self.score from names like '10_pik'.

        Face cards (J/D/K) count 10 and aces always count 11.
        NOTE(review): real blackjack demotes an ace to 1 on a bust --
        confirm whether the always-11 simplification is intended.
        """
        self.score = 0
        for card in used_card:
            point = card.split('_')[0]
            if point in ('J', 'D', 'K'):
                point = 10
            elif point == 'AS':
                point = 11
            self.score += int(point)
        return self.score

    def chceck_bankroll(self, result):
        """Apply the bet to the bankroll for a finished round and return it.

        (The method name keeps the original 'chceck' typo so existing
        callers are unaffected.)
        """
        if result == "YOU WON":
            self.bankroll = self.bankroll + self.bet
        elif result == "YOU LOST":
            self.bankroll = self.bankroll - self.bet
        return self.bankroll

    def player_result(self, ob, result='N'):
        """Refresh *ob*'s score, settle the bet when *result* is given, and
        return a printable status line.  Exits the program on bankruptcy."""
        ob.score = ob.count_score(ob.player_cards)
        if result != 'N':
            if ob.chceck_bankroll(result) <= 0:
                print("You are bankrupt. \nTry again")
                exit()
        # Include the bet/bankroll only when a bet is active.
        return (
            "have {} and have {} score. You bet is {}$. Your bank roll is {} $".format(self.player_cards, self.score,
                                                                                       self.bet, self.bankroll)
            if self.bet != 0 else "have {} and have {} score.".format(self.player_cards, self.score))
def check_win(score_bob, score_croupier):
    """Decide the round from the two final scores (player's perspective).

    Branch order matters and mirrors the original: a player 21 or a
    croupier bust wins before any other comparison is made.
    """
    player_wins = (
        score_bob == 21
        or score_croupier > 21
        or score_croupier < score_bob < 21
    )
    if player_wins:
        return "YOU WON"
    if score_croupier == 21 or score_bob > 21 or score_bob < score_croupier:
        return "YOU LOST"
    if score_bob == score_croupier:
        return "TIE"
def check_21(bob, croupier):
    """Report an immediate 21 outcome, or 0 when play should continue.

    The player's 21 is checked first, so a simultaneous 21 counts as a
    player win (same precedence as the original implementation).
    """
    if bob.score == 21:
        return "Bob " + bob.player_result(bob) + " You WON"
    if croupier.score == 21:
        return "Croupier " + croupier.player_result(croupier) + " He WON"
    return 0
def next_pick_up(used_card, decision='N'):
    """Draw new card(s) from the deck, excluding every card in *used_card*.

    Returns a list of one card when *decision* is 'H' (a hit), otherwise
    two cards (a fresh hand).  Raises ValueError if a used card is not in
    the deck, matching the original list.remove behaviour.
    """
    remaining = list(Cards().cards)
    for card in used_card:
        remaining.remove(card)
    count = 1 if decision == 'H' else 2
    return random.sample(remaining, count)
def player_input():
    """Prompt until the user types H (hit) or S (stand); return the letter.

    Bug fix: the original recursed on invalid input but discarded the
    recursive call's result, so a typo followed by valid input made the
    function return None.  A loop always returns the validated choice.
    """
    while True:
        choice = input("Write H for Hit or S for Stand. Your choice: ").upper()
        if choice in ('H', 'S'):
            return choice
        print("Invalid input. Enter H or S: ")
def play_again():
    """Ask Y/N: clear the screen and return 'Y', or exit the program on 'N'.

    Bug fix: the original recursed on invalid input but discarded the
    recursive call's result, so play_again() could return None after a
    typo.  A loop always returns/exits from a validated answer.
    """
    while True:
        choice = input("Do you wanna play again Y/N? Your choice: ").upper()
        if choice == 'Y':
            os.system('cls')  # Windows-only screen clear ('clear' on POSIX)
            return choice
        if choice == 'N':
            print('Thanks for playing!')
            exit()
        print("Invalid input. Enter Y for yes or N for no: ")
def start_game(bob=Player()):
    """Deal a fresh round; passing the same ``bob`` keeps the bankroll.

    NOTE(review): the mutable default argument means every no-argument call
    shares one Player instance — confirm this is intentional (replaying via
    play_game() passes ``bob`` explicitly).
    """
    if bob.score == 0:
        # First round only: greet the player and set the fixed bet.
        print("Welcome in Blackjack game. You have 100 $ and the bet is for 10 $. Have fun and good luck!\n\n")
        bob.bet = 10
    bob.player_cards = bob.pick_cards()
    bob.score = bob.count_score(bob.player_cards)
    croupier = Player()
    # Deal the croupier from the deck minus Bob's cards.
    croupier.player_cards = next_pick_up(bob.player_cards)
    croupier.score = croupier.count_score(croupier.player_cards)
    test_21 = check_21(bob, croupier)
    if test_21:
        # An opening blackjack on either side ends the round immediately.
        print(test_21)
        play_again()
    else:
        print("Bob", bob.player_result(bob))
        # Only the croupier's first card is revealed at this point.
        print("Croupier have {} and unsigned card ".format(croupier.player_cards[0]))
        play_game(bob, croupier)
def play_game(bob, croupier):
    """One hit/stand decision; recurses on hit until Bob stands or busts."""
    plin = player_input()
    if plin == 'H':
        # Draw one card that is not already in either hand.
        bob.player_cards += next_pick_up(bob.player_cards + croupier.player_cards, plin)
        print("Bob", bob.player_result(bob))
        # Keep playing while Bob is at 21 or under; otherwise print the result.
        play_game(bob, croupier) if bob.score <= 21 else print(check_win(bob.score, croupier.score),
                                                               "\nBob", bob.player_result(bob, check_win(bob.score,
                                                                                                         croupier.score)),
                                                               "\nCroupier", croupier.player_result(croupier))
    elif plin == 'S':
        if croupier.score <= 11:
            # Croupier must draw on 11 or less.
            croupier.player_cards += next_pick_up(bob.player_cards + croupier.player_cards, 'H')
            croupier.count_score(croupier.player_cards)
        print(check_win(bob.score, croupier.score), "\nBob",
              bob.player_result(bob, check_win(bob.score, croupier.score)), "\nCroupier",
              croupier.player_result(croupier))
        if play_again() == 'Y':
            start_game(bob)
start_game() | [
"darek.belz@gmail.com"
] | darek.belz@gmail.com |
7b95fcc33b3aa2249ed1f27138745f475927c2d6 | cf14b6ee602bff94d3fc2d7e712b06458540eed7 | /gs82/gs82/urls.py | 0aecc6d4eeb66d7fa733fff9c8bcaddef8e0841a | [] | no_license | ManishShah120/Learning-Django | 8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27 | 8fe70723d18884e103359c745fb0de5498b8d594 | refs/heads/master | 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.contrib import admin
from django.urls import path
from enroll import views
from django.views.decorators.cache import cache_page
urlpatterns = [
    path('admin/', admin.site.urls),
    # Root URL serves the same view as /home/, but cached for 30 seconds.
    path('', cache_page(30)(views.home)),
    path('home/', views.home),
    path('contact/', views.contact),
]
| [
"mkshah141@gmail.com"
] | mkshah141@gmail.com |
72b89b38c0e3aa5b4434dab787a84864f5016e07 | 36e1bb79968425e0095b18c267e0f178c724b065 | /src/lexer.py | 28f53c8075ef7ea617a2de2cbfec6387632933c7 | [] | no_license | nashrul-8/LIYN-Language | 34a543410608c0a7885161c5799ef033c5d7f626 | 9ffc6f524976992c86e27094b0ef9236d3893d2c | refs/heads/master | 2022-10-17T05:30:16.828141 | 2020-06-12T15:51:23 | 2020-06-12T15:51:23 | 251,566,219 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | from sly import Lexer
class BasicLexer(Lexer):
    """Tokenizer for the LIYN language (a BASIC dialect with Indonesian keywords)."""

    tokens = {NAME, NUMBER, STRING, IF, THEN,
              ELSE, FOR, FUN, TO, ARROW, EQEQ, PRINT}
    ignore = '\t '

    literals = {'=', '+', '-', '/', '*', '(', ')', ',', ';'}

    # Token definitions (keywords are Indonesian words)
    IF = r'JIKA'
    THEN = r'MAKA'
    ELSE = r'LAINNYA'
    FOR = r'UNTUK'
    FUN = r'FUNGSI'
    TO = r'KE'
    PRINT = r'CETAK'
    ARROW = r'->'
    NAME = r'[A-Za-z_][a-zA-Z0-9_]*'
    STRING = r'\".*?\"'
    EQEQ = r'=='

    @_(r'\d+')
    def NUMBER(self, t):
        # Convert the matched digit run into an int token value.
        t.value = int(t.value)
        return t

    @_(r'#.*')
    def COMMENT(self, t):
        # Comments are discarded; returning nothing emits no token.
        pass

    @_(r'\n+')
    def newline(self, t):
        # BUGFIX: accumulate newlines rather than overwrite the counter, so
        # self.lineno tracks the absolute line number (sly's documented idiom).
        self.lineno += t.value.count('\n')
| [
"noreply@github.com"
] | nashrul-8.noreply@github.com |
52a1d88819372454adbfb076fdc2c7690c58f356 | 31d79a7b2b79a83ae21ec7d2c850bd39b79a8ddc | /CeVExercicios/ex099 - Proff.py | e5bd29f8b7f5ecd7626f8000276ed76dc39e6b5b | [
"MIT"
] | permissive | brunnossanttos/exercicios-intro-python3 | a36fc5f0fdafbb1db1b22c6cf107654858da53d3 | 9d6630770af8fdd759441de78d1a5c824197f874 | refs/heads/main | 2023-07-25T00:36:46.068756 | 2021-08-26T13:43:49 | 2021-08-26T13:43:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | from time import sleep
def maior(*num):
    """Print each value received (with a short pause), then report how many
    values were given and which one was the largest."""
    biggest = 0
    total = 0
    print('\nAnalisando os valores passados... ')
    for v in num:
        print(f'{v}', end=' ')
        sleep(0.4)
        # The first value always becomes the current maximum.
        if total == 0 or v > biggest:
            biggest = v
        total += 1
    print()
    print(f'\nForam informados {total} valores ao todo.')
    print(f'O maior valor informando foi {biggest}.')
# Programa Principal
# Demo calls exercising maior() with varying argument counts, including none.
maior(2, 9, 4, 5, 7, 1)
maior(4, 7, 0)
maior(1, 2)
maior(6)
maior()
| [
"85589872+brunnossanttos@users.noreply.github.com"
] | 85589872+brunnossanttos@users.noreply.github.com |
f7a3955559d747fba8970c5e5ee6fd29663aca62 | d999ee6aa45752c17056a271de5a7cfe36ddcf23 | /venv/Lib/site-packages/pyLibrary/queries/es14/format.py | 5e7979c500261a17336f04aab204f23abe662ef2 | [] | no_license | Parsav/Python | 89f22b22e0106a66b0235b5e9997647045761dfe | 6ff924c150dc14a8a9a51e1c1e20bcc250469d84 | refs/heads/master | 2022-12-28T22:43:45.177446 | 2017-02-06T21:31:21 | 2017-02-06T21:31:21 | 81,127,425 | 0 | 1 | null | 2022-11-29T02:38:03 | 2017-02-06T20:06:46 | Python | UTF-8 | Python | false | false | 8,101 | py | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import Mapping
from pyLibrary import convert
from pyLibrary.collections.matrix import Matrix
from pyLibrary.debugs.logs import Log
from pyDots import Data, set_default, coalesce, wrap, split_field, Null
from pyLibrary.queries.containers.cube import Cube
from pyLibrary.queries.es14.aggs import count_dim, aggs_iterator, format_dispatch, drill
from pyLibrary.queries.expressions import TupleOp
def format_cube(decoders, aggs, start, query, select):
    """Shape ES aggregation results into a Cube: one Matrix per select clause,
    with one dimension per query edge."""
    new_edges = count_dim(aggs, decoders)

    dims = []
    for e in new_edges:
        if isinstance(e.value, TupleOp):
            e.allowNulls = False

        # Reserve one extra slot per edge for the "null" partition.
        if e.allowNulls is False:
            extra = 0
        else:
            extra = 1
        dims.append(len(e.domain.partitions)+extra)

    dims = tuple(dims)
    matricies = [(s, Matrix(dims=dims, zeros=s.default)) for s in select]
    for row, coord, agg in aggs_iterator(aggs, decoders):
        for s, m in matricies:
            try:
                v = _pull(s, agg)
                m[coord] = v
            # NOTE: Python 2 except syntax — this module predates Python 3.
            except Exception, e:
                Log.error("", e)
    cube = Cube(query.select, new_edges, {s.name: m for s, m in matricies})
    cube.frum = query
    return cube
def format_cube_from_aggop(decoders, aggs, start, query, select):
    """Shape an aggregate-only (no edges) result into a zero-dimension Cube."""
    agg = drill(aggs)
    matricies = [(s, Matrix(dims=[], zeros=s.default)) for s in select]
    for s, m in matricies:
        # A 0-d Matrix is addressed with the empty coordinate tuple.
        m[tuple()] = _pull(s, agg)
    cube = Cube(query.select, [], {s.name: m for s, m in matricies})
    cube.frum = query
    return cube
def format_table(decoders, aggs, start, query, select):
    """Format edge-query results as a table: header plus one row per cell,
    including rows for cube cells ES did not return."""
    new_edges = count_dim(aggs, decoders)
    header = new_edges.name + select.name

    def data():
        dims = tuple(len(e.domain.partitions) + (0 if e.allowNulls is False else 1) for e in new_edges)
        # Track which cube coordinates have been emitted.
        is_sent = Matrix(dims=dims, zeros=0)
        for row, coord, agg in aggs_iterator(aggs, decoders):
            is_sent[coord] = 1

            output = [d.get_value(c) for c, d in zip(coord, decoders)]
            for s in select:
                output.append(_pull(s, agg))
            yield output

        # EMIT THE MISSING CELLS IN THE CUBE
        if not query.groupby:
            for c, v in is_sent:
                if not v:
                    record = [d.get_value(c[i]) for i, d in enumerate(decoders)]
                    for s in select:
                        # Missing cells mean "no documents": count is 0,
                        # every other aggregate is unknown.
                        if s.aggregate == "count":
                            record.append(0)
                        else:
                            record.append(None)
                    yield record

    return Data(
        meta={"format": "table"},
        header=header,
        data=list(data())
    )
def format_table_from_groupby(decoders, aggs, start, query, select):
    """Format groupby results as a table: header plus one row per group
    (no missing-cell back-fill, unlike format_table)."""
    header = [d.edge.name for d in decoders] + select.name

    def data():
        for row, coord, agg in aggs_iterator(aggs, decoders):
            output = [d.get_value_from_row(row) for d in decoders]
            for s in select:
                output.append(_pull(s, agg))
            yield output

    return Data(
        meta={"format": "table"},
        header=header,
        data=list(data())
    )
def format_table_from_aggop(decoders, aggs, start, query, select):
    """Format an aggregate-only result as a one-row table."""
    header = select.name
    agg = drill(aggs)
    row = []
    for s in select:
        row.append(_pull(s, agg))

    return Data(
        meta={"format": "table"},
        header=header,
        data=[row]
    )
def format_tab(decoders, aggs, start, query, select):
    """Emit the query result as tab-separated lines, header row first."""
    table = format_table(decoders, aggs, start, query, select)

    def lines():
        # Every value is quoted before joining.
        yield "\t".join(convert.string2quote(v) for v in table.header)
        for record in table.data:
            yield "\t".join(convert.string2quote(v) for v in record)

    return lines()
def format_csv(decoders, aggs, start, query, select):
    """Emit the query result as comma-separated lines, header row first."""
    table = format_table(decoders, aggs, start, query, select)

    def lines():
        # Every value is quoted before joining.
        yield ", ".join(convert.string2quote(v) for v in table.header)
        for record in table.data:
            yield ", ".join(convert.string2quote(v) for v in record)

    return lines()
def format_list_from_groupby(decoders, aggs, start, query, select):
    """Format groupby results as a list of {group: value, select: value} records."""
    def data():
        for row, coord, agg in aggs_iterator(aggs, decoders):
            output = Data()
            for g, d in zip(query.groupby, decoders):
                output[g.name] = d.get_value_from_row(row)

            for s in select:
                output[s.name] = _pull(s, agg)
            yield output

    output = Data(
        meta={"format": "list"},
        data=list(data())
    )
    return output
def format_list(decoders, aggs, start, query, select):
    """Format edge-query results as a list of {edge: value, select: value}
    records, back-filling cube cells ES did not return."""
    new_edges = count_dim(aggs, decoders)

    def data():
        dims = tuple(len(e.domain.partitions) + (0 if e.allowNulls is False else 1) for e in new_edges)

        # Track which cube coordinates have been emitted.
        is_sent = Matrix(dims=dims, zeros=0)
        for row, coord, agg in aggs_iterator(aggs, decoders):
            is_sent[coord] = 1

            output = Data()
            for e, c, d in zip(query.edges, coord, decoders):
                output[e.name] = d.get_value(c)

            for s in select:
                output[s.name] = _pull(s, agg)
            yield output

        # EMIT THE MISSING CELLS IN THE CUBE
        if not query.groupby:
            for c, v in is_sent:
                if not v:
                    output = Data()
                    for i, d in enumerate(decoders):
                        output[query.edges[i].name] = d.get_value(c[i])
                    for s in select:
                        # Missing cells mean "no documents": only count gets 0.
                        if s.aggregate == "count":
                            output[s.name] = 0
                    yield output

    output = Data(
        meta={"format": "list"},
        data=list(data())
    )
    return output
def format_list_from_aggop(decoders, aggs, start, query, select):
    """Format an aggregate-only result as a single record (or bare value
    when the query has no edges/groupby)."""
    agg = drill(aggs)

    if isinstance(query.select, list):
        item = Data()
        for s in select:
            item[s.name] = _pull(s, agg)
    else:
        item = _pull(select[0], agg)

    if query.edges or query.groupby:
        return wrap({
            "meta": {"format": "list"},
            "data": [item]
        })
    else:
        return wrap({
            "meta": {"format": "value"},
            "data": item
        })
def format_line(decoders, aggs, start, query, select):
    """Emit the query result as one JSON document per line (JSON Lines)."""
    # Renamed from `list`, which shadowed the builtin of the same name.
    result = format_list(decoders, aggs, start, query, select)

    def data():
        for d in result.data:
            yield convert.value2json(d)

    return data()
# Register the formatters used for each output format: the tuple holds the
# (edges, groupby, aggop) variants plus the response MIME type.
set_default(format_dispatch, {
    None: (format_cube, format_table_from_groupby, format_cube_from_aggop, "application/json"),
    "cube": (format_cube, format_cube, format_cube_from_aggop, "application/json"),
    "table": (format_table, format_table_from_groupby, format_table_from_aggop, "application/json"),
    "list": (format_list, format_list_from_groupby, format_list_from_aggop, "application/json"),
    # "csv": (format_csv, format_csv_from_groupby, "text/csv"),
    # "tab": (format_tab, format_tab_from_groupby, "text/tab-separated-values"),
    # "line": (format_line, format_line_from_groupby, "application/json")
})
def _pull(s, agg):
    """
    USE s.pull TO GET VALUE OUT OF agg

    :param s: THE JSON EXPRESSION SELECT CLAUSE
    :param agg: THE ES AGGREGATE OBJECT
    :return: the extracted value (a dict of values when s.pull is a Mapping)
    """
    p = s.pull
    if not p:
        Log.error("programmer error")
    elif isinstance(p, Mapping):
        # Multiple named paths: extract each one (no per-key default).
        return {k: _get(agg, v, None) for k, v in p.items()}
    else:
        return _get(agg, p, s.default)
def _get(v, k, d):
    """Walk dotted path ``k`` into nested dict-like ``v``; return ``d`` when
    a step resolves to None."""
    for p in split_field(k):
        try:
            v = v.get(p)
            if v is None:
                return d
        except Exception:
            # v has no .get (presumably a list of aggregates) — map the
            # lookup over its items and keep walking with the list.
            v = [vv.get(p) for vv in v]
    return v
| [
"parker.lrrd@gmail.com"
] | parker.lrrd@gmail.com |
3d987b5cc0963702e101d7203d9f854c5047bad2 | 7bfcb5cfd015e9c36c60962555f1033caaee1a02 | /test.py | d4bec4dbda235606afbadb4ef2d274bfe11046eb | [] | no_license | LeoCCR/TPM_analysis | 9ab749a3e2537eb46f49bb4e94fb3bc93cc7cdc2 | 497ed7492fbb6ee69b65d952862582d6dacae347 | refs/heads/master | 2023-01-13T07:36:48.206128 | 2020-11-22T13:29:12 | 2020-11-22T13:29:12 | 311,665,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # %%
from operator import le
import numpy as np
import numpy.ma as ma
from random import randrange
# Two length-10 vectors of random floats drawn from {1.0, 2.0, 3.0, 4.0}.
x = np.array([float(randrange(1, 5)) for _ in range(10)])
y = np.array([float(randrange(1, 5)) for _ in range(10)])
# %%
# Treat the value 1 as "missing" by replacing it with NaN.
x[x == 1] = np.nan
y[y == 1] = np.nan
print(x)
print(y)
# Absolute Pearson correlation, ignoring NaN entries via masked arrays.
print(abs(np.ma.corrcoef(np.ma.masked_invalid(x), np.ma.masked_invalid(y))[0, 1]))
# %%
# Masked standard deviation of x (NaNs excluded).
x = ma.masked_invalid(x)
qa = ma.std(x)
# %%
print(x)
print(x[0])
print(qa)
| [
"private@private.com"
] | private@private.com |
85fe7d6260a2e8eaa0bf6dfb8a880ca9f9c8aecd | cbad375d39bf673c6a5ddcb2af33c53e5cc47494 | /cluster/affinity.py | 25c75c47cc026c355a31bd00e446f4eb56004a76 | [] | no_license | vambati/textcentral | f68640005ffb197797bbf5c0bec52436eb1903ce | 0fb29c4c092510e4ec7beeca2d184ba3da43f751 | refs/heads/master | 2021-01-20T11:13:28.324284 | 2014-05-15T20:53:56 | 2014-05-15T20:53:56 | 67,584,699 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,489 | py | import numpy as np
import sys
import csv
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
# Text proc
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import codecs
def read_text_file(inpFile, delim):
    """Read a delimited text file of ``label<delim>text`` lines.

    Returns (texts, labels) as two parallel lists; lines without the
    delimiter are skipped.

    Fixes over the original: the file is closed via a context manager; the
    spurious ``s.encode('utf-8')`` is gone (it broke on Python 3 and silently
    dropped non-ASCII lines on Python 2); the split uses maxsplit=1 so text
    containing the delimiter is kept instead of being discarded; and only
    ValueError is caught instead of a bare ``except: pass``.
    """
    ylabels = []
    tsvData = []
    with open(inpFile, "r") as f:
        for s in f:
            try:
                label, line = s.split(delim, 1)
            except ValueError:
                # No delimiter on this line — skip it.
                continue
            ylabels.append(label)
            tsvData.append(line)
    return tsvData, ylabels
##############################################################################
# Load labeled text data: first tab-separated field is the ground-truth label.
# BUGFIX: the labels were previously bound to `labels`, leaving `labels_true`
# undefined and raising NameError at the metric calls below.
X, labels_true = read_text_file(sys.argv[1], "\t")

vectorizer = CountVectorizer()
transformer = TfidfTransformer()  # NOTE(review): created but never used
X = vectorizer.fit_transform(X)
# Densify the sparse term-count matrix for AffinityPropagation.
X = X.toarray()

print("Affinity Clustering...")
print(X)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation().fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_

n_clusters_ = len(cluster_centers_indices)

print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle

pl.close('all')
pl.figure(1)
pl.clf()

# Cycle through colours, one per cluster.
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    class_members = labels == k
    cluster_center = X[cluster_centers_indices[k]]
    # Scatter the cluster members, highlight the exemplar point.
    pl.plot(X[class_members, 0], X[class_members, 1], col + '.')
    pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=14)
    # Draw a spoke from the exemplar to each member.
    for x in X[class_members]:
        pl.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)

pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| [
"vambati@paypal.com"
] | vambati@paypal.com |
9d86ebf7c63297ed3b76e1cc209398efa04eb21b | cf88d610b991925259bd43930b59c16954e75c85 | /spectral_clustering.py | 11674979c587bfe0070e7884ab6ddfb44afd3774 | [] | no_license | jingxianWang9401/clustering-algorithm-model | 5f4e171e39c402d6e443d37ac519cd05e6876b97 | 510ab4dafe4e14812f1155dae630e591f609d5e1 | refs/heads/master | 2023-01-24T07:45:52.792289 | 2020-11-23T03:01:49 | 2020-11-23T03:01:49 | 315,184,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 19 14:42:07 2020
@author: wangjingxian
"""
from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import spectral_clustering
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from itertools import cycle ##python自带的迭代器模块
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
'''
##产生随机数据的中心
centers = [[1, 1], [-1, -1], [1, -1]]
##产生的数据个数
n_samples=3000
##生产数据
X, lables_true = make_blobs(n_samples=n_samples, centers= centers, cluster_std=0.6,
random_state =0)
'''
#data=pd.read_csv('E:\data_mining\loudian_problem\data\dataset3.csv')
#X=data.ix[:,7]
data=pd.read_csv('E:\data_mining\eye_classification\data\eeg_train.csv')
# First 14 columns are the EEG features; column 14 is the class label.
X=data.iloc[:,0:14]
trainingLabels=data.iloc[:,[14]]
#scale=MinMaxScaler().fit(X.values.reshape(-1,1))  # fit the scaling rule
#X_dataScale=scale.transform(X.values.reshape(-1,1))  # apply the scaling rule
## Build the symmetric affinity matrix required by spectral_clustering:
## negated pairwise distances, shifted so every entry is non-negative.
metrics_metrix = (-1 * metrics.pairwise.pairwise_distances(X)).astype(np.int32)
metrics_metrix += -1 * metrics_metrix.min()
## Run spectral clustering with a fixed number of clusters.
n_clusters_= 2
lables = spectral_clustering(metrics_metrix,n_clusters=n_clusters_)
print('数据聚类标签为:',lables)
'''
predicted_label=spectral_clustering.predict([[0.320347155,0.478602869]])
print('预测标签为:',predicted_label)
'''
labels_unique = np.unique(lables)
## Number of distinct labels == number of clusters actually found.
n_clusters_ = len(labels_unique)
print("number of estimated clusters聚类数量为 : %d" % n_clusters_)
#print ("聚类中心\n", (spectral_clustering.cluster_centers_))
quantity = pd.Series(lables).value_counts()
print( "聚类后每个类别的样本数量\n", (quantity))
# Collect the rows that fall into each cluster.
resSeries = pd.Series(lables)
res0 = resSeries[resSeries.values == 0]
print("聚类后类别为0的数据\n",(data.iloc[res0.index]))
res1 = resSeries[resSeries.values == 1]
print("聚类后类别为1的数据\n",(data.iloc[res1.index]))
| [
"“wjx2016@mail.ustc.edu.cn”"
] | “wjx2016@mail.ustc.edu.cn” |
70719139c13bdd3da9c10876ba08919046782dd0 | 240d849acbb68e9f897339c62cb425004b35dc60 | /my_awesome_blog/blog/serializers.py | 119c95a7f25a38e6124dc9bc1fdd4493e4eb00ba | [
"MIT"
] | permissive | DaniilAnichin/my_awesome_blog | 12789b53194b0621cd576fa5fd5f53deda742c54 | 95f6f0fada796492894293b310db273d3a72356c | refs/heads/master | 2020-03-30T09:50:13.136000 | 2018-10-01T13:36:30 | 2018-10-01T13:36:30 | 151,094,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Post, Tag
User = get_user_model()
class TagSerializer(serializers.ModelSerializer):
    """Serializes a Tag as its bare name string.

    Validation and conversion are deliberate pass-throughs so that tag names
    which do not exist yet survive deserialization; PostSerializer attaches
    them afterwards via ``instance.set_tags``.
    """

    class Meta:
        model = Tag
        fields = ('name',)

    def run_validators(self, value):
        # Intentionally disabled: unknown tag names must not fail model
        # validation before the post is saved.
        pass

    def to_representation(self, instance):
        # Render the tag as a plain string rather than a nested object.
        return str(instance)

    def to_internal_value(self, data):
        # Accept the raw value unchanged; tag objects are resolved later.
        return data
class PostSerializer(serializers.ModelSerializer):
    """Serializes a Post, representing its tags as a list of name strings."""

    tags = TagSerializer(many=True)

    class Meta:
        model = Post
        exclude = ()

    def create(self, validated_data):
        # Tags cannot be written through the default ModelSerializer path:
        # pop them out, create the post, then attach them explicitly.
        tags = validated_data.pop('tags')
        instance = super().create(validated_data)
        instance.set_tags(tags)
        return instance

    def update(self, instance, validated_data):
        # Same two-step dance as create(): save scalar fields, then tags.
        tags = validated_data.pop('tags')
        instance = super().update(instance, validated_data)
        instance.set_tags(tags)
        return instance
class AuthorSerializer(serializers.ModelSerializer):
    """Serializes a user together with all of their posts (nested)."""

    posts = PostSerializer(many=True)

    class Meta:
        model = User
        fields = ('id', 'name', 'email', 'username', 'posts')
| [
"anichindaniil@gmail.com"
] | anichindaniil@gmail.com |
bf136f9a5bba68dbe3ea23bdb7093d0d5a6364ba | 8374b94097b8e2121fc18ee9124f9be1e8df853e | /homework_6/hw6_a.py | 45f85d3546cd1a9a126125e285cbafe2c19d9a8c | [
"MIT"
] | permissive | cvetkovic/ooa | 04769eeb29d630cd352d162d87e1f805f4e9d082 | f5544b0ebb646f23449f0e7c3561f34ca8641e81 | refs/heads/master | 2023-02-06T04:24:58.210986 | 2020-12-20T21:19:08 | 2020-12-20T21:19:08 | 306,406,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from scipy.optimize import linprog
# Maximize 20a + 30b by minimizing the negated objective (linprog minimizes).
c = [-20, -30]
# Inequality constraints A @ x <= b.
A = [[1, 3], [3, 1], [1, 6]]
b = [75, 99, 288]
# Both variables are non-negative and unbounded above.
x0_bounds = (0, None)
x1_bound = (0, None)
res = linprog(c, A, b, bounds=[x0_bounds, x1_bound])
print('a = ', res.x[0], ', b = ', res.x[1])
"l.cvetkovic.997@gmail.com"
] | l.cvetkovic.997@gmail.com |
5ab870b4fd3246853a2cab66ab7427042d1e8217 | ce63cda28f3467d1f57b410118e83b8a76777740 | /alpha3/celery.py | b84abff93a0f155ca8ae1ac54483a719dd555d64 | [
"MIT"
] | permissive | webclinic017/alphatrader | 39fa21040bed60f7cb28682139b54b0290976b55 | d97af1182e16f8281651832afb8938f2adebf065 | refs/heads/master | 2022-11-29T17:32:46.694706 | 2020-08-04T13:17:03 | 2020-08-04T13:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'alpha3.settings')

# Celery application with Redis as the result backend.
app = Celery('alpha3',backend='redis://localhost:6379')

# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings',namespace="CELERY")
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# Task results expire after 60 seconds.
app.conf.result_expires = 60

@app.task(bind=True)
def debug_task(self):
    # bind=True exposes the task instance so the request can be inspected.
    print('Request: {0!r}'.format(self.request))
"kris@causematch.com"
] | kris@causematch.com |
7b3ef7cafbb490a4c4d80df026a59221e5503557 | 9e7cf1ce552ccdf02bc103dfa4aacce24aa84fde | /catalyst/core/callbacks/scheduler.py | 7a566912f15cb66aa80f330495456058b4ea467d | [
"Apache-2.0"
] | permissive | denyhoof/catalyst | 69065a16fcac36df77bf180e91e12c85174635ac | a340450076f7846007bc5695e5163e15b7ad9575 | refs/heads/master | 2022-11-22T21:02:14.398208 | 2020-07-17T08:24:46 | 2020-07-17T08:24:46 | 279,684,159 | 0 | 0 | Apache-2.0 | 2020-07-14T20:15:22 | 2020-07-14T20:15:22 | null | UTF-8 | Python | false | false | 7,413 | py | from typing import Tuple
from abc import ABC, abstractmethod
import torch
from catalyst.contrib.nn.schedulers import BatchScheduler, OneCycleLRWithWarmup
from catalyst.core import utils
from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
from catalyst.core.runner import IRunner
class SchedulerCallback(Callback):
    """Steps an LR scheduler during training and logs lr/momentum.

    The scheduler is resolved from the runner on stage start and, depending
    on ``mode`` (auto-detected when omitted), stepped after every train
    batch or after every epoch.  ``ReduceLROnPlateau`` schedulers are
    stepped with the value of ``reduced_metric``.
    """

    def __init__(
        self,
        scheduler_key: str = None,
        mode: str = None,
        reduced_metric: str = None,
    ):
        """
        Args:
            scheduler_key (str): key of the scheduler to use when the runner
                holds several schedulers; ``None`` for the single-scheduler
                case
            mode (str): ``"batch"`` or ``"epoch"``; when ``None`` it is
                auto-detected in :meth:`on_stage_start`
            reduced_metric (str): metric fed to ``ReduceLROnPlateau.step``;
                defaults to the runner's main metric
        """
        super().__init__(order=CallbackOrder.scheduler, node=CallbackNode.all)
        self.scheduler_key = scheduler_key
        self.mode = mode
        self.reduced_metric = reduced_metric

    @staticmethod
    def _scheduler_step(
        scheduler, reduced_metric=None,
    ):
        # ReduceLROnPlateau has no get_lr(); step it with the monitored
        # metric and read the resulting lr back from the optimizer.
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(reduced_metric)
            lr = scheduler.optimizer.param_groups[0]["lr"]
        else:
            scheduler.step()
            lr = scheduler.get_lr()[0]

        momentum = utils.get_optimizer_momentum(scheduler.optimizer)

        return lr, momentum

    def step_batch(self, runner: IRunner) -> None:
        """Make one scheduler step and record lr/momentum in batch metrics.

        Args:
            runner (IRunner): current runner
        """
        lr, momentum = self._scheduler_step(scheduler=self._scheduler)

        if self.scheduler_key is not None:
            runner.batch_metrics[f"lr/{self.scheduler_key}"] = lr
            if momentum is not None:
                runner.batch_metrics[
                    f"momentum/{self.scheduler_key}"
                ] = momentum
        else:
            runner.batch_metrics["lr"] = lr
            if momentum is not None:
                runner.batch_metrics["momentum"] = momentum

    def step_epoch(self, runner: IRunner) -> None:
        """Make one (plateau-aware) scheduler step and record epoch metrics.

        Args:
            runner (IRunner): current runner
        """
        reduced_metric = runner.valid_metrics[self.reduced_metric]
        lr, momentum = self._scheduler_step(
            scheduler=self._scheduler, reduced_metric=reduced_metric
        )

        if self.scheduler_key is not None:
            runner.epoch_metrics[f"lr/{self.scheduler_key}"] = lr
            if momentum is not None:
                runner.epoch_metrics[
                    f"momentum/{self.scheduler_key}"
                ] = momentum
        else:
            runner.epoch_metrics["lr"] = lr
            if momentum is not None:
                runner.epoch_metrics["momentum"] = momentum

    def on_stage_start(self, runner: IRunner) -> None:
        """Stage start hook: resolve the scheduler and auto-detect the mode.

        Args:
            runner (IRunner): current runner
        """
        self.reduced_metric = self.reduced_metric or runner.main_metric

        scheduler = runner.get_attr(
            key="scheduler", inner_key=self.scheduler_key
        )
        assert scheduler is not None
        self._scheduler = scheduler

        if self.mode is None:
            # BatchScheduler subclasses are designed to be stepped per batch.
            if isinstance(scheduler, BatchScheduler):
                self.mode = "batch"
            else:
                self.mode = "epoch"

        if (
            isinstance(scheduler, OneCycleLRWithWarmup)
            and self.mode == "batch"
        ):
            scheduler.reset()
        assert self.mode is not None

    def on_loader_start(self, runner: IRunner) -> None:
        """Loader start hook: re-sync OneCycle schedules with loader length.

        Args:
            runner (IRunner): current runner
        """
        if (
            runner.is_train_loader
            and isinstance(self._scheduler, OneCycleLRWithWarmup)
            and self.mode == "batch"
        ):
            self._scheduler.recalculate(
                loader_len=runner.loader_len, current_step=runner.epoch - 1
            )

    def on_batch_end(self, runner: IRunner) -> None:
        """Batch end hook: step the scheduler in ``"batch"`` mode.

        Args:
            runner (IRunner): current runner
        """
        if runner.is_train_loader and self.mode == "batch":
            self.step_batch(runner=runner)

    def on_epoch_end(self, runner: IRunner) -> None:
        """Epoch end hook: step the scheduler in ``"epoch"`` mode.

        Args:
            runner (IRunner): current runner
        """
        if self.mode == "epoch":
            self.step_epoch(runner=runner)
class LRUpdater(ABC, Callback):
    """Basic class that all Lr updaters inherit from.

    Subclasses implement :meth:`calc_lr` and :meth:`calc_momentum`; this base
    class writes the computed values into every optimizer param group on each
    train batch and records them in the runner's batch metrics.
    """

    def __init__(self, optimizer_key: str = None):
        """
        Args:
            optimizer_key (str): which optimizer key to use
                for learning rate scheduling
        """
        super().__init__(order=CallbackOrder.scheduler, node=CallbackNode.all)
        self.init_lr = 0
        self.optimizer_key = optimizer_key

    @abstractmethod
    def calc_lr(self):
        """Return the next learning rate, or ``None`` to leave it unchanged."""
        pass

    @abstractmethod
    def calc_momentum(self):
        """Return the next momentum, or ``None`` to leave it unchanged."""
        pass

    @staticmethod
    def _update_lr(optimizer, new_lr) -> None:
        for pg in optimizer.param_groups:
            pg["lr"] = new_lr

    @staticmethod
    def _update_momentum(optimizer, new_momentum) -> None:
        # Adam-style optimizers keep momentum as the first beta; SGD-style
        # optimizers expose a plain "momentum" entry.
        if "betas" in optimizer.param_groups[0]:
            for pg in optimizer.param_groups:
                pg["betas"] = (new_momentum, pg["betas"][1])
        else:
            for pg in optimizer.param_groups:
                pg["momentum"] = new_momentum

    def _update_optimizer(self, optimizer) -> Tuple[float, float]:
        new_lr = self.calc_lr()
        if new_lr is not None:
            self._update_lr(optimizer, new_lr)

        new_momentum = self.calc_momentum()
        if new_momentum is not None:
            self._update_momentum(optimizer, new_momentum)
        else:
            # Nothing to set — report the optimizer's current momentum.
            new_momentum = utils.get_optimizer_momentum(optimizer)

        return new_lr, new_momentum

    def update_optimizer(self, runner: IRunner) -> None:
        """Apply the schedule to the optimizer and log lr/momentum.

        Args:
            runner (IRunner): current runner
        """
        lr, momentum = self._update_optimizer(optimizer=self._optimizer)

        if self.optimizer_key is not None:
            runner.batch_metrics[f"lr_{self.optimizer_key}"] = lr
            runner.batch_metrics[f"momentum_{self.optimizer_key}"] = momentum
        else:
            runner.batch_metrics["lr"] = lr
            runner.batch_metrics["momentum"] = momentum

    def on_stage_start(self, runner: IRunner) -> None:
        """Stage start hook: resolve the optimizer and remember its base lr.

        Args:
            runner (IRunner): current runner
        """
        optimizer = runner.get_attr(
            key="optimizer", inner_key=self.optimizer_key
        )
        assert optimizer is not None
        self._optimizer = optimizer
        self.init_lr = optimizer.defaults["lr"]

    def on_loader_start(self, runner: IRunner) -> None:
        """Loader start hook: apply the schedule before the first batch.

        Args:
            runner (IRunner): current runner
        """
        if runner.is_train_loader:
            self.update_optimizer(runner=runner)

    def on_batch_end(self, runner: IRunner) -> None:
        """Batch end hook: apply the schedule after every train batch.

        Args:
            runner (IRunner): current runner
        """
        if runner.is_train_loader:
            self.update_optimizer(runner=runner)
__all__ = ["SchedulerCallback", "LRUpdater"]
| [
"noreply@github.com"
] | denyhoof.noreply@github.com |
0c29642a190cd6de6f413c71d0e36c58b397b93d | 1fcf291cff3fd4e664b595bbcfdc3afb7c53701a | /fsleyes/displaycontext/shopts.py | 6ca68ffe7554d793b128697465b3112b839e395a | [
"Apache-2.0",
"MIT",
"CC-BY-3.0"
] | permissive | neurodebian/fsleyes | c10af340db9e659b3e13320836da9d6aa30fb2c5 | a790acdb97f4c8c4571b38189e5f9c57c51fa1c9 | refs/heads/master | 2022-06-11T00:01:24.284224 | 2017-12-07T00:12:07 | 2017-12-07T00:12:07 | 113,656,275 | 0 | 0 | null | 2017-12-09T09:02:00 | 2017-12-09T09:01:59 | null | UTF-8 | Python | false | false | 6,976 | py | #!/usr/bin/env python
#
# shopts.py - The SHOpts class.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides the :class:`SHOpts` class, a :class:`.VectorOpts`
class for rendering :class:`.Image` instances which contain fibre orientation
distributions (FODs) in the form of spherical harmonic (SH) coefficients.
"""
import os.path as op
import numpy as np
import fsleyes_props as props
import fsleyes
from . import vectoropts
SH_COEFFICIENT_TYPE = {
    # Asymmetric files store (order + 1)**2 coefficients.  The one-volume
    # asymmetric case (order 0) is indistinguishable from the symmetric one,
    # so it is represented only once below — the original literal listed the
    # key ``1`` twice, and the first (asym) entry was silently discarded.
    9   : ('asym', 2),
    25  : ('asym', 4),
    49  : ('asym', 6),
    81  : ('asym', 8),
    121 : ('asym', 10),
    169 : ('asym', 12),
    225 : ('asym', 14),
    289 : ('asym', 16),

    # Symmetric files store coefficients for even orders only.
    1   : ('sym', 0),
    6   : ('sym', 2),
    15  : ('sym', 4),
    28  : ('sym', 6),
    45  : ('sym', 8),
    66  : ('sym', 10),
    91  : ('sym', 12),
    120 : ('sym', 14),
    153 : ('sym', 16),
}
"""``Image`` files which contain SH coefficients may be symmetric (only
containing coefficients for even spherical functions) or asymmetric
(containing coefficients for odd and even functions). This dictionary provides
mappings from the number coefficients (the volumes contained in the image), to
the file type (either symmetric [``'sym'``] or asymmetric [``'asym'``), and the
maximum SH order that was used in generating the coefficients.
"""
class SHOpts(vectoropts.VectorOpts):
"""The ``SHOpts`` is used for rendering class for rendering :class:`.Image`
instances which contain fibre orientation distributions (FODs) in the form
of spherical harmonic (SH) coefficients. A ``SHOpts`` instance will be
used for ``Image`` overlays with a :attr:`.Displaty.overlayType` set to
``'sh'``.
A collection of pre-calculated SH basis function parameters are stored in
the ``assets/sh/`` directory. Depending on the SH order that was used in
the fibre orientation, and the desired display resolution (controlled by
:attr:`shResolution`), a different set of parameters needs to be used.
The :meth:`getSHParameters` method will load and return the corrrect
set of parameters.
"""
shResolution = props.Int(minval=3, maxval=10, default=5)
"""Resolution of the sphere used to display the FODs at each voxel. The
value is equal to the number of iterations that an isocahedron, starting
with 12 vertices, is tessellated. The resulting number of vertices is
as follows:
==================== ==================
Number of iterations Number of vertices
3 92
4 162
5 252
6 362
7 492
8 642
9 812
10 1002
==================== ==================
"""
shOrder = props.Choice(allowStr=True)
"""Maximum spherical harmonic order to visualise. This is populated in
:meth:`__init__`.
"""
size = props.Percentage(minval=10, maxval=500, default=100)
"""Display size - this is simply a linear scaling factor. """
lighting = props.Boolean(default=False)
"""Apply a simple directional lighting model to the FODs. """
radiusThreshold = props.Real(minval=0.0, maxval=1.0, default=0.05)
"""FODs with a maximum radius that is below this threshold are not shown.
"""
colourMode = props.Choice(('direction', 'radius'))
"""How to colour each FOD. This property is overridden if the
:attr:`.VectorOpts.colourImage` is set.
- ``'direction'`` The vertices of an FOD are coloured according to their
x/y/z location (see :attr:`xColour`, :attr:`yColour`,
and :attr:`zColour`).
- ``'radius'`` The vertices of an FOD are coloured according to their
distance from the FOD centre (see :attr:`colourMap`).
"""
def __init__(self, *args, **kwargs):
vectoropts.VectorOpts.__init__(self, *args, **kwargs)
ncoefs = self.overlay.shape[3]
shType, maxOrder = SH_COEFFICIENT_TYPE.get(ncoefs)
if shType is None:
raise ValueError('{} does not look like a SH '
'image'.format(self.overlay.name))
self.__maxOrder = maxOrder
self.__shType = shType
# If this Opts instance has a parent,
# the shOrder choices will be inherited
if self.getParent() is None:
if shType == 'sym': vizOrders = range(0, self.__maxOrder + 1, 2)
elif shType == 'asym': vizOrders = range(0, self.__maxOrder + 1)
self.getProp('shOrder').setChoices(list(vizOrders), instance=self)
self.shOrder = vizOrders[-1]
@property
def shType(self):
"""Returns either ``'sym'`` or ``'asym'``, depending on the type
of the SH coefficients contained in the file.
"""
return self.__shType
@property
def maxOrder(self):
    """The maximum SH order that was used to generate the coefficients
    of the SH image.
    """
    order = self.__maxOrder
    return order
def getSHParameters(self):
    """Load and return a ``numpy`` array containing pre-calculated SH
    function parameters for the current maximum SH order and display
    resolution.

    :returns: an array of shape ``(N, C)``, where ``N`` is the number
              of vertices used to represent each FOD, and ``C`` is the
              number of SH coefficients.
    """

    # TODO Adjust matrix if shOrder is less than
    #      its maximum possible value for this
    #      image. Also, calculate the normal vectors.
    res    = self.shResolution
    order  = self.shOrder
    ncoefs = self.overlay.shape[3]
    ftype  = SH_COEFFICIENT_TYPE[ncoefs][0]

    fname  = '{}_coef_{}_{}.txt'.format(ftype, res, order)
    fpath  = op.join(fsleyes.assetDir, 'assets', 'sh', fname)
    params = np.loadtxt(fpath)

    # A single-coefficient file loads as a 1D
    # array - force a 2D (N, 1) layout instead.
    if params.ndim == 1:
        params = params.reshape((-1, 1))

    return params
def getVertices(self):
    """Load and return an ``(N, 3)`` ``numpy`` array containing the
    ``N`` vertices of a tessellated sphere.
    """
    sphereFile = 'vert_{}.txt'.format(self.shResolution)
    spherePath = op.join(fsleyes.assetDir, 'assets', 'sh', sphereFile)
    return np.loadtxt(spherePath)
def getIndices(self):
    """Load and return a 1D ``numpy`` array of indices into the vertex
    array, specifying the order in which vertices are drawn as
    triangles.
    """
    faceFile = 'face_{}.txt'.format(self.shResolution)
    facePath = op.join(fsleyes.assetDir, 'assets', 'sh', faceFile)
    faces    = np.loadtxt(facePath)
    return faces.flatten()
| [
"pauld.mccarthy@gmail.com"
] | pauld.mccarthy@gmail.com |
0e501d9dd75b74a72158bedfcf1627e37e68a190 | 976628c3df55a8611159dc5a93965dff4809bb7f | /app/config.py | 5c402f095919b10f15ec13009022d1daac61c1f8 | [
"MIT"
] | permissive | songzhi/fastapi-templates | cf64f28378795e6c3cca164c37bfe30253df4be0 | 5364610ff37328cf82d1de93bbdf99811b2a4d12 | refs/heads/master | 2022-11-27T17:53:24.724477 | 2020-07-27T01:23:10 | 2020-07-27T01:23:10 | 265,873,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | from datetime import timezone, timedelta
import os
from pathlib import Path
import sys
# Project base directory. When running as a frozen (e.g. PyInstaller)
# executable, __file__ is not reliable, so fall back to the directory
# containing the executable.
BASE_DIR = Path(os.path.dirname(__file__) if not getattr(
    sys, 'frozen', False) else os.path.dirname(sys.executable))

# Application timezone: fixed UTC+8 offset.
TZ = timezone(timedelta(hours=8))

# Bug fix: the original `os.environ.get('TEST', False) is not None` was
# ALWAYS True, because the `False` default is not None. These flags are
# meant to reflect whether the corresponding environment variable is set.
IS_TEST: bool = os.environ.get('TEST') is not None
IS_DEV: bool = os.environ.get('DEV') is not None

# SECURITY NOTE(review): hard-coded secrets checked into source control -
# these should be loaded from environment variables or a secrets manager
# before any production deployment.
AES_KEY = 'lqnqp20serj)4fht'
SECRET_KEY = "09d25e094faa6ca2226c818166b7a2363b93f7099f6f0f4caa6cf63b88e8d3e7"

# JWT signing algorithm and access-token lifetime (24 hours, in minutes).
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24

DATABASE_URL = 'mongodb://localhost:12138'
| [
"lsongzhi@163.com"
] | lsongzhi@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.