text
string
size
int64
token_count
int64
import os


class Credentials:
    """Twitter API credentials, all sourced from environment variables."""

    API_KEY = os.getenv('API_KEY')
    API_SECRET_KEY = os.getenv('API_SECRET_KEY')
    # Access tokens
    # NOTE(review): the attribute is ACCESS_TOKEN_SECRET but the env var is
    # 'ACCESS_SECRET_TOKEN' — confirm the env var name is intentional.
    ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
    ACCESS_TOKEN_SECRET = os.getenv('ACCESS_SECRET_TOKEN')


class Settings:
    """Stream-tracking settings plus the MySQL table column layouts."""

    TRACK_WORDS = 'Technology'
    TABLE_NAME = "twttechnology"
    # Column definitions for the single-keyword tweet table.
    TABLE_ATTRIBUTES = (
        "id INTEGER PRIMARY KEY AUTO_INCREMENT, id_tweet VARCHAR(255), "
        "created_at DATETIME, text VARCHAR(255), polarity INT, subjectivity INT, "
        "user_created_at VARCHAR(255), user_location VARCHAR(255), "
        "user_description VARCHAR(255), user_followers_count INT, "
        "longitude DOUBLE, latitude DOUBLE, retweet_count INT, favorite_count INT"
    )
    # Same layout with an extra word_id column for multi-word tracking.
    TABLE_ATTRIBUTES_2 = (
        "id INTEGER PRIMARY KEY AUTO_INCREMENT, word_id INTEGER, "
        "id_tweet VARCHAR(255), created_at DATETIME, text VARCHAR(255), "
        "polarity INT, subjectivity INT, user_created_at VARCHAR(255), "
        "user_location VARCHAR(255), user_description VARCHAR(255), "
        "user_followers_count INT, longitude DOUBLE, latitude DOUBLE, "
        "retweet_count INT, favorite_count INT"
    )


class Dbsettings:
    """MySQL connection settings for the two target databases."""

    HOST = os.getenv('MYSQL_HOST')
    USER = os.getenv('MYSQL_USER')
    PASSWORD = os.getenv('MYSQL_PASSWORD')
    DATABASE = os.getenv('MYSQL_DB')
    PORT = os.getenv('MYSQL_PORT')
    # Database for the NLP tech project
    HOST2 = os.getenv('MYSQL_HOST_2')
    USER2 = os.getenv('MYSQL_USER_2')
    PASSWORD2 = os.getenv('MYSQL_PASSWORD_2')
    DATABASE2 = os.getenv('MYSQL_DB_2')
    PORT2 = os.getenv('MYSQL_PORT_2')
1,581
592
from .library import * from .differentiation import * from .sindy_ball import SINDyBall from .tests import * from .utils import *
130
42
from django.db import models
from django.contrib.auth.models import AbstractUser


class User(AbstractUser):
    """Custom user model that tags each account as an admin or a player."""

    # Role constants; also used as the stored values for `type`.
    ADMIN = 'admin'
    PLAYER = 'player'
    TYPE_CHOICES = (
        (ADMIN, "Admin"),
        (PLAYER, "Player")
    )
    # max_length=6 fits the longest stored value ('player'); new accounts
    # default to the player role.
    type = models.CharField(choices=TYPE_CHOICES, max_length=6, default=PLAYER)
341
114
#!/usr/bin/python
import glob


def main():
    """Print every Python file found one directory level up (../*/*.py)."""
    pyfiles = glob.glob("../*/*.py")
    for pyfile in pyfiles:
        # BUG FIX: `print "..."` is Python 2 statement syntax and a
        # SyntaxError under Python 3; use the print() function.
        print("pyfile %s" % pyfile)


main()
151
56
import base64
import datetime
import email
import logging
import os
import typing
from email.message import Message

from googleapiclient import errors

from email_scrapper.models import Stores
from email_scrapper.readers.base_reader import BaseReader

logger = logging.getLogger(__name__)


class GmailReader(BaseReader):
    """Read store emails from a Gmail mailbox through the Gmail API."""

    # Read-only scope: this reader never modifies the mailbox.
    SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']

    def __init__(self, service, user_id: str = "me", user_email: str = None, email_mapping: dict = None,
                 date_from: datetime.datetime = None):
        """
        Parameters
        ----------
        service:
            The Gmail API service
        user_id: str
            Gmail user id to query ("me" means the authenticated account)
        user_email: str
            Address of the account being read
        email_mapping: dict
            Mapping of :class:`Stores` to str representing the email to search from
        date_from: datetime.datetime
            Only consider messages received after this date
        """
        super(GmailReader, self).__init__(date_from=date_from, user_email=user_email, email_mapping=email_mapping)
        self.service = service
        self.user_id = user_id

    @classmethod
    def authenticate_with_browser(cls, credentials_json: dict = None, date_from: datetime.datetime = None):
        """Login to gmail through the browser.

        Requires a credentials.json file or a credentials_json dict passed.
        The OAuth token is cached in ./token.pickle for subsequent runs.

        Returns
        -------
        GmailReader

        Raises
        ------
        ImportError
            If the Google auth libraries are not installed.
        """
        try:
            from google_auth_oauthlib.flow import InstalledAppFlow
            from googleapiclient.discovery import build
            import pickle
            creds = None
            if os.path.exists('token.pickle'):
                with open('token.pickle', 'rb') as token:
                    creds = pickle.load(token)
            if not creds or not creds.valid:
                if credentials_json:
                    flow = InstalledAppFlow.from_client_config(credentials_json, GmailReader.SCOPES)
                else:
                    flow = InstalledAppFlow.from_client_secrets_file(
                        'credentials.json', GmailReader.SCOPES)
                creds = flow.run_local_server(port=0)
                # Save the credentials for the next run
                with open('token.pickle', 'wb') as token:
                    pickle.dump(creds, token)
            service = build('gmail', 'v1', credentials=creds)
            response = service.users().getProfile(userId="me").execute()
            return cls(service, user_id="me", user_email=response.get("emailAddress"), date_from=date_from)
        except (ImportError, ModuleNotFoundError) as exc:
            # BUG FIX: was `raise BaseException(...)`, which escapes normal
            # `except Exception` handlers; raise the specific error instead
            # and chain the original cause.
            raise ImportError("Google Auth library not found") from exc

    def _get_search_date_range(self) -> str:
        # Gmail's `after:` search operator expects YYYY-MM-DD.
        return self.search_date_range.strftime("%Y-%m-%d")

    def _get_email_details(self, message) -> Message:
        """Fetch one message by id and parse its raw base64url RFC 2822 payload."""
        response = self.service.users().messages().get(userId=self.user_id, id=message['id'],
                                                       format="raw").execute()
        msg_str = base64.urlsafe_b64decode(response['raw'].encode('ASCII'))
        mime_msg = email.message_from_bytes(msg_str)
        return mime_msg

    def _get_search_query(self, store: Stores, subject: str = None):
        # NOTE(review): `subject` is accepted but never used in the query —
        # confirm whether a `subject:` clause was intended.
        return f"from:{self._get_store_email(store)} after:{self._get_search_date_range()}"

    def read_store_emails(self, store: Stores, subject: str = None) -> typing.Generator[Message, None, None]:
        """Yield every parsed message from *store* within the search window,
        following Gmail's nextPageToken pagination.

        BUG FIX: the return annotation said Generator[str, ...] but the
        method yields email.message.Message objects.
        """
        query = self._get_search_query(store, subject)
        try:
            response = self.service.users().messages().list(userId=self.user_id, q=query).execute()
            if 'messages' in response:
                for message in response['messages']:
                    yield self._get_email_details(message)
            while 'nextPageToken' in response:
                page_token = response['nextPageToken']
                response = self.service.users().messages().list(userId=self.user_id, q=query,
                                                                pageToken=page_token).execute()
                for message in response['messages']:
                    yield self._get_email_details(message)
        except errors.HttpError as error:
            # BUG FIX: was a bare print(); route failures through the module
            # logger like the rest of the module.
            logger.error('An error occurred: %s', error)
4,073
1,102
class Rails:
    """Rail-fence cipher helper: a fixed number of rails, each a list of
    characters, filled either row-by-row (linear) or in zig-zag order."""

    def __init__(self, num_rails):
        self.num_rails = num_rails
        self.rails = [[] for _ in range(num_rails)]

    def populate_rails_linear(self, message, rail_lengths):
        """Fill the rails row by row, taking rail_lengths[i] chars for rail i."""
        chars = iter(message)
        for rail in self.linear_iterator(rail_lengths):
            rail.append(next(chars))

    def populate_rails_zig_zag(self, message):
        """Fill the rails in zig-zag (down-then-up) order."""
        chars = iter(message)
        for rail in self.zig_zag_iterator(message):
            rail.append(next(chars))

    def to_string_linear(self):
        """Concatenate the rails top to bottom."""
        return ''.join(ch for rail in self.rails for ch in rail)

    def to_string_zig_zag(self, message):
        """Read the rails back in zig-zag order, consuming each rail's front."""
        return ''.join(rail.pop(0) for rail in self.zig_zag_iterator(message))

    def linear_iterator(self, rail_lengths):
        """Yield each rail object rail_lengths[i] times, top to bottom."""
        for rail, length in zip(self.rails, rail_lengths):
            for _ in range(length):
                yield rail

    def zig_zag_iterator(self, message):
        """Yield one rail per message character, bouncing between the ends."""
        row, going_down = 0, True
        for _ in message:
            yield self.rails[row]
            going_down = self.direction(row, going_down)
            row = self.increment_index(row, going_down)

    def increment_index(self, index, increasing):
        return index + 1 if increasing else index - 1

    def direction(self, index, increasing):
        # Bounce off the top and bottom rails; otherwise keep going.
        if index == 0:
            return True
        if index == self.num_rails - 1:
            return False
        return increasing


def encode(message, num_rails):
    """Rail-fence encode: lay the message out in zig-zag, read rows linearly."""
    rails = Rails(num_rails)
    rails.populate_rails_zig_zag(message)
    return rails.to_string_linear()


def decode(message, num_rails):
    """Rail-fence decode: recover each rail's length from a dummy zig-zag
    pass, refill the rails linearly, then read back in zig-zag order."""
    faulty_rails = Rails(num_rails)
    faulty_rails.populate_rails_zig_zag(message)
    rail_lengths = [len(rail) for rail in faulty_rails.rails]
    rails = Rails(num_rails)
    rails.populate_rails_linear(message, rail_lengths)
    return rails.to_string_zig_zag(message)
2,026
615
# Element-wise sum of two small lists; the first entry is then spelled out
# as an English word before printing.
a = [1, 2, 3]
b = [1, 1, 1]
# d = {1: "ONE", 2: "TWO", 3: "THREE", 4: "FOUR", 5: "FIVE", 6: "SIX"}
f = [x + y for x, y in zip(a, b)]
if f[0] == 1:
    f[0] = "ONE"
elif f[0] == 2:
    f[0] = "TWO"
print(f)
187
130
'''
A+B for Input-Output Practice (IV)

Description
Your task is to Calculate the sum of some integers.

Input
Input contains multiple test cases. Each test case contains a integer N, and
then N integers follow in the same line. A test case starting with 0
terminates the input and this test case is not to be processed.

Output
For each group of input integers you should output their sum in one line,
and with one line of output for each line in input.

Sample Input
4 1 2 3 4
5 1 2 3 4 5
0

Sample Output
10
15
'''
while(True):
    # split() with no argument splits on any whitespace (spaces, \n, \t, ...);
    # using split(" ") caused a Runtime Error on the judge.
    input_list = list(map(int, input().split()))
    # First number is the count N; a leading 0 terminates the input.
    n = input_list[0]
    if n == 0:
        break
    # NOTE(review): `sum` shadows the builtin of the same name within this loop.
    sum = 0
    for i in range(n):
        sum = sum + input_list[i + 1]
    print(sum)
748
288
import multiprocessing
import os
import os.path
import pickle

import librosa
import numpy as np
from scipy import signal


def audio_extract(path, audio_name, audio_path, sr=16000):
    """Compute a normalised log-spectrogram for one audio file and pickle it
    to `path`/`audio_name`.pkl.

    NOTE(review): the `sr` parameter is never used — librosa.load() runs with
    its default sample rate here; confirm whether `sr=sr` was intended.
    """
    save_path = path
    samples, samplerate = librosa.load(audio_path)
    # Tile the clip and cut to a fixed 160000 samples so every output has the
    # same time dimension.
    resamples = np.tile(samples, 10)[:160000]
    # Clamp amplitudes to [-1, 1].
    resamples[resamples > 1.] = 1.
    resamples[resamples < -1.] = -1.
    frequencies, times, spectrogram = signal.spectrogram(resamples, samplerate, nperseg=512, noverlap=353)
    # Log scale; the epsilon avoids log(0).
    spectrogram = np.log(spectrogram + 1e-7)
    # Standardise to zero mean / unit variance (epsilon guards division by 0).
    mean = np.mean(spectrogram)
    std = np.std(spectrogram)
    spectrogram = np.divide(spectrogram - mean, std + 1e-9)
    # 257 frequency bins x 1004 frames follows from the fixed input length and
    # the nperseg/noverlap choices above.
    assert spectrogram.shape == (257, 1004)
    save_name = os.path.join(save_path, audio_name + '.pkl')
    print(save_name)
    with open(save_name, 'wb') as fid:
        pickle.dump(spectrogram, fid)


class Consumer(multiprocessing.Process):
    """Worker that pulls [save_dir, name, audio_path] tasks from a
    JoinableQueue until it receives a None poison pill."""

    def __init__(self, task_queue):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue

    def run(self):
        proc_name = self.name
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                # Poison pill means shutdown
                print('{}: Exiting'.format(proc_name))
                self.task_queue.task_done()
                break
            # print(next_task)
            audio_extract(next_task[0], next_task[1], next_task[2])
            self.task_queue.task_done()


if __name__ == '__main__':
    # Establish communication queues
    tasks = multiprocessing.JoinableQueue()

    # Start consumers: one worker per CPU core.
    num_consumers = multiprocessing.cpu_count()
    print('Creating {} consumers'.format(num_consumers))
    consumers = [
        Consumer(tasks)
        for i in range(num_consumers)
    ]
    for w in consumers:
        w.start()

    # path='data/'
    save_dir = '/home/xiaokang_peng/data/AVE_av/audio_spec'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    path_origin = '/home/xiaokang_peng/data/AVE_av/audio'
    audios = os.listdir(path_origin)
    for audio in audios:
        audio_name = audio
        audio_path = os.path.join(path_origin, audio)
        # audio_name[:-4] strips the file extension (assumes a 3-char ext).
        tasks.put([save_dir, audio_name[:-4], audio_path])

    # Add a poison pill for each consumer
    for i in range(num_consumers):
        tasks.put(None)

    # Wait for all of the tasks to finish
    tasks.join()
    print("ok")
2,421
841
import inspect
import logging

from protean.container import Element, OptionsMixin
from protean.core.event import BaseEvent
from protean.exceptions import IncorrectUsageError
from protean.utils import DomainObjects, derive_element_class, fully_qualified_name
from protean.utils.mixins import HandlerMixin

logger = logging.getLogger(__name__)


class BaseEventHandler(Element, HandlerMixin, OptionsMixin):
    """Base Event Handler to be inherited by all event handlers"""

    element_type = DomainObjects.EVENT_HANDLER

    class Meta:
        abstract = True

    @classmethod
    def _default_options(cls):
        """Default Meta options; stream_name falls back to the aggregate's
        stream name when an aggregate_cls is already configured."""
        aggregate_cls = (
            getattr(cls.meta_, "aggregate_cls")
            if hasattr(cls.meta_, "aggregate_cls")
            else None
        )
        return [
            ("aggregate_cls", None),
            ("stream_name", aggregate_cls.meta_.stream_name if aggregate_cls else None),
            ("source_stream", None),
        ]

    def __new__(cls, *args, **kwargs):
        # The base class is abstract and must never be instantiated directly.
        if cls is BaseEventHandler:
            raise TypeError("BaseEventHandler cannot be instantiated")
        return super().__new__(cls)


def event_handler_factory(element_cls, **opts):
    """Derive an event handler element from `element_cls`, validate its
    association with an aggregate or stream, and build its handler map.

    Raises
    ------
    IncorrectUsageError
        If neither aggregate_cls nor stream_name is configured.
    """
    element_cls = derive_element_class(element_cls, BaseEventHandler, **opts)

    if not (element_cls.meta_.aggregate_cls or element_cls.meta_.stream_name):
        raise IncorrectUsageError(
            {
                "_entity": [
                    f"Event Handler `{element_cls.__name__}` needs to be associated with an aggregate or a stream"
                ]
            }
        )

    # Iterate through methods marked as `@handle` and construct a handler map
    #
    # Also, if `_target_cls` is an event, associate it with the event handler's
    # aggregate or stream
    methods = inspect.getmembers(element_cls, predicate=inspect.isroutine)
    for method_name, method in methods:
        # Skip dunder methods; only decorated methods carry `_target_cls`.
        if not (
            method_name.startswith("__") and method_name.endswith("__")
        ) and hasattr(method, "_target_cls"):
            # `_handlers` is a dictionary mapping the event to the handler method.
            if method._target_cls == "$any":
                # This replaces any existing `$any` handler, by design. An Event Handler
                # can have only one `$any` handler method.
                element_cls._handlers["$any"] = {method}
            else:
                element_cls._handlers[fully_qualified_name(method._target_cls)].add(
                    method
                )

            # Associate Event with the handler's stream
            if inspect.isclass(method._target_cls) and issubclass(
                method._target_cls, BaseEvent
            ):
                # Order of preference:
                #   1. Stream name defined in event
                #   2. Stream name defined for the event handler
                #   3. Stream name derived from aggregate
                stream_name = element_cls.meta_.stream_name or (
                    element_cls.meta_.aggregate_cls.meta_.stream_name
                    if element_cls.meta_.aggregate_cls
                    else None
                )
                method._target_cls.meta_.stream_name = (
                    method._target_cls.meta_.stream_name or stream_name
                )

    return element_cls
3,317
865
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 10:12:26 2021

@author: Florian Jehn
"""
import os
import pandas as pd
import numpy as np


def read_ipcc_counts_temp():
    """reads all counts of temperatures for all reports and makes one df"""
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "temperatures")
    all_df = pd.DataFrame()
    for file in files:
        file_df = pd.read_csv("Results" + os.sep + "temperatures" + os.sep + file, sep=";", index_col=0)
        # file[:-4] strips the ".csv" extension to use as the column name.
        file_df.columns = [file[:-4]]
        all_df = pd.concat([all_df, file_df], axis=1)
    return all_df.transpose()


def read_ipcc_counts_rfc():
    """reads all counts of reasons of concern for all reports and makes one df"""
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "reasons_for_concern")
    all_df = pd.DataFrame()
    for file in files:
        file_df = pd.read_csv("Results" + os.sep + "reasons_for_concern" + os.sep + file, sep=";", index_col=0)
        file_df.columns = [file[:-4]]
        all_df = pd.concat([all_df, file_df], axis=1)
    return all_df.transpose()


def read_false_positive():
    """reads in all the counted false/true positive rates for the temperatures
    in the IPCC and calculates a true positive rate for each entry"""
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "false_positive_check_files")
    all_df = pd.DataFrame()
    for file in files:
        # only read those files that contains the counting results
        if "results" not in file:
            continue
        file_df = pd.read_csv("Results" + os.sep + "false_positive_check_files" + os.sep + file, sep=",", index_col=0)
        # calculate the true positive rate
        file_df["True Positive Rate [%]"] = (file_df["n true positive"]/(file_df["n true positive"]+file_df["n false positive"]))*100
        # Arange the df for seaborn
        file_df["Temperature [°C]"] = file_df.index
        file_df.reset_index(inplace=True, drop=True)
        all_df = pd.concat([all_df, file_df])
    return all_df


def scale_counts(ipcc_counts):
    """scale the counts by overall sum (per-row percentages)"""
    sums = ipcc_counts.sum(axis=1)
    for col in ipcc_counts:
        ipcc_counts[col] = ipcc_counts[col]/sums*100
    return ipcc_counts


def read_meta():
    """reads in the meta data of the reports"""
    meta = pd.read_csv("Reports" + os.sep + "meta_data_reports.tsv", sep="\t")
    # Year as string so it can be used as a categorical plot label.
    meta["Year"] = meta["Year"].astype("str")
    return meta


def group_temps(ipcc_counts):
    """groups the temperatures into three categories"""
    # Column keys carry a leading space (e.g. " 0.5°C") — see create_temp_keys().
    ipcc_counts["0.5°C - 2°C"] = ipcc_counts[" 0.5°C"] + ipcc_counts[" 1°C"] + ipcc_counts[" 1.5°C"] +ipcc_counts[" 2°C"]
    ipcc_counts["2.5°C - 4°C"] = ipcc_counts[" 2.5°C"] + ipcc_counts[" 3°C"] + ipcc_counts[" 3.5°C"] +ipcc_counts[" 4°C"]
    ipcc_counts["≥ 4.5°C"] = ipcc_counts[" 4.5°C"] + ipcc_counts[" 5°C"] + ipcc_counts[" 5.5°C"] +ipcc_counts[" 6°C"] +ipcc_counts[" 6.5°C"] + ipcc_counts[" 7°C"] + ipcc_counts[" 7.5°C"] +ipcc_counts[" 8°C"] + ipcc_counts[" 8.5°C"] + ipcc_counts[" 9°C"] + ipcc_counts[" 9.5°C"] +ipcc_counts[" 10°C"]
    # Keep only the three aggregated columns appended after the 20 raw ones.
    return ipcc_counts.iloc[:,20:]


def merge_counts_meta(ipcc_counts, meta):
    """merges the df with the counted temperatures/rfcs with the metadata"""
    return pd.merge(meta, ipcc_counts, right_index=True, left_on="count_names")


def lookup_names():
    """"Returns lookup dict for different files names to merge them"""
    lookup_dict = {
        "IPCC_AR6_WGI_Full_Report":"counts_IPCC_AR6_WGI_Full_Report_parsed",
        "SROCC_FullReport_FINAL":"counts_SROCC_FullReport_FINAL_parsed",
        "210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES":"counts_210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES_parsed",
        "SR15_Full_Report_Low_Res":"counts_SR15_Full_Report_Low_Res_parsed",
        "SYR_AR5_FINAL_full":"counts_SYR_AR5_FINAL_full_wcover_parsed",
        "ipcc_wg3_ar5_full":"counts_ipcc_wg3_ar5_full_parsed",
        "WGIIAR5-PartA_FINAL":"counts_WGIIAR5-PartA_FINAL_parsed",
        "WGIIAR5-PartB_FINAL":"counts_WGIIAR5-PartB_FINAL_parsed",
        "WG1AR5_all_final":"counts_WG1AR5_all_final_parsed",
        "SREX_Full_Report-1":"counts_SREX_Full_Report-1_parsed",
        "SRREN_Full_Report-1":"counts_SRREN_Full_Report-1_parsed",
        "ar4_syr_full_report":"counts_ar4_syr_full_report_parsed",
        "ar4_wg2_full_report":"counts_ar4_wg2_full_report_parsed",
        "ar4_wg1_full_report-1":"counts_ar4_wg1_full_report-1_parsed",
        "ar4_wg3_full_report-1":"counts_ar4_wg3_full_report-1_parsed",
        "sroc_full-1":"counts_sroc_full-1_parsed",
        "srccs_wholereport-1":"counts_srccs_wholereport-1_parsed",
        "SYR_TAR_full_report":"counts_SYR_TAR_full_report_parsed",
        "WGII_TAR_full_report-2":"counts_WGII_TAR_full_report-2_parsed",
        "WGI_TAR_full_report":"counts_WGI_TAR_full_report_parsed",
        "WGIII_TAR_full_report":"counts_WGIII_TAR_full_report_parsed",
        "srl-en-1":"counts_srl-en-1_parsed",
        # NOTE(review): "_parsedd" below looks like a typo — confirm against
        # the actual counts file name before changing it.
        "srtt-en-1":"counts_srtt-en-1_parsedd",
        "emissions_scenarios-1":"counts_emissions_scenarios-1_parsed",
        "av-en-1":"counts_av-en-1_parsed",
        "The-Regional-Impact":"counts_The-Regional-Impact_parsed",
        "2nd-assessment-en-1":"counts_2nd-assessment-en-1_parsed",
        "ipcc_sar_wg_III_full_report":"counts_ipcc_sar_wg_III_full_report_parsed",
        "ipcc_sar_wg_II_full_report":"counts_ipcc_sar_wg_II_full_report_parsed",
        "ipcc_sar_wg_I_full_report":"counts_ipcc_sar_wg_I_full_report_parsed",
        "climate_change_1994-2":"counts_climate_change_1994-2_parsed",
        # "ipcc-technical-guidelines-1994n-1":"", # could not read in, but also contains no temp mentions
        "ipcc_wg_I_1992_suppl_report_full_report":"counts_ipcc_wg_I_1992_suppl_report_full_report_parsed",
        "ipcc_wg_II_1992_suppl_report_full_report":"counts_ipcc_wg_II_1992_suppl_report_full_report_parsed",
        "ipcc_90_92_assessments_far_full_report":"counts_ipcc_90_92_assessments_far_full_report_parsed",
        "ipcc_far_wg_III_full_report":"counts_ipcc_far_wg_III_full_report_parsed",
        "ipcc_far_wg_II_full_report":"counts_ipcc_far_wg_II_full_report_parsed",
        "ipcc_far_wg_I_full_report":"counts_ipcc_far_wg_I_full_report_parsed",
    }
    return lookup_dict


def create_temp_keys():
    """Creates a list of strings for all temperatures the paper looked at"""
    temps = []
    # 0.5 to 10.0 in 0.5 steps; whole degrees are rendered without a decimal.
    for i,temp in enumerate(np.arange(0.5,10.1,0.5)):
        if i % 2 != 0:
            temps.append(" "+str(int(temp))+"°C")
        else:
            temps.append(" "+str(temp)+"°C" )
    return temps


def combine_all_raw_strings():
    """combines all raw strings into one big file to search through"""
    reports = [file for file in os.listdir(os.getcwd() + os.sep + "Raw IPCC Strings") if file[-4:] == ".csv" ]
    all_reports = " "
    for report in reports:
        print("Starting with " + report)
        report_df = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
        report_list = report_df[report_df.columns[0]].tolist()
        report_str = " ".join([str(item) for item in report_list])
        all_reports += report_str
    with open(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + "all_ipcc_strings.csv", 'w', encoding='utf-8') as f:
        # this file is not included in the repository, as it is too large for Github
        f.write(all_reports)


if __name__ == "__main__":
    combine_all_raw_strings()
7,508
3,073
from pytest import fixture
from zev.get_filesize import get_filesize


# NOTE(review): the `fixture` import appears unused in this chunk — confirm
# it is not needed before removing.
# `empty_filepath` is a pytest fixture defined elsewhere (e.g. conftest.py);
# an empty file must report a size of zero.
def test_get_filesize(empty_filepath):
    assert get_filesize(empty_filepath) == 0
156
55
def build_speech(person, quote):
    """Return the attribution line: a leading newline, then `person says, "quote"`."""
    return f'\n{person} says, "{quote}"'


def main():
    """Prompt for a quote and its author, then print the attribution line."""
    quote = input("What is the quote?\n")
    person = input("Who said it?\n")
    print(build_speech(person, quote))


# Guarding the entry point stops the prompts from firing on import.
if __name__ == "__main__":
    main()
179
68
# This program calculates the successive values of the following
# calculation: next value is computed from the positive integer entered by
# the user — if it is even divide it by 2, if it is odd multiply by 3 and
# add 1. The sequence ends when the current value is 1.


def collatz_steps(pnumber):
    """Return the list of successive Collatz values from pnumber down to 1.

    Returns an empty list when pnumber is not positive.
    """
    steps = []
    while pnumber > 0:
        steps.append(pnumber)
        if pnumber == 1:
            break
        if pnumber % 2 == 0:
            # BUG FIX: `/` is true division in Python 3 and would turn the
            # sequence into floats; use integer division.
            pnumber //= 2
        else:
            pnumber = pnumber * 3 + 1
    return steps


def main():
    """Read a number from the user and print its Collatz sequence."""
    pnumber = int(input("Enter a positive integer here:"))
    if pnumber > 0:
        for value in collatz_steps(pnumber):
            print(value)
    elif pnumber < 0:
        # BUG FIX: this line used Python 2 `print x, "..."` statement syntax,
        # a SyntaxError under Python 3; the original `while ... break` that
        # wrapped it only ever ran once, so a plain branch is equivalent.
        print(pnumber, "is not a positive integer.")
    print("Thank you so much for using my program")


if __name__ == "__main__":
    main()
1,151
328
# main function
def has33(nums):
    """Print and return whether the list contains two adjacent 3s.

    BUG FIX: the original `return print('True')` always returned None;
    the printed output is preserved but a usable bool is now returned.
    """
    # Walk adjacent pairs; stop at the first 3,3 found.
    for first, second in zip(nums, nums[1:]):
        if first == 3 and second == 3:
            print('True')
            return True
    print('False')
    return False


has33([1, 3, 3])
has33([3, 1, 3])
has33([3, 3, 3])
has33([1, 3, 1, 3])
404
175
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings

from products.views import (products, index, products_detail)
from rest_framework_jwt.views import refresh_jwt_token
from users.views import ObtainCustomJSONWebToken

# Routes mounted under /api/ through the namespaced include below.
apipatterns = [
    path('', include('products.urls')),
]

urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include((apipatterns, 'api'), namespace='api')),
    path('', index, name='index'),
    path('products/', products, name='products'),
    path('products/<int:product_id>/', products_detail, name='products_detail'),
    path('', include('users.urls'), name='users'),
    # JWT auth: token issue and refresh endpoints.
    path('sign-in/', ObtainCustomJSONWebToken.as_view()),
    path('api/sign-in/refresh', refresh_jwt_token)
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# static() serves uploaded media in development.
914
277
from survos2.config import Config
import numpy as np
from numpy.lib.function_base import flip
from qtpy import QtWidgets
from qtpy.QtWidgets import QPushButton, QRadioButton

from survos2.frontend.components.base import *
from survos2.frontend.components.entity import (
    SmallVolWidget,
    TableWidget,
    setup_entity_table,
    setup_bb_table,
)
from survos2.frontend.components.icon_buttons import IconButton
from survos2.frontend.control import Launcher
from survos2.frontend.plugins.base import *
from survos2.frontend.plugins.plugins_components import MultiSourceComboBox
from survos2.frontend.utils import FileWidget
from survos2.improc.utils import DatasetManager
from survos2.model import DataModel
from survos2.server.state import cfg
from survos2.frontend.plugins.features import FeatureComboBox
from survos2.frontend.plugins.annotations import LevelComboBox
from survos2.entity.patches import PatchWorkflow, organize_entities, make_patches


class ObjectComboBox(LazyComboBox):
    """Lazy combo box listing existing objects of kind 'points' or 'boxes'."""

    def __init__(self, full=False, header=(None, "None"), parent=None):
        self.full = full
        super().__init__(header=header, parent=parent)

    def fill(self):
        """Query the server for existing objects and add them as items."""
        params = dict(workspace=True, full=self.full)
        result = Launcher.g.run("objects", "existing", **params)
        logger.debug(f"Result of objects existing: {result}")
        if result:
            # self.addCategory("Points")
            for fid in result:
                if result[fid]["kind"] == "points":
                    self.addItem(fid, result[fid]["name"])
                elif result[fid]["kind"] == "boxes":
                    self.addItem(fid, result[fid]["name"])


@register_plugin
class ObjectsPlugin(Plugin):
    """'Objects' tab: manages object datasets (points/boxes/patches) as cards."""

    __icon__ = "fa.picture-o"
    __pname__ = "objects"
    __views__ = ["slice_viewer"]
    __tab__ = "objects"

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.vbox = VBox(self, spacing=10)
        self.objects_combo = ComboBox()
        self.vbox.addWidget(self.objects_combo)
        # Maps objectsid -> ObjectsCard widget currently shown.
        self.existing_objects = {}
        self.objects_layout = VBox(margin=0, spacing=5)
        self.objects_combo.currentIndexChanged.connect(self.add_objects)
        self.vbox.addLayout(self.objects_layout)
        self._populate_objects()

    def _populate_objects(self):
        """Rebuild the 'Add objects' combo from the server's available types."""
        self.objects_params = {}
        self.objects_combo.clear()
        self.objects_combo.addItem("Add objects")
        params = dict(
            workspace=DataModel.g.current_session + "@" + DataModel.g.current_workspace
        )
        result = Launcher.g.run("objects", "available", **params)
        print(result)
        logger.debug(f"objects available: {result}")
        if result:
            all_categories = sorted(set(p["category"] for p in result))
            for i, category in enumerate(all_categories):
                # Category rows are disabled headers; entries follow beneath.
                self.objects_combo.addItem(category)
                self.objects_combo.model().item(
                    i + len(self.objects_params) + 1
                ).setEnabled(False)
                for f in [p for p in result if p["category"] == category]:
                    self.objects_params[f["name"]] = f["params"]
                    self.objects_combo.addItem(f["name"])

    def add_objects(self, idx):
        """Combo selection handler: create a new object dataset on the server."""
        logger.debug(f"Add objects with idx {idx}")
        # Index 0 is the "Add objects" placeholder; -1 means no selection.
        if idx == 0 or idx == -1:
            return
        # self.objects_combo.setCurrentIndex(0)
        print(idx)
        # NOTE(review): idx - 2 assumes one header row above the first entry —
        # confirm against the combo layout built in _populate_objects().
        order = idx - 2
        if order == 1:
            params = dict(
                order=order,
                workspace=DataModel.g.current_session + "@" + DataModel.g.current_workspace,
                fullname="survos2/entity/blank_boxes.csv",
            )
        else:
            params = dict(
                order=order,
                workspace=DataModel.g.current_session + "@" + DataModel.g.current_workspace,
                fullname="survos2/entity/blank_entities.csv",
            )
        result = Launcher.g.run("objects", "create", **params)
        if result:
            objectsid = result["id"]
            objectsname = result["name"]
            objectsfullname = result["fullname"]
            objectstype = result["kind"]
            self._add_objects_widget(
                objectsid, objectsname, objectsfullname, objectstype, True
            )

    def _add_objects_widget(
        self, objectsid, objectsname, objectsfullname, objectstype, expand=False
    ):
        """Create and register an ObjectsCard for one object dataset."""
        logger.debug(
            f"Add objects {objectsid} {objectsname} {objectsfullname} {objectstype}"
        )
        widget = ObjectsCard(objectsid, objectsname, objectsfullname, objectstype)
        widget.showContent(expand)
        self.objects_layout.addWidget(widget)
        src = DataModel.g.dataset_uri(objectsid, group="objects")
        with DatasetManager(src, out=None, dtype="uint32", fillvalue=0) as DM:
            src_dataset = DM.sources[0]
            src_dataset.set_metadata("fullname", objectsfullname)
        self.existing_objects[objectsid] = widget
        return widget

    def clear(self):
        """Detach every card and reset the registry."""
        for objects in list(self.existing_objects.keys()):
            self.existing_objects.pop(objects).setParent(None)
        self.existing_objects = {}

    def setup(self):
        """Synchronise the displayed cards with the server's object list."""
        self._populate_objects()
        params = dict(
            workspace=DataModel.g.current_session + "@" + DataModel.g.current_workspace
        )
        result = Launcher.g.run("objects", "existing", **params)
        logger.debug(f"objects result {result}")
        if result:
            # Remove objects that no longer exist in the server
            print(self.existing_objects.keys())
            for objects in list(self.existing_objects.keys()):
                if objects not in result:
                    self.existing_objects.pop(objects).setParent(None)
            # Populate with new entity if any
            for entity in sorted(result):
                if entity in self.existing_objects:
                    continue
                enitity_params = result[entity]
                objectsid = enitity_params.pop("id", entity)
                objectsname = enitity_params.pop("name", entity)
                objectsfullname = enitity_params.pop("fullname", entity)
                objectstype = enitity_params.pop("kind", entity)
                print(f"type: {objectstype}")
                if objectstype != "unknown":
                    widget = self._add_objects_widget(
                        objectsid, objectsname, objectsfullname, objectstype
                    )
                    widget.update_params(params)
                    self.existing_objects[objectsid] = widget
                else:
                    logger.debug(
                        "+ Skipping loading entity: {}, {}, {}".format(
                            objectsid, objectsname, objectstype
                        )
                    )


class ObjectsCard(Card):
    """Collapsible card exposing one object dataset: CSV source, scale/offset/
    crop parameters, viewing, and (for 'patches') mask/patch generation."""

    def __init__(
        self, objectsid, objectsname, objectsfullname, objectstype, parent=None
    ):
        super().__init__(
            title=objectsname,
            collapsible=True,
            removable=True,
            editable=True,
            parent=parent,
        )
        self.objectsid = objectsid
        self.objectsname = objectsname
        self.object_scale = 1.0
        self.objectsfullname = objectsfullname
        self.objectstype = objectstype
        self.widgets = {}
        self.filewidget = FileWidget(extensions="*.csv", save=False)
        self.filewidget.path.setText(self.objectsfullname)
        self.add_row(self.filewidget)
        self.filewidget.path_updated.connect(self.load_data)
        self.compute_btn = PushButton("Compute")
        self.view_btn = PushButton("View", accent=True)
        self.get_btn = PushButton("Get", accent=True)
        self._add_param("scale", title="Scale: ", type="Float", default=1)
        self._add_param("offset", title="Offset: ", type="FloatOrVector", default=0)
        self._add_param(
            "crop_start", title="Crop Start: ", type="FloatOrVector", default=0
        )
        self._add_param(
            "crop_end", title="Crop End: ", type="FloatOrVector", default=9000
        )
        self.flipxy_checkbox = CheckBox(checked=True)
        self.add_row(HWidgets(None, self.flipxy_checkbox, Spacing(35)))
        self.add_row(HWidgets(None, self.view_btn, self.get_btn, Spacing(35)))
        self.view_btn.clicked.connect(self.view_objects)
        self.get_btn.clicked.connect(self.get_objects)
        # NOTE(review): the four cfg.* assignments below are immediately
        # overwritten by the fixed defaults that follow — confirm which set
        # is intended to win.
        cfg.object_scale = self.widgets["scale"].value()
        cfg.object_offset = self.widgets["offset"].value()
        cfg.object_crop_start = self.widgets["crop_start"].value()
        cfg.object_crop_end = self.widgets["crop_end"].value()
        cfg.object_scale = 1.0
        cfg.object_offset = (0,0,0)
        cfg.object_crop_start = (0,0,0)
        cfg.object_crop_end = (1e9,1e9,1e9)
        # Extra controls only for patch-type objects.
        if self.objectstype == "patches":
            self._add_annotations_source()
            self.entity_mask_bvol_size = LineEdit3D(default=64, parse=int)
            self._add_feature_source()
            self.make_entity_mask_btn = PushButton("Make entity mask", accent=True)
            self.make_entity_mask_btn.clicked.connect(self.make_entity_mask)
            self.make_patches_btn = PushButton("Make patches", accent=True)
            self.make_patches_btn.clicked.connect(self.make_patches)
            self.add_row(HWidgets(None, self.entity_mask_bvol_size, self.make_entity_mask_btn, Spacing(35)))
            self.add_row(HWidgets(None, self.make_patches_btn, Spacing(35)))
        self.table_control = TableWidget()
        self.add_row(self.table_control.w, max_height=500)
        cfg.entity_table = self.table_control

    def _add_param(self, name, title=None, type="String", default=None):
        """Add a typed line-edit row and register it in self.widgets."""
        if type == "Int":
            p = LineEdit(default=default, parse=int)
        elif type == "Float":
            p = LineEdit(default=default, parse=float)
        elif type == "FloatOrVector":
            p = LineEdit3D(default=default, parse=float)
        elif type == "IntOrVector":
            p = LineEdit3D(default=default, parse=int)
        else:
            p = None
        if title is None:
            title = name
        if p:
            self.widgets[name] = p
            self.add_row(HWidgets(None, title, p, Spacing(35)))

    def load_data(self, path):
        """File-picker callback: remember the newly chosen CSV path."""
        self.objectsfullname = path
        print(f"Setting objectsfullname: {self.objectsfullname}")

    def card_deleted(self):
        """Remove the object dataset on the server, then detach the card."""
        params = dict(objects_id=self.objectsid, workspace=True)
        result = Launcher.g.run("objects", "remove", **params)
        if result["done"]:
            self.setParent(None)
            self.table_control = None

    def _add_annotations_source(self):
        self.annotations_source = LevelComboBox(full=True)
        self.annotations_source.fill()
        self.annotations_source.setMaximumWidth(250)
        widget = HWidgets(
            "Annotation:", self.annotations_source, Spacing(35), stretch=1
        )
        self.add_row(widget)

    def card_title_edited(self, newtitle):
        """Rename the object dataset on the server; return server success."""
        logger.debug(f"Edited entity title {newtitle}")
        params = dict(objects_id=self.objectsid, new_name=newtitle, workspace=True)
        result = Launcher.g.run("objects", "rename", **params)
        return result["done"]

    def view_objects(self):
        """Ask the viewer to display this object dataset."""
        logger.debug(f"Transferring objects {self.objectsid} to viewer")
        cfg.ppw.clientEvent.emit(
            {
                "source": "objects",
                "data": "view_objects",
                "objects_id": self.objectsid,
                "flipxy": self.flipxy_checkbox.value(),
            }
        )

    def update_params(self, params):
        if "fullname" in params:
            self.objectsfullname = params["fullname"]

    def _add_feature_source(self):
        self.feature_source = FeatureComboBox()
        self.feature_source.fill()
        self.feature_source.setMaximumWidth(250)
        widget = HWidgets("Feature:", self.feature_source, Spacing(35), stretch=1)
        self.add_row(widget)

    def get_objects(self):
        """Load the CSV with the current scale/offset/crop settings, push the
        metadata to the server and fill the table widget."""
        cfg.object_scale = self.widgets["scale"].value()
        cfg.object_offset = self.widgets["offset"].value()
        cfg.object_crop_start = self.widgets["crop_start"].value()
        cfg.object_crop_end = self.widgets["crop_end"].value()
        dst = DataModel.g.dataset_uri(self.objectsid, group="objects")
        print(f"objectsfullname: {self.objectsfullname}")
        params = dict(
            dst=dst,
            fullname=self.objectsfullname,
            scale=cfg.object_scale,
            offset=cfg.object_offset,
            crop_start=cfg.object_crop_start,
            crop_end=cfg.object_crop_end,
        )
        logger.debug(f"Getting objects with params {params}")
        result = Launcher.g.run("objects", "update_metadata", workspace=True, **params)
        if self.objectstype == "points":
            tabledata, self.entities_df = setup_entity_table(
                self.objectsfullname,
                scale=cfg.object_scale,
                offset=cfg.object_offset,
                crop_start=cfg.object_crop_start,
                crop_end=cfg.object_crop_end,
                flipxy=self.flipxy_checkbox.value()
            )
        elif self.objectstype == "boxes":
            tabledata, self.entities_df = setup_bb_table(
                self.objectsfullname,
                scale=cfg.object_scale,
                offset=cfg.object_offset,
                crop_start=cfg.object_crop_start,
                crop_end=cfg.object_crop_end,
                flipxy=self.flipxy_checkbox.value()
            )
        elif self.objectstype == "patches":
            tabledata, self.entities_df = setup_entity_table(
                self.objectsfullname,
                scale=cfg.object_scale,
                offset=cfg.object_offset,
                crop_start=cfg.object_crop_start,
                crop_end=cfg.object_crop_end,
                flipxy=self.flipxy_checkbox.value()
            )
        cfg.tabledata = tabledata
        self.table_control.set_data(tabledata)
        print(f"Loaded tabledata {tabledata}")
        # NOTE(review): set_data is called twice in the original — confirm
        # whether the second call is intentional.
        self.table_control.set_data(tabledata)
        self.collapse()
        self.expand()

    def make_entity_mask(self):
        """Build a binary entity mask from the selected feature volume and the
        loaded entities, and store it as a new raw feature."""
        src = DataModel.g.dataset_uri(self.feature_source.value(), group="features")
        with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
            src_array = DM.sources[0][:]
        entity_arr = np.array(self.entities_df)
        bvol_dim = self.entity_mask_bvol_size.value()
        # Shift entity coordinates by the bounding-volume half extents.
        entity_arr[:, 0] -= bvol_dim[0]
        entity_arr[:, 1] -= bvol_dim[1]
        entity_arr[:, 2] -= bvol_dim[2]
        from survos2.entity.entities import make_entity_mask
        gold_mask = make_entity_mask(
            src_array, entity_arr, flipxy=True, bvol_dim=bvol_dim
        )[0]
        # create new raw feature
        params = dict(feature_type="raw", workspace=True)
        result = Launcher.g.run("features", "create", **params)
        if result:
            fid = result["id"]
            ftype = result["kind"]
            fname = result["name"]
            logger.debug(f"Created new object in workspace {fid}, {ftype}, {fname}")
            dst = DataModel.g.dataset_uri(fid, group="features")
            with DatasetManager(dst, out=dst, dtype="float32", fillvalue=0) as DM:
                DM.out[:] = gold_mask
            cfg.ppw.clientEvent.emit(
                {"source": "objects_plugin", "data": "refresh", "value": None}
            )

    def make_patches(self):
        """Run the Make_Patches workflow: organize entities over the selected
        feature, mask with the chosen annotation level, and emit the result."""
        src = DataModel.g.dataset_uri(self.feature_source.value(), group="features")
        with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
            src_array = DM.sources[0][:]
        objects_scale = 1.0
        entity_meta = {
            "0": {
                "name": "class1",
                "size": np.array((15, 15, 15)) * objects_scale,
                "core_radius": np.array((7, 7, 7)) * objects_scale,
            },
        }
        entity_arr = np.array(self.entities_df)
        combined_clustered_pts, classwise_entities = organize_entities(
            src_array, entity_arr, entity_meta, plot_all=False
        )
        wparams = {}
        wparams["entities_offset"] = (0, 0, 0)
        wparams["entity_meta"] = entity_meta
        wparams["workflow_name"] = "Make_Patches"
        wparams["proj"] = DataModel.g.current_workspace
        wf = PatchWorkflow(
            [src_array], combined_clustered_pts, classwise_entities, src_array, wparams, combined_clustered_pts
        )
        src = DataModel.g.dataset_uri(self.annotations_source.value().rsplit("/", 1)[-1], group="annotations")
        with DatasetManager(src, out=None, dtype="uint16", fillvalue=0) as DM:
            src_dataset = DM.sources[0]
            # Lower 4 bits hold the label id.
            anno_level = src_dataset[:] & 15
        logger.debug(f"Obtained annotation level with labels {np.unique(anno_level)}")
        logger.debug(f"Making patches in path {src_dataset._path}")
        train_v_density = make_patches(wf, entity_arr, src_dataset._path, proposal_vol=(anno_level > 0)* 1.0, padding=self.entity_mask_bvol_size.value(), num_augs=0, max_vols=-1)
        self.patches = train_v_density
        cfg.ppw.clientEvent.emit(
            {"source": "panel_gui", "data": "view_patches", "patches_fullname": train_v_density}
        )
17,692
5,361
import sys
import gzip
import numpy as np

# Reads gzipped "size<TAB>count" tables (one file per library), bins the sizes,
# builds reverse cumulative histograms, and reports the largest size at which
# every file contributes ~25% (+/- threshold) of the reads.
#
# Fixed for Python 3: print() calls, explicit integer division for bin
# indices/array sizes, and text-mode gzip reading (bytes under Py3 otherwise).
if __name__ == "__main__":
    f_names = sys.argv[1:]
    max_value = 100000
    bin_size = 50
    threshold = 0.01
    data = []
    # // keeps this an int so np.zeros() and indexing below work on Python 3.
    total_bins = (max_value // bin_size) + 1
    for f_name in f_names:
        d = np.zeros(total_bins)
        # "rt" yields str lines (gzip.open defaults to binary mode).
        with gzip.open(f_name, "rt") as f:
            for line in f:
                row = line.strip("\r\n").split("\t")
                size, count = (int(row[0]), int(row[1]))
                if size < max_value:
                    d[size // bin_size] += count
                else:
                    # Everything >= max_value is lumped into the last bin.
                    d[max_value // bin_size] += count
        # Reverse cumulative sum: index 0 corresponds to the largest bin.
        d = d[::-1].cumsum()
        data.append(d)
    data = np.array(data)
    current_size = max_value
    for no, d in enumerate(data.T):
        p = d / d.sum()
        # Keep walking down while all files contribute ~1/4 of the total.
        if np.all(abs(p - 0.25) <= threshold):
            current_size = (total_bins - no) * bin_size
        else:
            break
    print("Orientation Size (+/-%s): %s" % (threshold, current_size))
    for no, d in enumerate(data.T):
        p = d / d.sum()
        print("\t".join(map(str, [(total_bins - no) * bin_size] + p.tolist())))
1,248
451
from tqdm import tqdm
import os
import glob
import pickle
import numpy as np
from imageio import imread, imwrite
import astimp
from multiprocessing import Pool, cpu_count
from functools import partial


class ErrorInPreproc(Exception):
    """Raised when the stored preprocessing of an image had failed."""
    pass


class Dataset():
    """Datasets consisting of several files in a given input_folder."""

    def __init__(self, base_path, glob_patterns=('*.jpg', '*.JPG', '*.png', "*.PNG")):
        """base_path : path to the folder where the files are stored
        glob_patterns : a list of patterns for selecting files (e.g. ['*.jpg'])"""
        assert os.path.exists(
            base_path), "input folder '{}' not found".format(base_path)
        self.base_path = base_path
        self.paths = []
        for pattern in glob_patterns:
            self.paths += glob.glob(os.path.join(base_path, pattern))
        # Name = file name up to the first dot ("234_SLR.jpg" -> "234_SLR").
        self.names = [os.path.basename(path).split('.')[0]
                      for path in self.paths]


class PreprocResults():
    """Access to preprocessed pickled AST images."""

    def __init__(self, pickles_folder):
        """pickles_folder : folder with *.pickle files and an optional
        error_log.txt whose lines are "name, message"."""
        if not os.path.exists(pickles_folder):
            # Fixed typo in the error message ("exit" -> "exist").
            raise FileNotFoundError("{} does not exist".format(pickles_folder))
        self.pf = pickles_folder
        self.ds = Dataset(self.pf, glob_patterns=("*.pickle",))
        self.names = self.ds.names
        errorlog_path = os.path.join(pickles_folder, "error_log.txt")
        if os.path.exists(errorlog_path):
            with open(errorlog_path, 'r') as f:
                lines = f.readlines()
            # split(',', 1) keeps the full message even when it contains
            # commas (the writer joins exception args with ', ').
            self.errors = {line.split(',', 1)[0]: line.split(',', 1)[1]
                           for line in lines}
        else:
            # Was `[]`: inconsistent with the dict branch above, which would
            # break `self.errors[name]` lookups in get_by_name().
            self.errors = {}

    def get_by_name(self, name):
        """Load a pickle by name.

        Pickles have the same name than images
        example: 234_SLR_ESBL.jpg <-> 234_SLR_ESBL.jpg.pickle"""
        # NOTE(review): INFO-level entries carry the "INFO" marker in the log
        # *key*, not the value, so this guard is effectively always true for a
        # matching name — confirm the intended error_log.txt format.
        if name in self.errors and self.errors[name].split(" ")[0] != 'INFO':
            raise ErrorInPreproc(self.errors[name].strip())
        path = os.path.join(self.pf, name + '.pickle')
        if not os.path.exists(path):
            raise FileNotFoundError("Pickle {} not found.".format(path))
        with open(path, 'rb') as f:
            p = pickle.load(f)
        return p

    def __getitem__(self, name):
        return self.get_by_name(name)

    def get_all(self):
        """Load all pickles in input folder."""
        output = []
        for path in tqdm(self.ds.paths, desc="Loading pickles"):
            with open(path, 'rb') as f:
                p = pickle.load(f)
            output.append(p)
        return output


def preprocess_one_image(path):
    """Run the astimp AST analysis on one image and return a dict of results."""
    img = np.array(imread(path))  # load image
    ast = astimp.AST(img)
    crop = ast.crop
    circles = ast.circles
    pellets = ast.pellets
    labels = ast.labels_text
    # NOTE the preprocessing object is not created if no pellets were found.
    preproc = ast.preproc if len(circles) != 0 else None
    pobj = {"ast": ast,
            "preproc": preproc,
            "circles": circles,
            "pellets": pellets,
            "labels": labels,
            "crop": crop,
            "fname": os.path.basename(path),
            "inhibitions": ast.inhibitions}
    return pobj


def pickle_one_preproc(idx, output_path, image_paths, error_list,
                       skip_existing=False, mute=True):
    """Preprocess image_paths[idx] and pickle the result into output_path.

    Failures are recorded in error_list[idx] rather than raised, so the
    function can be mapped over a multiprocessing Pool.
    NOTE(review): in parallel mode each worker mutates its *own copy* of
    error_list; errors may not propagate back to the parent — confirm.
    """
    if mute:
        log_function = lambda x: x
    else:
        log_function = tqdm.write
    path = image_paths[idx]
    # Computed before the try block so the except handler can always use it
    # (previously a NameError risk when an exception fired early).
    fname = os.path.basename(path)
    try:
        ofpath = os.path.join(output_path, f"{fname}.pickle")  # output file path
        if skip_existing and os.path.exists(ofpath):
            # Skip if the output file exists already.
            return None
        # WARNING for an unknown reason the pickle call must be inside this function
        pobj = preprocess_one_image(path)
        with open(ofpath, 'wb') as f:
            pickle.dump(pobj, f)
        if len(pobj['circles']) == 0:  # if no pellet found
            error_list[idx] = "INFO : {}, No pellets found".format(fname)
            log_function("No pellet found in {}".format(fname))
    except Exception as e:
        ex_text = ', '.join(map(lambda x: str(x), e.args))
        error_list[idx] = "{}, {}".format(fname, ex_text)
        log_function("Failed images: {} - {}".format(len(error_list), ex_text))
    return None


def preprocess(img_paths, output_path, skip_existing=False, parallel=True):
    """preprocess images and pickle the preproc object.

    img_paths : a list of paths of the image files.
    Returns the list of non-empty error strings."""
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    errors = [""] * len(img_paths)
    if parallel:
        jobs = cpu_count()
        print("Running in parallel on {} processes".format(jobs))
        f = partial(pickle_one_preproc,
                    image_paths=img_paths,
                    output_path=output_path,
                    error_list=errors,
                    skip_existing=skip_existing)
        with Pool(jobs) as p:
            list(tqdm(p.imap(f, range(len(img_paths))), total=len(img_paths)))
        errors = [e for e in errors if e != ""]
    else:
        for idx in tqdm(range(len(img_paths)), desc="Preprocessing"):
            pickle_one_preproc(idx, output_path, img_paths, errors,
                               skip_existing, mute=False)
    return errors
5,459
1,713
from clustering_algorithms import CLARA, PAM, get_initial_points
from data_loaders import load_data
from timer import Timer
from visualizers import plot_data

# FILENAME = "datasets/artificial/sizes3.arff"
FILENAME = "datasets/artificial/zelnik4.arff"
# FILENAME = "datasets/artificial/xclara.arff"
# FILENAME = "datasets/real-world/glass.arff"


def _run_algorithm(algorithm_cls, data, points):
    """Instantiate `algorithm_cls`, run it, and return its result dataframe.

    Shared helper: run_clara/run_pam previously duplicated this body line
    for line."""
    algorithm = algorithm_cls(points, len(data["classes"]), labels=data["classes"])
    algorithm.run()
    return algorithm.get_result_df()


def run_clara(data, points):
    """Cluster `points` with CLARA and return the result dataframe."""
    return _run_algorithm(CLARA, data, points)


def run_pam(data, points):
    """Cluster `points` with PAM and return the result dataframe."""
    return _run_algorithm(PAM, data, points)


if __name__ == "__main__":
    data = load_data(FILENAME)
    # plot_data(data["df"], data["classes"], data["class_column"])
    points = get_initial_points(data["df"], data["coordinates_columns"])
    # result = run_clara(data, points)
    result = run_pam(data, points)
    plot_data(
        result, data["classes"], "cluster",
        attributes_names=data["coordinates_columns"]
    )
1,024
356
# Tutorial script demonstrating Python module imports, the __name__
# attribute, and dir().  (Comments translated to English; the quoted
# tutorial text blocks below are string literals and are left untouched.)
from module import support
from module import fibo
import sys

support.print_func("Runoob")
fibo.fib(1000)
print(fibo.fib2(100))
print(fibo.__name__)

# Bind a function from the module to a local name.
fib = fibo.fib
fib(10)

"""
from…import 语句
Python的from语句让你从模块中导入一个指定的部分到当前命名空间中,语法如下:
from modname import name1[, name2[, ... nameN]]
例如,要导入模块 fibo 的 fib 函数,使用如下语句:
>>> from fibo import fib, fib2
>>> fib(500)
1 1 2 3 5 8 13 21 34 55 89 144 233 377
这个声明不会把整个fibo模块导入到当前的命名空间中,它只会将fibo里的fib函数引入进来。

From…import* 语句
把一个模块的所有内容全都导入到当前的命名空间也是可行的,只需使用如下声明:
from modname import *
这提供了一个简单的方法来导入一个模块中的所有项目。然而这种声明不该被过多地使用。
"""

"""
__name__属性
一个模块被另一个程序第一次引入时,其主程序将运行。如果我们想在模块被引入时,模块中的某一程序块不执行,我们可以用__name__属性来使该程序块仅在该模块自身运行时执行。
#!/usr/bin/python3
# Filename: using_name.py

if __name__ == '__main__':
   print('程序自身在运行')
else:
   print('我来自另一模块')
运行输出如下:
$ python using_name.py
程序自身在运行
$ python
>>> import using_name
我来自另一模块
>>>
说明: 每个模块都有一个__name__属性,当其值是'__main__'时,表明该模块自身在运行,否则是被引入。
"""

"""
内置的函数 dir() 可以找到模块内定义的所有名称。以一个字符串列表的形式返回:
"""
print(dir(fibo))
print(dir(sys))

# With no argument, dir() lists all names defined in the current scope.
print(dir())
1,143
828
import torch
from einops import rearrange
import svgwrite

###########################################
# Normalization / Standardization functions
###########################################


def normalize_functional(tensor: torch.Tensor, mean: list, std: list):
    """
    Standardizes tensor in the channel dimension (dim -3) using mean and std.
    [... C H W] -> [... C H W]
    """
    mean = torch.tensor(mean).view(-1, 1, 1).to(tensor.device)
    std = torch.tensor(std).view(-1, 1, 1).to(tensor.device)
    return (tensor - mean) / std


def unnormalize_functional(tensor: torch.Tensor, mean: list, std: list):
    """
    Un-standardizes tensor in the channel dimension (dim -3) using mean and std.
    Also clips the tensor to be in the range [0, 1].
    [... C H W] -> [... C H W]
    """
    mean = torch.tensor(mean).view(-1, 1, 1).to(tensor.device)
    std = torch.tensor(std).view(-1, 1, 1).to(tensor.device)
    return ((tensor * std) + mean).clamp(0, 1)


def unnormalize_to(x, x_min, x_max):
    """
    Linearly map x from [0, 1] onto [x_min, x_max]
    (i.e. 0 -> x_min and 1 -> x_max; it only maps x.min()/x.max() onto the
    bounds when x actually spans [0, 1]).
    """
    return x * (x_max - x_min) + x_min


############################
# Image convertion functions
############################


def rgba_to_rgb(rgba: torch.Tensor):
    """
    Converts tensor from 4 channels to 3: drops the alpha channel after
    multiplying the first 3 channels by it.
    (Docstring fixed — it previously described the opposite conversion.)
    [... 4 H W] -> [... 3 H W]
    """
    return rgba[..., :-1, :, :] * rgba[..., -1:, :, :]


def rgb_to_rgba(rgb: torch.Tensor, fill: float = 1.0):
    """
    Converts tensor from 3 channels to 4 by appending an alpha channel,
    filled with 1.0 by default (can also be specified via `fill`).
    (Docstring fixed — it previously described the opposite conversion.)
    [... 3 H W] -> [... 4 H W]
    """
    alpha_channel = torch.full_like(rgb[..., :1, :, :], fill_value=fill)
    return torch.concat([rgb, alpha_channel], dim=-3)


###########################################
# Alpha compositing/decompositing functions
###########################################


def alpha_composite(base, added, eps=1e-8):
    """
    Composite two tensors, i.e., layers `added` on top of `base`, where the
    last channel is assumed to be an alpha channel.  `eps` guards the division
    where the result is fully transparent.
    [... C H W], [... C H W] -> [... C H W]
    """
    # Separate color and alpha
    alpha_b = base[..., -1:, :, :]
    alpha_a = added[..., -1:, :, :]
    color_b = base[..., :-1, :, :]
    color_a = added[..., :-1, :, :]

    # https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
    alpha_0 = (1 - alpha_a) * alpha_b + alpha_a
    color_0 = ((1 - alpha_a) * alpha_b * color_b + alpha_a * color_a) / (alpha_0 + eps)

    # Re-combine new color and alpha
    return torch.concat([color_0, alpha_0], dim=-3)


def alpha_composite_multiple(images_tensor):
    """
    Composite tensor of N images into a single image, first to last.
    Assumes last channel is an alpha channel.
    [... N C H W] -> [... C H W]
    """
    image_iterator = rearrange(images_tensor, "... N C H W -> N ... C H W")
    # Get first image
    compositioned_image = image_iterator[0]
    # Add the rest of the images
    for image in image_iterator[1:]:
        # TODO: Possibly need to add .copy() to prevent assignment error in autograd.
        compositioned_image = alpha_composite(compositioned_image, image)
    return compositioned_image


def get_visible_mask(shapes):
    """
    Inputs a set of rendered images where C > 1 and the last channel is an
    alpha channel.  Assuming that images were to be compositioned first to
    last (N=0, 1, 2...), returns a mask for each image that shows what pixels
    of that image are visible in the final composition.
    [... N C H W] -> [... N H W]
    """
    # Iterate back-to-front so accumulated alpha describes what is already
    # drawn *on top of* the current layer.
    shape_iterator = rearrange(shapes, "... N C H W -> N ... C H W").flip(0)
    accumulated_alpha = torch.zeros_like(shape_iterator[0, ..., 0, :, :])  # empty like first image, single channel
    shape_masks = torch.zeros_like(shape_iterator[..., 0, :, :])  # empty image for each shape layer
    for i, shape in enumerate(shape_iterator):
        # a over b alpha compositioning
        # alpha_0 = (1 - alpha_a) * alpha_b + alpha_a
        # get b
        # alpha_b = (alpha_0 - alpha_a) / (1 - alpha_a)
        shape_alpha = shape[..., -1, :, :]
        alpha_visible = shape_alpha - accumulated_alpha * shape_alpha
        shape_masks[i] = alpha_visible
        accumulated_alpha = (1 - shape_alpha) * accumulated_alpha + shape_alpha
    return rearrange(shape_masks.flip(0), "N ... H W -> ... N H W").unsqueeze(-3)
4,459
1,510
import cv2
import numpy as np
from skimage import draw
from skimage import io

# Segments the well region of a microscopy image by thresholding, flood
# filling the background from the corner, and smoothing the resulting mask
# with a small erode/dilate pass.
# (A large block of commented-out Sobel/stdev exploratory code that this
# flood-fill approach superseded has been removed.)

# Read image as single-channel grayscale.
im_in = cv2.imread("analyses/MDA231_stopper_1_c3.tif", cv2.IMREAD_GRAYSCALE)

# Threshold with THRESH_BINARY_INV at 20:
# values below 20 become 255, values >= 20 become 0.
# (The original comment claimed a threshold of 220, contradicting the code.)
th, im_th = cv2.threshold(im_in, 20, 255, cv2.THRESH_BINARY_INV)

# Copy the thresholded image for flood filling.
im_floodfill = im_th.copy()

# Mask used by floodFill; it must be 2 pixels larger than the image
# in each dimension.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)

# Floodfill the background from point (0, 0).
cv2.floodFill(im_floodfill, mask, (0, 0), 255)

# Invert floodfilled image.
im_floodfill_inv = cv2.bitwise_not(im_floodfill)

# Combine the two images to get the foreground.
im_out = im_th | im_floodfill_inv
io.imsave(fname='temp_output.png', arr=im_out)

# Build a small circular structuring element.
k_size = 2
k_half = k_size / 2  # NOTE(review): float under Python 3 — draw.circle accepts it, but an int was likely intended
kernel = np.ones((k_size, k_size), np.uint8)
# NOTE(review): the kernel is already all ones, so this circle assignment is a
# no-op — presumably np.zeros was intended above; confirm before changing.
coords = draw.circle(k_half, k_half, k_half, shape=im_th.shape)
kernel[coords] = 1

# Erode, then dilate the inverted result to smooth the mask edges.
erosion = cv2.erode(im_out, kernel, iterations=1)
dilation = cv2.dilate(cv2.bitwise_not(erosion), kernel, iterations=1)
dilation = cv2.bitwise_not(dilation)

# io.imshow(dilation)
io.imsave(fname='mask.png', arr=dilation)
3,742
1,649
from bxcommon.test_utils.abstract_test_case import AbstractTestCase
from bxcommon.messages.bloxroute.txs_message import TxsMessage
from bxcommon.models.transaction_info import TransactionInfo
from bxcommon.test_utils import helpers
from bxcommon.utils.object_hash import Sha256Hash


class TxsMessageTests(AbstractTestCase):

    def test_txs_with_short_ids_message(self):
        """Round-trip a TxsMessage through rawbytes() and verify that every
        TransactionInfo (hash, contents, short id) survives intact."""
        expected = [
            TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)),
                            helpers.generate_bytearray(200), 111),
            TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)),
                            helpers.generate_bytearray(300), 222),
            TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)),
                            helpers.generate_bytearray(400), 333),
        ]

        # Serialize, then parse the bytes back into a fresh message.
        message = TxsMessage(txs=expected)
        raw = message.rawbytes()
        self.assertTrue(raw)

        decoded = TxsMessage(buf=raw)
        self.assertTrue(decoded)

        actual = decoded.get_txs()
        self.assertEqual(len(actual), len(expected))
        for got, want in zip(actual, expected):
            self.assertEqual(got.short_id, want.short_id)
            self.assertEqual(got.contents, want.contents)
            self.assertEqual(got.hash, want.hash)
1,344
469
lanche = ('Hambúrguer', 'Suco', 'Pizza', 'Pudim', 'Batata Frita') # Tuplas são imutáveis # lanche[1] = 'Refrigerante' - Esse comando não vai funcionar print(len(lanche)) print(sorted(lanche)) print(lanche) print(lanche[-3:]) for comida in lanche: print(f'Eu vou comer {comida}') for cont in range(0, len(lanche)): print(f'Eu vou comer {lanche[cont]} na posição {cont}') for pos, comida in enumerate(lanche): print(f'Eu Vou comer {comida} na posição {pos}') print('Comi pra caramba!') a = (2, 5, 4) b = (5, 8, 1, 2) c = b + a print(c) print(c.index(5, 1)) print(f'o tamanho de "c" é {len(c)}') print(f'Tem {c.count(5)} números 5') pessoa = ('Gustavo', 39, 'M', 99.88) del(pessoa) print(pessoa)
712
333
import yaml
import os

# Resolve the config relative to this module so loading works regardless of
# the current working directory.
config_file = os.path.join(os.path.dirname(__file__), "config/config.yml")

with open(config_file, 'r') as stream:
    # safe_load instead of load: yaml.load without an explicit Loader is
    # deprecated and can execute arbitrary Python via YAML tags.
    CONFIG = yaml.safe_load(stream)
167
61
# NOTE(review): Python 2 syntax throughout (print statements) — this file
# will not run under Python 3 without conversion.
# Reads a Jenkins job config.xml, dumps its 'branch' string parameter, and
# builds a new 'version' string-parameter element.
import xml.etree.ElementTree as ET
tree = ET.parse('/Users/zhaoli/workspace/splunk/playground/var/lib/jenkins/jobs/Splunk/jobs/develop/jobs/platform/jobs/cli/jobs/trigger_cli_linux/config.xml')
root = tree.getroot()

# SPs = root.findall("properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/[hudson.model.StringParameterDefinition]")
# XPath: select the StringParameterDefinition whose <name> is 'branch'.
SPs = root.findall("properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/hudson.model.StringParameterDefinition/[name='branch']")
print "***"
print dir(SPs)
print "***"
for s in SPs:
    print "-----"
    # print s.tag, ":", s.text
    ET.dump(s)

# Build a new string parameter definition: name/description/defaultValue.
spd = ET.Element("hudson.model.StringParameterDefinition")
name = ET.SubElement(spd, 'name')
name.text="version"
description=ET.SubElement(spd, 'description')
description.text="The product version"
defaultValue=ET.SubElement(spd, 'defaultValue')
defaultValue.text=""
ET.dump(spd)
# NOTE(review): the source appears truncated here — "tree." is an incomplete
# statement (likely tree.write(<path>)); recover it from the original file.
tree.
921
328
import random


def format_fasta(title, sequence):
    """
    This formats a fasta sequence

    Input:
        title - String - Title of the sequence
        sequence - String - Actual sequence

    Output:
        String - Fully formatted fasta sequence
    """
    fasta_width = 70  # Number of characters in one line
    # Slice in fixed-width steps.  This fixes the previous
    # `n_lines = 1 + len(sequence) // fasta_width` computation, which emitted
    # a spurious empty line whenever the length was a multiple of the width.
    lines = "\n".join(sequence[i:i + fasta_width]
                      for i in range(0, len(sequence), fasta_width))
    return f"> {title}\n{lines}\n\n"


bases = "actg"  # Bases for our random sequences

if __name__ == "__main__":
    # Guarded by __main__ so importing this module no longer writes a file
    # as a side effect.  Writes random sequences of increasing length.
    with open("random_sequences.fa", "w") as f:
        for length in range(1, 25):  # Sequences of different lengths
            for run in range(10):  # Trying several times
                title = f"length_{length} run_{run}"
                sequence = "".join(random.choices(bases, k=length))
                f.write(format_fasta(title, sequence))
986
307
import sublime
import os


class OutputPanel:
    """A file-like wrapper around a Sublime Text output panel.

    Implements write/writeln/flush so it can stand in for a stream, plus
    show/close for panel management."""

    def __init__(
        self, name, file_regex='', line_regex='', base_dir=None,
        word_wrap=False, line_numbers=False, gutter=False,
        scroll_past_end=False,
        syntax='Packages/Text/Plain text.tmLanguage'
    ):
        """Create (or reuse) the output panel `name` on the active window and
        apply the given result-navigation and display settings."""
        self.name = name
        self.window = sublime.active_window()
        self.output_view = self.window.get_output_panel(name)

        # default to the current file directory
        if not base_dir and self.window.active_view() and \
                self.window.active_view().file_name():
            base_dir = os.path.dirname(self.window.active_view().file_name())

        settings = self.output_view.settings()
        settings.set("result_file_regex", file_regex)
        settings.set("result_line_regex", line_regex)
        settings.set("result_base_dir", base_dir)
        settings.set("word_wrap", word_wrap)
        settings.set("line_numbers", line_numbers)
        settings.set("gutter", gutter)
        settings.set("scroll_past_end", scroll_past_end)
        settings.set("syntax", syntax)

        self.closed = False

    def write(self, s):
        """Append `s` to the (read-only) panel and scroll to the end."""
        self.output_view.set_read_only(False)
        # Removed a stray trailing comma that wrapped this call in a
        # single-element tuple expression.
        self.output_view.run_command('append', {'characters': s})
        self.output_view.set_read_only(True)
        self.output_view.show(self.output_view.size())

    def writeln(self, s):
        """Append `s` followed by a newline."""
        self.write(s + "\n")

    def flush(self):
        """File-protocol no-op."""
        pass

    def show(self):
        """Bring this output panel to the front."""
        self.window.run_command("show_panel", {"panel": "output." + self.name})

    def close(self):
        """Mark the panel closed (removed a redundant trailing `pass`)."""
        self.closed = True
1,604
499
from flask import Flask, render_template

app = Flask(__name__)
app.config['DEBUG'] = True

# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.


@app.route('/')
def hello(name=None):
    """Return a friendly HTTP greeting."""
    # NOTE(review): the '/' route carries no <name> segment, so `name` stays
    # None here unless this handler is called directly — confirm intent.
    return render_template('template.html', name=name, text="Jinja Flask")
    # return render_template('bootstrap_cover.html', name=name)


# @app.route('/rates')
# def helloRates(name='rates'):
#     return render_template('template.html',name=name)


@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    return 'Sorry, nothing at this URL.', 404
673
220
from unittest.mock import Mock, patch

import pytest

import patterns.echo_server_contextvar as main


@patch.object(main, "client_addr_var", Mock())
def test_render_goodbye(capsys):
    """render_goodbye() prints a goodbye line mentioning the client address
    (the address context variable is replaced by a Mock for the test)."""
    # Call 'render_goodbye'
    goodbye_string = main.render_goodbye()
    print(goodbye_string)

    # Assert.
    out, err = capsys.readouterr()
    assert err == ""
    assert "Good bye, client @" in out


@pytest.mark.asyncio
@patch("patterns.echo_server_contextvar.asyncio.start_server", autospec=True)
@patch("patterns.echo_server_contextvar.asyncio.sleep", autospec=True)
async def test_server(mock_asyncio_sleep, mock_asyncio_start_server):
    """server(stop_after=n) sleeps n times and starts exactly one asyncio
    server bound to 127.0.0.1:8081 with main.handle_request."""
    # Decorators apply bottom-up, so the innermost patch (asyncio.sleep) is
    # the first positional mock argument.
    stop_after = 5

    # Call 'server()'.
    await main.server(stop_after=stop_after)

    # Assert.
    assert mock_asyncio_sleep.call_count == stop_after
    args = main.handle_request, "127.0.0.1", 8081
    mock_asyncio_start_server.assert_called_once_with(*args)
903
328
#!/usr/bin/env python # -*- coding: utf-8 -*- # Autor: rique_dev (rique_dev@hotmail.com) from SSLProxies24.Feed import Feed from SSLProxies24.Check import CheckProxy import time import gc # Recupera a listagem prx = Feed().PROXY_LIST # Inicia classe chk = CheckProxy() # Começa validação chk.validatelist(prx) # Ativa garbage gc.enable() time.sleep(30) # Contagem print('Sucesso: '+str(chk.getsucesscount())) print('Falhas: '+str(chk.getfailcount())) print('Total de Proxys: '+str(chk.getproxycount())) print('Restam: '+str(chk.getproxycount()-(chk.getsucesscount()+chk.getfailcount()))) # Lista de Proxys print(chk.getproxylist()) del prx del chk print('Classes eliminadas.') exit(0)
693
273
import math as m


def yakobi(a, n, k=1):
    """Return the Jacobi symbol (a/n) multiplied by the accumulated sign k.

    n must be a positive odd integer.  The result is k * (a/n), i.e. one of
    -k, 0, or k.  The previous implementation discarded the results of its
    recursive calls (falling through to later branches and returning None),
    used float exponents under Python 3, and had a missing `*` in
    `(n - 1)(a - 1)` that raised TypeError; all fixed below.
    """
    if a < 0:
        # (-1/n) = (-1)^((n-1)/2)
        return yakobi(-a, n, k * (-1) ** ((n - 1) // 2))
    a %= n
    if a == 0:
        # (0/1) = 1; otherwise gcd(a, n) > 1, so the symbol is 0.
        return k if n == 1 else 0
    if a % 2 == 0:
        # (2/n) = (-1)^((n^2 - 1)/8)
        return yakobi(a // 2, n, k * (-1) ** ((n * n - 1) // 8))
    if a == 1:
        return k
    # Quadratic reciprocity: (a/n)(n/a) = (-1)^(((a-1)/2) * ((n-1)/2))
    return yakobi(n % a, a, k * (-1) ** (((a - 1) // 2) * ((n - 1) // 2)))


def euler_test(p, x):
    """Euler (solovay-strassen style) test: True iff x^((p-1)/2) is congruent
    to the Jacobi symbol (x/p) modulo p.

    Uses three-argument pow for modular exponentiation (the previous
    `pow(x, (p - 1) / 2) % p` produced a float exponent under Python 3).
    """
    j = yakobi(x, p, k=1)
    # j % p maps -1 -> p - 1, covering both branches of the old comparison.
    return pow(x, (p - 1) // 2, p) == j % p
577
275
## ========================================================================= ##
## Copyright (c) 2019 Agustin Durand Diaz.                                   ##
## This code is licensed under the MIT license.                              ##
## hud_b2d.py                                                                ##
## ========================================================================= ##

from core.hud_base import HudBase
from enums import ScreenType, SimulationType
from core.utils import getPathWithoutExtension, existsFile, getImageSize
import settings


class HudB2D(HudBase):
    """Base HUD for the Box2D screens: FPS counter, title label, and a
    Back button that returns to the meta map."""

    def __init__(self, width, height):
        HudBase.__init__(self, width, height)

    def init(self):
        self.showFPS()
        self.addLabel((80, 30), (150, 30), 'Box2D')
        self.addButton((725, 40), (100, 50), 'Back', self.gotoMetamap)

    def gotoMetamap(self):
        # Return to the meta-map screen.
        self.m_manager.gotoScreen(ScreenType.META_MAP)


class HudB2DNEATDIP(HudB2D):
    """HUD for the NEAT double-inverted-pendulum simulation.

    While training it shows the current/total evolving step; otherwise it
    renders the genome network image (capped at 450x450) next to Back/Reset
    buttons."""

    def __init__(self, width, height, params):
        # params: 'isTraining', 'currentStep' and/or 'genomePath' keys are
        # read in init() below.
        self.params = params
        HudB2D.__init__(self, width, height)

    def init(self):
        self.showFPS()
        self.addLabel((75, 15), (150, 30), 'NEAT DIP')
        if 'isTraining' in self.params and self.params['isTraining']:
            self.addLabel((75, 45), (150, 30), str(self.params['currentStep']) + "/" + str(settings.NEAT_DIP_EVOLVING_STEPS))
        else:
            # Show the genome's rendered network (same base name, .png).
            imgPath = self.params['genomePath']
            imgPath = getPathWithoutExtension(imgPath) + '.png'
            if existsFile(imgPath):
                size = getImageSize(imgPath)
                w, h = size
                # Cap the displayed image at 450x450 (aspect not preserved).
                if size[0] > 450:
                    w = 450
                if size[1] > 450:
                    h = 450
                self.addImage(((w/2) + 30, (h/2) + 30), (w, h), imgPath)
        self.addButton((770, 15), (60, 30), 'Back', self.gotoMetamap, alpha = 200)
        self.addButton((670, 15), (60, 30), 'Reset', self.resetDIP, alpha = 200)

    def resetDIP(self):
        # Restart the DIP simulation screen.
        self.m_manager.gotoScreen(ScreenType.SIMULATION, {'simulationType': SimulationType.NEAT_B2D_DIP})


class HudB2DNEATTIP(HudB2D):
    """HUD for the NEAT triple-inverted-pendulum simulation; shows either the
    training progress or the fixed 'net_neat_tip.png' network image."""

    def __init__(self, width, height, params):
        self.params = params
        HudB2D.__init__(self, width, height)

    def init(self):
        self.showFPS()
        self.addLabel((75, 15), (150, 30), 'NEAT TIP')
        if 'isTraining' in self.params and self.params['isTraining']:
            self.addLabel((75, 45), (150, 30), str(self.params['currentStep']) + "/" + str(settings.NEAT_TIP_EVOLVING_STEPS))
        else:
            imgPath = 'net_neat_tip.png'
            if existsFile(imgPath):
                size = getImageSize(imgPath)
                # NOTE(review): unlike HudB2DNEATDIP, the image size is not
                # capped at 450 here — confirm whether that is intentional.
                self.addImage(((size[0]/2) + 30, (size[1]/2) + 30), size, imgPath)
        self.addButton((770, 15), (60, 30), 'Back', self.gotoMetamap, alpha = 200)


class HudB2DNEATWalker(HudB2D):
    """HUD for the NEAT walker simulation; shows either the training progress
    or the fixed 'net_neat_walker.png' network image."""

    def __init__(self, width, height, params):
        self.params = params
        HudB2D.__init__(self, width, height)

    def init(self):
        self.showFPS()
        self.addLabel((75, 15), (150, 30), 'NEAT Walker')
        if 'isTraining' in self.params and self.params['isTraining']:
            self.addLabel((75, 45), (150, 30), str(self.params['currentStep']) + "/" + str(settings.NEAT_WALKER_EVOLVING_STEPS))
        else:
            imgPath = 'net_neat_walker.png'
            if existsFile(imgPath):
                size = getImageSize(imgPath)
                self.addImage(((size[0]/2) + 30, (size[1]/2) + 30), size, imgPath)
        self.addButton((770, 15), (60, 30), 'Back', self.gotoMetamap, alpha = 200)
3,787
1,369
def get_final_txt(grb, tables, sentences, output_path):
    """
    Combine the data from [grb]_final_sentences.txt and [grb]_final_tables.txt.
    If a piece of data in tables and another piece in sentences are originally
    from the same GCN, put them in the same GCN entry in [grb]_final.txt.

    grb         : GRB identifier; also names the output subfolder/file.
    tables      : list of dicts with 'number' and 'table' (list of lines).
    sentences   : list of dicts with 'number' and 'sentences'.
    output_path : base directory (with trailing separator) containing `grb`/.

    Neither input list is mutated.  Fixes over the previous version:
    the output file is now closed via a `with` block (it leaked before, and
    shadowed the builtin name `file`), and tables are matched with list
    filtering instead of pop-while-enumerate, which silently skipped the
    second of two consecutive tables sharing a GCN number.
    """
    separator = "=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n\n"
    # Work on a copy so the caller's list is untouched.
    remaining = list(tables)

    with open(f"{output_path}{grb}/{grb}_final.txt", 'w') as out_file:
        # For each sentence, emit its GCN entry together with every table
        # that came from the same GCN.
        for sentence in sentences:
            num = sentence['number']
            result = separator
            result += f"GCN Number: {sentence['number']}\n\n"
            result += f"SENTENCE DATA:\n\n{sentence['sentences']}\n\n"

            matching = [t for t in remaining if t['number'] == num]
            if matching:
                result += "TABLE DATA:\n\n"
                for table in matching:
                    result += '\n'.join(table['table']) + '\n\n'
                remaining = [t for t in remaining if t['number'] != num]

            out_file.write(result)

        # Write the tables whose GCN had no sentence entry.
        for table in remaining:
            result = separator
            result += f"GCN Number: {table['number']}\n"
            result += "TABLE DATA:\n\n" + '\n'.join(table['table']) + '\n\n'
            out_file.write(result)
1,958
625
from solid import *
from solid.utils import *
import util
from util import *
from math import pi


def headband(r1=64.0, r2=85.0, t=3.0, w=12.0):
    """Build a SolidPython headband model.

    r1 : inner arc radius (mm), the half that carries the comb teeth
    r2 : outer arc radius (mm) for the two rear arcs
    t  : material thickness (mm)
    w  : band width / extrusion height (mm)
    Returns the OpenSCAD object (shell of three arcs plus comb teeth).
    """
    # One comb tooth: a unit square fused with a half-round tip, placed at
    # the inner radius.
    combe = right(r1-t/2)(linear_extrude(1)(square([1,1], center=True) + left(0.5)(circle(d=1))))
    combe_spacing = 3.0 # mm
    combe_count = pi*r1/combe_spacing
    # Fan the teeth over the front 180 degrees of the inner arc.
    combes = union()(*[
        rotate([0,0, i*180.0/combe_count])(combe)
        for i in range(-int(combe_count/2), int(combe_count/2))])

    def arcshell(r, ends):
        # Thin arc ring of thickness t/3 (outer arc minus inner arc).
        start, end = ends
        return (arc(rad=r+t/6, start_degrees = start, end_degrees=end)
                - arc(rad=r-t/6, start_degrees = start, end_degrees=end))

    # Front inner arc plus two offset rear arcs, smoothed with offset(),
    # extruded to the band width, and fused with the comb teeth.
    return (linear_extrude(w)(
        offset(r=t/3)(
            arcshell(r1, (-90, 90))
            + forward(r2 - r1)(arcshell(r2, (-130, -90)))
            + back(r2 - r1)(arcshell(r2, (90, 130)))))
        + combes)


def export_scad():
    """Write the headband model to an OpenSCAD file via util.save."""
    util.save('headband', headband())


if __name__ == '__main__':
    export_scad()
972
411
from django.apps import AppConfig


class NativeShortuuidConfig(AppConfig):
    """Django application configuration for the native_shortuuid app."""

    # Dotted path Django uses to locate and register this application.
    name = 'native_shortuuid'
106
31
r"""General solver of the 1D meridional advection-diffusion equation on the sphere: .. math:: \frac{\partial}{\partial t} \psi(\phi,t) &= -\frac{1}{a \cos\phi} \frac{\partial}{\partial \phi} \left[ \cos\phi ~ F(\phi,t) \right] \\ F &= U(\phi) \psi(\phi) -\frac{K(\phi)}{a} ~ \frac{\partial \psi}{\partial \phi} for a state variable :math:`\psi(\phi,t)`, arbitrary diffusivity :math:`K(\phi)` in units of :math:`x^2 ~ t^{-1}`, and advecting velocity :math:`U(\phi)`. :math:`\phi` is latitude and :math:`a` is the Earth's radius (in meters). :math:`K` and :math:`U` can be scalars, or optionally vector *specified at grid cell boundaries* (so their lengths must be exactly 1 greater than the length of :math:`\phi`). :math:`K` and :math:`U` can be modified by the user at any time (e.g., after each timestep, if they depend on other state variables). A fully implicit timestep is used for computational efficiency. Thus the computed tendency :math:`\frac{\partial \psi}{\partial t}` will depend on the timestep. In addition to the tendency over the implicit timestep, the solver also calculates several diagnostics from the updated state: - ``diffusive_flux`` given by :math:`-\frac{K(\phi)}{a} ~ \frac{\partial \psi}{\partial \phi}` in units of :math:`[\psi]~[x]`/s - ``advective_flux`` given by :math:`U(\phi) \psi(\phi)` (same units) - ``total_flux``, the sum of advective, diffusive and prescribed fluxes - ``flux_convergence`` (or instantanous scalar tendency) given by the right hand side of the first equation above, in units of :math:`[\psi]`/s Non-uniform grid spacing is supported. The state variable :math:`\psi` may be multi-dimensional, but the diffusion will operate along the latitude dimension only. """ from __future__ import division import numpy as np from .advection_diffusion import AdvectionDiffusion, Diffusion from climlab import constants as const class MeridionalAdvectionDiffusion(AdvectionDiffusion): """A parent class for meridional advection-diffusion processes. 
""" def __init__(self, K=0., U=0., use_banded_solver=False, prescribed_flux=0., **kwargs): super(MeridionalAdvectionDiffusion, self).__init__(K=K, U=U, diffusion_axis='lat', use_banded_solver=use_banded_solver, **kwargs) # Conversion of delta from degrees (grid units) to physical length units phi_stag = np.deg2rad(self.lat_bounds) phi = np.deg2rad(self.lat) self._Xcenter[...,:] = phi*const.a self._Xbounds[...,:] = phi_stag*const.a self._weight_bounds[...,:] = np.cos(phi_stag) self._weight_center[...,:] = np.cos(phi) # Now properly compute the weighted advection-diffusion matrix self.prescribed_flux = prescribed_flux self.K = K self.U = U class MeridionalDiffusion(MeridionalAdvectionDiffusion): """A parent class for meridional diffusion-only processes, with advection set to zero. Otherwise identical to the parent class. """ def __init__(self, K=0., use_banded_solver=False, prescribed_flux=0., **kwargs): # Just initialize the AdvectionDiffusion class with U=0 super(MeridionalDiffusion, self).__init__( U=0., K=K, prescribed_flux=prescribed_flux, use_banded_solver=use_banded_solver, **kwargs)
3,482
1,080
# Tencent is pleased to support the open source community by making GNES available. # # Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import grpc from .base import BaseService as BS, MessageHandler from ..helper import PathImporter from ..proto import gnes_pb2 class GRPCService(BS): handler = MessageHandler(BS.handler) def post_init(self): self.channel = grpc.insecure_channel( '%s:%s' % (self.args.grpc_host, self.args.grpc_port), options=[('grpc.max_send_message_length', self.args.max_message_size), ('grpc.max_receive_message_length', self.args.max_message_size)]) m = PathImporter.add_modules(self.args.pb2_path, self.args.pb2_grpc_path) # build stub self.stub = getattr(m, self.args.stub_name)(self.channel) def close(self): self.channel.close() super().close() @handler.register(NotImplementedError) def _handler_default(self, msg: 'gnes_pb2.Message'): yield getattr(self.stub, self.args.api_name)(msg)
1,624
519
import uuid from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.db import models from django.utils import timezone from django.utils.timesince import timesince from django.utils.translation import gettext_lazy as _ from dhost.dapps.models import Dapp def get_obj_model(obj): return ContentType.objects.get_for_model(obj) class DappLogManager(models.Manager): def log_action(self, obj, dapp, action_flag, user=None): return self.create( user=user, content_type=get_obj_model(obj), object_id=obj.pk, action_flag=action_flag, dapp=dapp, ) class ActionFlags(models.TextChoices): OTHER = "other", _("Other") DAPP_ADDITION = "dapp_add", _("Dapp created") DAPP_CHANGE = "dapp_change", _("Dapp updated") BUNDLE_ADDITION = "bundle_add", _("Bundle added") BUNDLE_DELETION = "bundle_del", _("Bundle removed") AUTO_DEPLOY_START = "auto_deploy_start", _("Auto deployment started") DEPLOY_START = "deploy_start", _("Deployment started") DEPLOY_SUCCESS = "deploy_success", _("Deployment successful") DEPLOY_FAIL = "deploy_fail", _("Deployment failed") BUILD_OPTIONS_ADDITION = "build_opt_add", _("Build options created") BUILD_OPTIONS_CHANGE = "build_opt_change", _("Build options updated") BUILD_OPTIONS_DELETION = "build_opt_del", _("Build options removed") AUTO_BUILD_START = "auto_build_start", _("Auto build started") BUILD_START = "build_start", _("Build started") BUILD_SUCCESS = "build_success", _("Build successful") BUILD_FAIL = "build_fail", _("Build failed") ENV_VAR_ADDITION = "env_var_add", _("New environment variable") ENV_VAR_CHANGE = "env_var_change", _("Environment variable updated") ENV_VAR_DELETION = "env_var_del", _("Environment variable removed") GITHUB_OPTIONS_ADDITION = "github_opt_add", _("Github options created") GITHUB_OPTIONS_CHANGE = "github_opt_change", _("Github options changed") GITHUB_OPTIONS_DELETION = "github_opt_del", _("Github options removed") class DappLog(models.Model): id = models.UUIDField(primary_key=True, 
default=uuid.uuid4, editable=False) user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True, ) dapp = models.ForeignKey( Dapp, on_delete=models.CASCADE, related_name="logs", related_query_name="logs", ) content_type = models.ForeignKey( ContentType, on_delete=models.SET_NULL, null=True, blank=True, ) object_id = models.TextField(null=True, blank=True) action_flag = models.CharField( max_length=20, choices=ActionFlags.choices, default=ActionFlags.OTHER, ) change_message = models.TextField(blank=True) action_time = models.DateTimeField(default=timezone.now, editable=False) objects = DappLogManager() class Meta: verbose_name = _("Dapp log entry") verbose_name_plural = _("Dapp log entries") ordering = ["-action_time"] def __str__(self): data = { "user": self.user, "dapp": self.dapp, "action_flag": self.action_flag, "timesince": self.timesince(), } return "{user} {dapp} {action_flag} {timesince} ago".format(**data) def timesince(self, now=None): return timesince(self.action_time, now)
3,508
1,140
"""Advent-of-Code style pair-insertion polymer puzzle (part 1)."""
from collections import Counter


def parse_input(lines):
    """Parse puzzle input.

    The first line is the polymer template; after a blank line each
    remaining line is a rule of the form ``AB -> C``.
    Returns ``(template, rules)`` where rules maps pair -> insertion.
    """
    template = lines[0].strip()
    rules = {}
    for line in lines[2:]:
        token, replacement = line.strip().split(" -> ")
        rules[token] = replacement
    return template, rules


def polymerize(template, rules, steps):
    """Return the polymer (list of one-char strings) after *steps* rounds
    of pair insertion."""
    result = list(template)
    for _ in range(steps):
        grown = []
        for i, element in enumerate(result):
            grown.append(element)
            # Insert between this element and the next one, if any.
            if i < len(result) - 1:
                grown.append(rules[result[i] + result[i + 1]])
        result = grown
    return result


def score(polymer):
    """Most-common element count minus least-common element count.

    Bug fix: the original counted only characters in
    ``set(rules.values())``, which miscounts whenever a template element
    never occurs as a rule output. Counter over the polymer itself
    counts every element actually present.
    """
    counts = Counter(polymer)
    return max(counts.values()) - min(counts.values())


def main():
    # "with" closes the input file (the original leaked the handle).
    with open("input.txt", "r") as fp:
        template, rules = parse_input(fp.readlines())
    result = polymerize(template, rules, 10)
    print("The answer of part 1 is", score(result))


if __name__ == "__main__":
    main()
620
217
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('raster', '0005_auto_20141014_0955'), ] operations = [ migrations.AddField( model_name='rastertile', name='tilex', field=models.IntegerField(null=True, db_index=True), preserve_default=True, ), migrations.AddField( model_name='rastertile', name='tiley', field=models.IntegerField(null=True, db_index=True), preserve_default=True, ), migrations.AddField( model_name='rastertile', name='tilez', field=models.IntegerField(db_index=True, null=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18)]), preserve_default=True, ), migrations.AlterField( model_name='rastertile', name='level', field=models.IntegerField(null=True, db_index=True), ), ]
1,199
442
'''
Tests for utils submodule of the analysis module.
'''
from nose.tools import assert_equal, assert_raises

from coral import analysis, DNA, RNA, Peptide


def test_utils():
    '''sequence_type() should classify each coral sequence object by its
    concrete type and reject plain strings.'''
    test_DNA = DNA('ATAGCGATACGAT')
    test_RNA = RNA('AUGCGAUAGCGAU')
    test_peptide = Peptide('msvkkkpvqg')
    # Deliberately a bare str (not a coral sequence object).
    test_str = 'msvkkkpvgq'
    assert_equal(analysis.utils.sequence_type(test_DNA), 'dna')
    assert_equal(analysis.utils.sequence_type(test_RNA), 'rna')
    assert_equal(analysis.utils.sequence_type(test_peptide), 'peptide')
    # A plain string must raise rather than be silently classified.
    assert_raises(Exception, analysis.utils.sequence_type, test_str)
588
222
"""
Carry out template-based replacements in project files
"""
import os
import sys
from string import Template


def replace_name(path, mapping):
    """Substitute placeholders in the file or directory *name*.

    If the last path component contains a ``${...}`` placeholder, the
    entry is renamed on disk. Returns the (possibly new) path.
    """
    dirname, name = os.path.split(path)
    if '${' not in name:
        return path
    new_path = os.path.join(dirname, Template(name).substitute(mapping))
    os.rename(path, new_path)
    return new_path


def replace_ctnt(f, mapping):
    """Substitute placeholders in the file *content*, in place.

    Non-files (directories, missing paths) are silently skipped.
    """
    if not os.path.isfile(f):
        return
    try:
        # "with" guarantees the handle is closed even when substitution
        # fails; the original's finally-block could raise NameError if
        # open() itself failed before t_file was bound.
        with open(f, 'r+') as t_file:
            t = Template(t_file.read())
            t_file.seek(0)
            t_file.write(t.substitute(mapping))
            t_file.truncate()
    except Exception as e:
        sys.stderr.write("""
    ERROR: while running template engine on file %s
    """ % f)
        raise e


def process(path, mapping):
    """Apply name and content templating to a single path."""
    replace_ctnt(replace_name(path, mapping), mapping)


def process_tree(directory, mapping):
    """Apply templating to *directory* and everything below it.

    Bug fix: the original removed items from ``dirnames`` while
    iterating over it (which skips every other entry) and appended full
    paths where os.walk expects bare directory names. We now rename each
    sub-directory and rewrite ``dirnames`` in place with the bare new
    names, so os.walk descends into the renamed entries.
    """
    directory = replace_name(directory, mapping)
    for dirpath, dirnames, filenames in os.walk(directory):
        for fname in filenames:
            process(os.path.join(dirpath, fname), mapping)
        dirnames[:] = [
            os.path.basename(replace_name(os.path.join(dirpath, d), mapping))
            for d in dirnames
        ]
1,765
580
import hashlib
import os

# Read files in fixed-size chunks so large files need not fit in memory
# (the original read each whole file with f.read()).
_CHUNK_SIZE = 1 << 20


def _str_hash(content, algo):
    """Hex digest of *content* (a str) using hashlib constructor *algo*.

    Returns '' for None or empty input — kept for backward
    compatibility: callers treat '' as "nothing to hash", not as the
    digest of the empty string.
    """
    if not content:
        return ''
    h = algo()
    h.update(content.encode())
    return h.hexdigest()


def _file_hash(filename, algo):
    """Hex digest of the file at *filename* using *algo*.

    Returns '' when the file does not exist or cannot be read; read
    errors are printed, matching the original best-effort behaviour.
    """
    if not os.path.exists(filename):
        return ''
    try:
        h = algo()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(_CHUNK_SIZE), b''):
                h.update(chunk)
        return h.hexdigest()
    except Exception as e:
        print(e)
        return ''


# Generate the MD5 digest of a string.
def str2md5(content=None):
    """MD5 hex digest of a string ('' for empty/None input)."""
    return _str_hash(content, hashlib.md5)


# Generate the SHA-256 digest of a string.
def str2sha256(content=None):
    """SHA-256 hex digest of a string ('' for empty/None input)."""
    return _str_hash(content, hashlib.sha256)


# Generate the MD5 digest of a file.
def file2md5(filename):
    """MD5 hex digest of a file ('' if missing/unreadable)."""
    return _file_hash(filename, hashlib.md5)


def file2sha256(filename):
    """SHA-256 hex digest of a file ('' if missing/unreadable)."""
    return _file_hash(filename, hashlib.sha256)


def file2sha1(filename):
    """SHA-1 hex digest of a file ('' if missing/unreadable)."""
    return _file_hash(filename, hashlib.sha1)


def file2sha3(filename):
    """SHA3-384 hex digest of a file ('' if missing/unreadable)."""
    return _file_hash(filename, hashlib.sha3_384)
1,747
610
import os
import sys

from dotenv import load_dotenv
from facebook_scraper import get_posts

# Credentials and the target group id are read from a .env file.
load_dotenv()

print ("hi")

result = []
# Scrape a single page of the configured Facebook group, logging in
# with the credentials from the environment.
for post in get_posts(group=os.environ.get("FacebookGroupId"),
                      pages=1,
                      credentials=(os.environ.get("FacebookUser"),
                                   os.environ.get("FacebookPassword"))):
    # Keep only the fields downstream consumers need.
    result.append({
        "post_id": post["post_id"],
        "text": post["text"],
        "user_id": post["user_id"],
        "username": post["username"],
        "time": post["time"]
    })
    # Progress marker: one line per post scraped.
    print ({
        "post_id": post['post_id']
    })
    # print (post)

print (result)
# Flush so the output is visible immediately when piped to a parent
# process (this script appears to be driven externally).
sys.stdout.flush()
609
199
import os #github login SITE = 'https://api.github.com' CALLBACK = 'https://oneliner.sh/oauth2' AUTHORIZE_URL = 'https://github.com/login/oauth/authorize' TOKEN_URL = 'https://github.com/login/oauth/access_token' SCOPE = 'user' #redis config REDIS_HOST = os.environ['REDIS_HOST'] #REDIS_HOST = 'localhost' REDIS_PORT = 6379 REDIS_DB = 0 DATA_DIR = 'oneliners' DEBUG = True #app SUBMISSION_PATH = 'incoming'
442
175
import json

from npt import log
from . import tmpdir


def read_geojson(filename):
    """Parse a GeoJSON file and return the resulting JSON object."""
    with open(filename, 'r') as geojson_file:
        return json.load(geojson_file)
214
72
""" Prueba creacion de usuarios """ # import json from typing import Any, Dict import pytest from django.contrib.auth import get_user_model from apps.user.serializers import UserHeavySerializer # from django.contrib.auth.models import User User = get_user_model() pytestmark = [pytest.mark.django_db, pytest.mark.users_views] @pytest.mark.users_crud def test_create_user(admin_client): """ ... """ data: Dict[str, Any] = { "username": "NEW", "email": "newemail@gmail.com", "password": "123", "first_name": "name", "last_name": "name2", "is_staff": False, } response = admin_client.post("/api/users/", data) serializer = UserHeavySerializer(User.objects.get(id=response.data["id"]),) assert response.status_code == 201 assert serializer.data == response.data @pytest.mark.users_crud def test_not_allowed_to_create_user(user_client, public_client): """ ... """ data: Dict[str, Any] = { "username": "NEW", "email": "newemail@gmail.com", "password": "123", "first_name": "name", "last_name": "name2", "is_staff": False, } response = user_client.post("/api/users/", data) assert response.status_code == 403 response = public_client.post("/api/users/", data) assert response.status_code == 401 @pytest.mark.users_crud def test_not_create_superuser(admin_client): """ ... """ data: Dict[str, Any] = { "username": "superuser", "email": "newsuperemail@gmail.com", "password": "123", "first_name": "name", "last_name": "name2", "is_staff": True, "is_superuser": True, } response = admin_client.post("/api/users/", data) serializer = UserHeavySerializer(User.objects.get(id=response.data["id"]),) assert response.status_code == 201 assert response.data == serializer.data assert not response.data["is_superuser"] @pytest.mark.users_crud def test_create_error_params(admin_client): """ ... 
""" data: Dict[str, Any] = { "names": "NEW_USER", "email": "newemail@gmail.com", } response = admin_client.post("/api/users/", data) assert response.status_code == 400 @pytest.mark.users_crud def test_create_error_duplicate(admin_client): """ ... """ data: Dict[str, Any] = { "username": "NEW", "email": "newemail@gmail.com", "password": "123", "first_name": "name", "last_name": "name2", "is_staff": False, } admin_client.post("/api/users/", data) response = admin_client.post("/api/users/", data) assert response.status_code == 400 @pytest.mark.users_crud def test_get_user(admin_client): """ ... """ response = admin_client.get("/api/user/" + str(1) + "/") serializer = UserHeavySerializer(User.objects.get(id=1)) assert response.status_code == 200 assert serializer.data == response.data @pytest.mark.users_crud def test_get_user_not_found(admin_client): """ ... """ response = admin_client.get("/api/user/" + str(1000) + "/") assert response.status_code == 404 @pytest.mark.users_crud def test_update_user(admin_client): """ ... """ oldvalues = UserHeavySerializer(User.objects.get(id=1)) newdata: Dict[str, Any] = { "username": "NEW", "first_name": "new name", "last_name": "new name2", "email": "update_email@django.com", } response = admin_client.put("/api/user/" + str(1) + "/", newdata) newvalues = UserHeavySerializer(User.objects.get(id=1)) # assert response.data == 'yeah' assert response.status_code == 200 assert newvalues.data != oldvalues.data assert newvalues.data == response.data @pytest.mark.users_crud def test_error_params_update_user(admin_client): """ ... """ oldvalues = UserHeavySerializer(User.objects.get(id=1)) newdata: Dict[str, Any] = { "usernames": "NEW", "first_namesss": "new name", } response = admin_client.put("/api/user/" + str(1) + "/", newdata) newvalues = UserHeavySerializer(User.objects.get(id=1)) assert response.status_code == 400 assert newvalues.data == oldvalues.data @pytest.mark.users_crud def test_delete_user(admin_client): """ ... 
""" response = admin_client.delete("/api/user/" + str(2) + "/") assert response.status_code == 204 @pytest.mark.users_crud def test_not_allowed_to_delete_user(user_client, public_client): """ ... """ response = user_client.delete("/api/user/" + str(4) + "/") assert response.status_code == 403 response = public_client.delete("/api/user/" + str(4) + "/") assert response.status_code == 401 @pytest.mark.users_crud def test_user_does_not_delete_himself(admin_client): """ ... """ response = admin_client.delete("/api/user/" + str(1) + "/") assert response.status_code == 400 assert response.data == "can't delete himself" @pytest.mark.users_crud def test_not_delete_superuser(admin_client): """ ... """ response = admin_client.delete("/api/user/" + str(3) + "/") assert response.status_code == 400 assert response.data == "super users cannot be deleted" @pytest.mark.users_crud def test_delete_admin_user(admin_client, staff_client): """ ... """ response = staff_client.delete("/api/user/" + str(5) + "/") assert response.status_code == 403 # response = staff_client.delete('/api/user/' + str(4) + '/') # assert response.status_code == 403 # assert response.data == 'user cannot delete administrators' response = admin_client.delete("/api/user/" + str(5) + "/") assert response.status_code == 204 @pytest.mark.users_crud def test_create_user_unique_email(admin_client): """ ... """ repeat_email = "admin@django.com" data: Dict[str, Any] = { "username": "NEW", "email": repeat_email, "password": "123", "first_name": "name", "last_name": "name2", "is_staff": False, } response = admin_client.post("/api/users/", data) # assert response.data == 'yeah' assert User.objects.filter(email__exact=repeat_email).count() == 1 assert response.status_code == 400
6,351
2,211
import pybullet as p
import pybullet
import time

# Interactive demo: drop ten small spheres and a duck into a concave
# box under gravity, then step the simulation forever.
p.connect(p.GUI)
p.loadURDF("toys/concave_box.urdf")
p.setGravity(0,0,-10)
# Ten 1 cm spheres spaced 2 cm apart along x, 0.5 m above the floor.
for i in range (10):
    p.loadURDF("sphere_1cm.urdf",[i*0.02,0,0.5])
p.loadURDF("duck_vhacd.urdf")
# 240 Hz physics step, matched by the real-time sleep below.
timeStep = 1./240.
p.setTimeStep(timeStep)
while (1):
    p.stepSimulation()
    time.sleep(timeStep)
319
161
from pylatex import Document, Tabular, Section, NoEscape, Command, MultiRow

from Old.BioCatHubDatenmodell import DataModel

# Demo values used to fill the report tables.
# NOTE(review): the tables below mix these module globals with the data
# model handed to PdfLibrary — presumably placeholders until the model
# supplies everything; confirm.
first_name = "some firstname"
last_name = "some lastname"
e_mail = "some@adress.com"
institution = "some institution"

vessel_type = "some vessel"
volume = int(42)
vol_unit = "mol/l"
add_attributes = [{"Sektor": "Kruzifix"}, {"Bereich": "Eisheiligen"}]

temp = int(42)
temp_unit = "°C"
ph_value = int(7)
buffer = "some buffer"


class PdfLibrary (Document):
    """Render a BioCatHub data model as a PDF report via pylatex."""

    def __init__(self, data_model):
        # NOTE(review): Document.__init__ is never invoked, so the
        # instance is not an initialised Document itself; create_pdf
        # works anyway because it builds its own local Document below.
        # Confirm whether the inheritance is intentional.
        self.biocathub_model = data_model

    def create_pdf(self):
        """Build User/Vessel/Condition sections and compile
        'Gesamt_Test.pdf' (requires pdflatex; keeps the .tex file)."""
        geometry_options = {
            "margin": "2cm",
            "includeheadfoot": True
        }
        doc = Document(page_numbers=True, geometry_options=geometry_options)
        doc.preamble.append(Command("title", self.biocathub_model["title"]))
        doc.append(NoEscape(r"\maketitle"))

        with doc.create(Section("User:")):
            with doc.create(Tabular("|c|c|")) as table:
                table.add_hline()
                table.add_row(["First Name", first_name])
                table.add_hline()
                table.add_row(["Last Name", last_name])
                table.add_hline()
                table.add_row(["E-Mail", e_mail])
                table.add_hline()
                table.add_row(["Institution", institution])
                table.add_hline()

        with doc.create(Section("Vessel:")):
            with doc.create(Tabular("|c|c|")) as table2:
                # NOTE(review): `key` is a *list* here (list(i.keys())),
                # unlike the Condition loop which takes element [0] —
                # i[key] with a list key would raise TypeError at
                # runtime; confirm which form is intended.
                for i in DataModel["vessel"]:
                    key = list(i.keys())
                    table2.add_row([key, i[key]])
                    table2.add_hline()

        with doc.create(Section("Condition:")):
            with doc.create(Tabular("|c|c|")) as table3:
                table3.add_hline()
                table3.add_row(["Temperature", temp])
                table3.add_hline()
                table3.add_row(["Unit", temp_unit])
                table3.add_hline()
                table3.add_row(["pH", ph_value])
                table3.add_hline()
                table3.add_row(["Buffer", buffer])
                table3.add_hline()
                # Extra free-form attributes: one {name: value} per dict.
                for i in add_attributes:
                    key = list(i.keys())[0]
                    table3.add_row([key, i[key]])
                    table3.add_hline()

        doc.generate_pdf("Gesamt_Test", compiler="pdflatex", clean_tex=False)


# Runs at import time: builds the demo report immediately.
doc = PdfLibrary(DataModel)
doc.create_pdf()
2,472
769
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Flag 0 makes imread return a single-channel (2-D) grayscale image.
pic = cv2.imread('image2.png', 0)
#pic = imageio.imread('img/parrot.jpg')


def to_gray(image):
    """Return a 2-D grayscale float array for *image*.

    Bug fix: the original unconditionally applied the RGB luminance
    dot-product; on the 2-D array produced by imread(..., 0) that
    sliced the first three *columns* instead of color channels and
    yielded garbage. A 2-D input is now passed through unchanged.
    """
    image = np.asarray(image, dtype=np.float64)
    if image.ndim == 2:
        return image
    # ITU-R BT.601 luma weights on the first three channels.
    return np.dot(image[..., :3], [0.299, 0.587, 0.114])


gray = to_gray(pic)

'''
log transform
-> s = c*log(1+r)
So, we calculate constant c to estimate s
-> c = (L-1)/log(1+|I_max|)
'''


def log_transform(channel=None):
    """Log transform s = c*log(1+r), with c = (L-1)/log(1+|I_max|).

    Args:
        channel: 2-D array to transform; defaults to the module-level
            ``gray`` image for backward compatibility with the original
            zero-argument call.

    Returns:
        Array of the same shape scaled into [0, 255].
    """
    if channel is None:
        channel = gray
    channel = np.asarray(channel, dtype=np.float64)
    peak = np.max(channel)
    return (255 / np.log(1 + peak)) * np.log(1 + channel)


plt.figure(figsize=(5, 5))
plt.imshow(log_transform(), cmap=plt.get_cmap(name='gray'))
plt.axis('off')
535
243
#!/usr/bin/env python import pika import sys connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) channel = connection.channel() channel.queue_declare(queue='task_queue', durable=True) message = ''.join(sys.argv[1:]) or 'Hello World!' for i in range(30): message = str(i)+' '+i*'.' channel.basic_publish(exchange='', routing_key='task_queue',body=message,properties=pika.BasicProperties(delivery_mode=2,)) print " [x] Sent " + message connection.close()
493
164
from ConfigParser import SafeConfigParser from cStringIO import StringIO import sqlalchemy from sqlalchemy import create_engine from sqlalchemy import MetaData from sqlalchemy.orm import sessionmaker from os.path import sep from hashlib import md5 from datetime import datetime, timedelta import re import logging import functools NON_LTREE = re.compile(r'[^a-zA-Z0-9/]') LOG = logging.getLogger(__name__) CONFIG = None metadata = MetaData() Session = sessionmaker() def loadconfig(filename): defaults=StringIO("""\ [cli_logging] error_log= """) config = SafeConfigParser() config.readfp(defaults) config.read(filename) dsn = config.get('database', 'dsn', None) if not dsn: raise ValueError('No DSN found in the config file! This is required!') set_dsn(dsn) return config class memoized(object): """Decorator that caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned, and not re-evaluated. """ def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): obsoletion = datetime.now() - timedelta(seconds=60*5) if args in self.cache and self.cache[args][1] < obsoletion: # value too old. Remove it from the cache LOG.debug("Removing obsolete value for args %r from cache." % (args,)) del(self.cache[args]) try: output = self.cache[args][0] LOG.debug("Cache hit for args %r." % (args,)) return output except KeyError: LOG.debug("Initialising cache for args %r." % (args,)) value = self.func(*args) if isinstance(value, sqlalchemy.orm.query.Query): result = value.all() self.cache[args] = (result, datetime.now()) return result else: self.cache[args] = (value, datetime.now()) return value except TypeError: # uncachable -- for instance, passing a list as an argument. # Better to not cache than to blow up entirely. 
LOG.warning("Uncachable function call for args %r" % (args,)) return self.func(*args) def __repr__(self): """Return the function's docstring.""" return self.func.__doc__ def __get__(self, obj, objtype): """Support instance methods.""" return functools.partial(self.__call__, obj) def uri_depth(uri): "determines the depth of a uri" if not uri: return 0 if uri.endswith(sep): uri = uri[0:-1] return len(uri.split(sep)) def file_md5(path): """ Return the MD5 hash of the file """ hash = md5() fptr = open(path, "rb") chunk = fptr.read(1024) while chunk: hash.update(chunk) chunk = fptr.read(1024) fptr.close() return hash.hexdigest() def uri_to_ltree(uri): if not uri or uri == "/": return "ROOT" if uri.endswith(sep): uri = uri[0:-1] if uri.startswith(sep): ltree = "ROOT%s%s" % (sep, uri[1:]) else: ltree = uri # the ltree module uses "." as path separator. Replace dots by # underscores and path separators by dots ltree = NON_LTREE.sub("_", ltree) ltree = ltree.replace(sep, ".") return ltree def set_dsn(dsn): engine = create_engine(dsn) metadata.bind = engine Session.bind = engine from metafilter.model.nodes import Node from metafilter.model.queries import Query from metafilter.model.tags import Tag # # Parse the config file # from os.path import join, exists, expanduser from os import getcwd paths = [ join(getcwd(), 'config.ini'), join(expanduser("~"), '.metafilter', 'config.ini'), join('/', 'etc', 'metafilter', 'config.ini'), ] for path in paths: if not exists(path): continue LOG.debug('Reading config from %s' % path) CONFIG = loadconfig(path) if not CONFIG: LOG.error('Unable to open config file (search order: %s)' % (', '.join(paths)))
4,100
1,286
from pathlib import Path from typing import Union import yaml class Config(object): """Basic Config Class""" def __init__(self, cfg_yaml_path:str, root:str=".", data_path:str="./data"): r""" Configuration of Settings Args: root: root path of project, default="." data_path: data path that contains data directories cfg_yaml_path: argument file path(`str`) It will create directory automatically by `cfg_yaml_path`, ``` checkpoints └── data_type └── eval_type ├── exp_arg1 │ ├── exp1_summary │ ├── model_type + attr_type1 <-weights │ ├── model_type + attr_type2 │ └── model_type + attr_type3 ├── exp_arg2 └── exp_arg3 ``` `cfg_yaml_path` file shuould like below. ```yaml # confiugre.yaml type: data_type: mnist eval_type: roar model_type: resnet18 attr_type: ["vanillagrad", "gradcam"] ... ``` """ self.prj_path = Path(root) self.data_path = Path(data_path) with open(cfg_yaml_path, mode="r") as f: conf = yaml.load(f, Loader=yaml.FullLoader) # vars(self).update(conf) self.__dict__.update(conf) self.check_type_args() def check_type_args(self): r""" Check arguments and create experiment path """ type_args = self.conf["type_args"] check_types = ["data_type", "eval_type", "model_type", "attr_type"] for c_type in check_types: if not (c_type in type_args): raise KeyError(f"Configure file dosen't have {c_type}, check your argument file") self.exp_path = self.prj_path / "checkpoints" / type_args["data_type"] / type_args["eval_type"] self.check_dir_exist(self.exp_path) def check_dir_exist(self, path:Union[str, Path], file:bool=False): r""" Check directory file is exists, if not exists will create one Args: path: `str` or `pathlib.Path` type file: if True, will create a file, not a directory path """ if not isinstance(path, Path): path = Path(path) if file: if not path.exists(): path.touch() print(f"Given path doesn't exists, created {path}") else: if not path.exists(): path.mkdir(parents=True) print(f"Given path doesn't exists, created {path}") @property def conf(self): return 
self.__dict__ class Checkpoints(object): """Model Checkpoint Manager""" def __init__(self, cfg): r""" Save details about model weights and summaries """ def save_model(self): r""" Save model weights """ def save_summary(self): r""" Save training stats """
3,067
887
"""
List Comprehension (nested — now simplified)

Goal: find the largest number(s) in a list and print them as a new list.
"""

listaGenerica = [1, 2, 3, 4, 1, 2, 3, 4, 10, 10, 10, 5, 3, -4]

# Compute the maximum once (O(n)) instead of comparing every element
# against every other element — the original nested comprehension was
# O(n**2) while producing the same result.
maiorValor = max(listaGenerica)
listaMaior = [x for x in listaGenerica if x == maiorValor]

print(listaMaior)
307
132
def main():
    """Print the lines of the file ``demo1``, then demo addOne."""
    # age = input("How old are you?")
    # print("I am %s year old" % age)
    # "with" closes the file even if printing raises (the original
    # relied on a manual close that a mid-loop exception would skip).
    with open("demo1") as fh:
        lines = fh.readlines()
    print("lines", lines)
    # Iterate lines directly instead of indexing by range(len(...)).
    for line in lines:
        print(line)

    c, d = addOne(1, 2)
    print(c, d)


def addOne(a, b):
    """Return both arguments incremented by one, as a tuple."""
    return a + 1, b + 1


if __name__ == '__main__':
    main()
357
144
#!/usr/bin/python3

def nbracines(a, b, c):
    """Print and return the number of distinct real roots of a*X^2+b*X+c.

    Returns None when a == 0 (not a quadratic). A discriminant within
    1e-10 of zero is treated as a double root to absorb floating-point
    error (see the nbracines(1, 0.2, 0.01) example, an exact square).
    """
    if a == 0:
        print("Le coefficient dominant est nul, ce n'est pas un trinome !")
        return None
    d = b*b - 4*a*c
    k = 2
    if abs(d) < 1e-10:
        k = 1
        d = 0
    elif d < 0:
        k = 0
    print("Le polynome " + str(a) + "X^2 + " + str(b) + "X + " + str(c)
          + " admet " + str(k) + " racines distinctes (det = " + str(d) + ")")
    # Returning k (the original returned None) lets callers and tests
    # use the result; print output is unchanged.
    return k


def main():
    """Interactive entry point.

    Moved under the __main__ guard so importing this module no longer
    blocks on input().
    """
    a = float(input("Entrez le coefficient dominant du trinome : "))
    b = float(input("Entrez le coefficient d'ordre 1 du trinome : "))
    c = float(input("Entrez la constante du trinome : "))
    nbracines(a, b, c)
    nbracines(0, 3, 1)
    nbracines(1, 0.2, 0.01)


if __name__ == '__main__':
    main()
651
286
# Copyright (c) 2011 AlphaSierraPapa for the SharpDevelop Team # # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons # to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
from System import * from System.Collections.Generic import * from System.Linq import * from System.Threading import * from Mono.Cecil import * class AnalyzedTypeExtensionMethodsTreeNode(AnalyzerSearchTreeNode): def __init__(self, analyzedType): if analyzedType == None: raise ArgumentNullException("analyzedType") self._analyzedType = analyzedType def get_Text(self): return "Extension Methods" Text = property(fget=get_Text) def FetchChildren(self, ct): analyzer = ScopedWhereUsedAnalyzer[AnalyzerTreeNode](self._analyzedType, FindReferencesInType) return analyzer.PerformAnalysis(ct).OrderBy() def FindReferencesInType(self, type): if not self.HasExtensionAttribute(type): enumerator = type.Methods.GetEnumerator() while enumerator.MoveNext(): method = enumerator.Current if method.IsStatic and self.HasExtensionAttribute(method): if method.HasParameters and method.Parameters[0].ParameterType.Resolve() == self._analyzedType: node = AnalyzedMethodTreeNode(method) node.Language = self._Language def HasExtensionAttribute(self, p): if p.HasCustomAttributes: enumerator = p.CustomAttributes.GetEnumerator() while enumerator.MoveNext(): ca = enumerator.Current t = ca.AttributeType if t.Name == "ExtensionAttribute" and t.Namespace == "System.Runtime.CompilerServices": return True return False def CanShow(type): # show on all types except static classes return not (type.IsAbstract and type.IsSealed) CanShow = staticmethod(CanShow)
2,641
874
class Solution:
    def uniqueLetterString(self, S: str) -> int:
        """Sum over all substrings of S of the count of characters that
        appear exactly once in that substring (LeetCode 828).

        Each occurrence of a character at index i is the unique
        occurrence in exactly (i - prev) * (next - i) substrings, where
        prev/next are the previous/next indices of the same character
        (sentinel -1 / len(S)). Summing these contributions gives the
        answer in O(n) time and O(k) space for alphabet size k.
        """
        # last[ch] = (index of second-to-last occurrence, last occurrence)
        last = {}
        total = 0
        n = len(S)
        for i, ch in enumerate(S):
            prev2, prev1 = last.get(ch, (-1, -1))
            if prev1 != -1:
                # S[prev1] now has a next occurrence at i; settle its
                # contribution (left span) * (right span).
                total += (prev1 - prev2) * (i - prev1)
            last[ch] = (prev1, i)
        # Settle the final occurrence of every character, whose "next"
        # occurrence is the end sentinel n.
        for prev2, prev1 in last.values():
            total += (prev1 - prev2) * (n - prev1)
        return total
74
24
import sys import numpy as np class PiezoTransducer(object): """Piezoelectric transducer data structure """ def __init__(self,Glob,(zMin,zMax),h=1.): """initial instance of PiezoTransducer class Args: Glob (data structure): data structure holding domain configuration (zMin, zMax) (tuple, floats): boundary locations of sensing layer h (float): effective piezoelectric parameter Note: in case of mechanically free setup with open-circuit boundary condition, the effective piezoelectric parameter reads h = d / (eT sD), wherein d is the piezoelectric strain constant, eT is the dielectric coefficient, and sD is the mechanical compliance (see Refs. [1,2]). Refs: [1] PVDF piezoelectric polymer Ueberschlag, P. Sensor Review, 21 (2001) 118-126 [2] PVDF piezoelectric polymers: characterization and application to thermal energy harvesting Gusarov, B. Universite Grenoble Alpes (2015) """ self.dz = Glob.dz self.zIdMin = max(1,Glob._z2i(zMin)) self.zIdMax = Glob._z2i(zMax) self.E = np.zeros(self.zIdMax-self.zIdMin) self.h = h self.t = [] self.U = [] self.Us = [] def measure(self,n,dt,u,tau): """method implementing measurement at time instant Implements finite-difference approximation to state equation for direct piezoelectric effect. Args: n (int): current time step dt (float): increment between consequtive times steps u (numpy array, ndim=1): velocity profile p (numpy array, ndim=1): acoustic stress profile """ C = dt/self.dz E0 = self.E h = self.h zL, zH = self.zIdMin, self.zIdMax # evolve electric field within transducer self.E[:] = E0[:] - h*C*(u[zL-1:zH-1]-u[zL:zH]) # determine potential difference across transducer dU = -np.trapz(self.E,dx=self.dz) self.t.append(n*dt) self.U.append(dU) self.Us.append(tau[self.zIdMax]) def dumpField(self,fName=None): """method writing field configuration to file Args: fName (str, optional): optional output file-path. 
If none is given, sys.stdout is used instead """ fStream = open(fName,'w') if fName else sys.stdout fStream.write("# (z) (E) \n") for i in range(len(self.E)): fStream.write("%lf %lf\n"%(self.dz*(self.zIdMin+i),self.E[i])) def dumpSignal(self,fName=None): """method writing transducer response to file Args: fName (str, optional): optional output file-path. If none is given, sys.stdout is used instead """ fStream = open(fName,'w') if fName else sys.stdout fStream.write("# (t) (p) \n") for i in range(len(self.U)): fStream.write("%lf %lf\n"%(self.t[i],self.U[i])) # EOF: detector.py
3,187
1,009
# Demonstrate Python's three numeric constructors and their runtime types.
number_int = int("32")        # parse an int from a string
number_float = float(32)      # widen an int to a float
number_complex = complex(3222342332432435435345324435324523423)  # real part only

# Print each value next to its type, in construction order.
for value in (number_int, number_float, number_complex):
    print(type(value), ": ", value)
249
111
#!/usr/bin/env python3 import unittest import kitty from kitty import uncomfy_checker import mock class TestProgram(unittest.TestCase): def test_comfy(self): kitty.uncomfy_checker = mock.Mock(return_value='comfortable') self.assertIn(uncomfy_checker(), 'comfortable') if __name__ == '__main__': unittest.main()
338
124
from sympy.abc import s
from sympy.physics.control.lti import TransferFunction
from sympy.physics.control.control_plots import ramp_response_plot

# Plot the ramp response of G(s) = s / ((s+4)(s+8)) for t in [0, 2].
tf1 = TransferFunction(s, (s+4)*(s+8), s)
ramp_response_plot(tf1, upper_limit=2)   # doctest: +SKIP
251
93
"""
Main command line interface to the pynorare package.
"""
import sys
import pathlib
import contextlib

from cldfcatalog import Config, Catalog
from clldutils.clilib import register_subcommands, get_parser_and_subparsers, ParserError, PathType
from clldutils.loglib import Logging
from pyconcepticon import Concepticon

from pynorare import NoRaRe
import pynorare.commands


def main(args=None, catch_all=False, parsed_args=None):
    """Entry point for the ``norare`` CLI.

    Builds the parser, wires the concepticon and norare repositories
    onto the parsed args, dispatches to the chosen subcommand, and maps
    errors to process exit codes (0 success, 1 failure).
    """
    try:  # pragma: no cover
        # Prefer the configured concepticon clone; fall back to the
        # current directory when no catalog configuration exists.
        repos = Config.from_file().get_clone('concepticon')
    except KeyError:  # pragma: no cover
        repos = pathlib.Path('.')
    parser, subparsers = get_parser_and_subparsers('norare')
    parser.add_argument(
        '--repos',
        help="clone of concepticon/concepticon-data",
        default=repos,
        type=PathType(type='dir'))
    parser.add_argument(
        '--repos-version',
        help="version of repository data. Requires a git clone!",
        default=None)
    parser.add_argument(
        '--norarepo',
        default=pathlib.Path('.'),
        type=PathType(type='dir'))
    register_subcommands(subparsers, pynorare.commands)

    args = parsed_args or parser.parse_args(args=args)

    if not hasattr(args, "main"):  # pragma: no cover
        # No subcommand selected: show usage and fail.
        parser.print_help()
        return 1

    with contextlib.ExitStack() as stack:
        stack.enter_context(Logging(args.log, level=args.log_level))
        if args.repos_version:  # pragma: no cover
            # If a specific version of the data is to be used, we make
            # use of a Catalog as context manager:
            stack.enter_context(Catalog(args.repos, tag=args.repos_version))
        # Replace the raw paths with API objects the subcommands expect.
        args.repos = Concepticon(args.repos)
        args.api = NoRaRe(args.norarepo, concepticon=args.repos)
        args.log.info('norare at {0}'.format(args.repos.repos))
        try:
            return args.main(args) or 0
        except KeyboardInterrupt:  # pragma: no cover
            return 0
        except ParserError as e:  # pragma: no cover
            # Bad arguments for the subcommand: show its help text.
            print(e)
            return main([args._command, '-h'])
        except Exception as e:  # pragma: no cover
            if catch_all:  # pragma: no cover
                print(e)
                return 1
            raise


if __name__ == '__main__':  # pragma: no cover
    sys.exit(main() or 0)
2,309
720
from django.apps import AppConfig


class AppKasirConfig(AppConfig):
    """Django application configuration for the ``app_kasir`` app."""

    # Dotted path / label under which Django registers this app.
    name = 'app_kasir'
92
33
""" Evaluate recommendations. """ import config from collections import defaultdict class ConfusionMatrixEvaluator(object): """Evaluate result's precision and recall.""" def __init__(self): self.experiment_reset() self.reset() def experiment_reset(self): self.exp_results = defaultdict(list) def reset(self): self.results = [] def round_start(self): """Reset data for new round.""" self.precisions = [] self.recalls = [] def evaluate(self, actual_result, test_result): """ @param actual_results the set of actual products @param test_results the set of calculated products """ correct_count = float(len(actual_result & test_result)) precision = correct_count / len(test_result) if test_result else 0.0 self.precisions.append(precision) recall = correct_count / len(actual_result) if actual_result else 0.0 self.recalls.append(recall) def round_end(self): avg_precision = sum(self.precisions) / len(self.precisions) avg_recall = sum(self.recalls) / len(self.recalls) self.results.append((avg_precision, avg_recall)) def _avg_result(self, results, print_each=False, scale='Round'): all_precision = 0.0 all_recall = 0.0 for i, result in enumerate(results, 1): precision, recall = result if print_each: print '%s %2d: precision: %.4f | recall: %.4f' % (scale, i, precision, recall) all_precision += precision all_recall += recall n = len(results) return all_precision/n, all_recall/n def summary(self, recommender, print_result): avg_precision, avg_recall = self._avg_result(self.results, config.verbose) self.exp_results[str(recommender)].append((avg_precision, avg_recall)) if print_result: print '>Average: precision: %.4f | recall: %.4f' % (avg_precision, avg_recall) def grand_summary(self): statistics = [] for recommender, results in self.exp_results.iteritems(): avg_precision, avg_recall = self._avg_result(results) statistics.append((recommender, avg_precision, avg_recall)) statistics.sort(key=lambda t:t[1], reverse=True) for recommender, precision, recall in statistics: print 'Precision: 
%.4f | Recall %.4f -- %s' % (precision, recall, recommender)
2,472
763
import os
import cv2
import time
import argparse
import torch
import warnings
import numpy as np

from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
from numpy import loadtxt  # used to read gt.txt


class VideoTracker(object):
    """Detect + track people in a video (or webcam) with YOLO + DeepSORT.

    Optionally seeds the tracker with MOT-format ground-truth boxes
    (``--gt``) for the first three frames instead of detector output.
    """

    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            # Opened later in __enter__ once the path has been validated.
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names

    def __enter__(self):
        # Open the source, prepare output writer/paths, optionally load gt.
        if self.args.cam != -1:
            ret, frame = self.vdo.read()
            assert ret, "Error: Camera error"
            # NOTE(review): frame.shape[0] is height and shape[1] is width,
            # so im_width/im_height look swapped here (the video-file branch
            # below assigns them the other way round) — confirm.
            self.im_width = frame.shape[0]
            self.im_height = frame.shape[1]

        else:
            assert os.path.isfile(self.video_path), "Path error"
            self.vdo.open(self.video_path)
            self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            assert self.vdo.isOpened()

        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)

            # path of saved video and results
            self.save_video_path = os.path.join(self.args.save_path, "results.avi")
            self.save_results_path = os.path.join(self.args.save_path, "results.txt")

            # create video writer
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))

            # logging
            self.logger.info("Save results to {}".format(self.args.save_path))

        # if ground-truth data should be read from gt.txt
        if self.args.gt:
            # gt.txt is expected in a sibling "gt" directory of the video.
            gtFolder = self.video_path + "/../gt/gt.txt"
            gt = loadtxt(gtFolder, delimiter=",")

            def sortwithFrame(elem):
                # column 0 = frame number
                return elem[0]

            # sort list with key
            gt_sorted = sorted(gt,key=sortwithFrame)
            #-----------------------------
            # drop rows whose object_type (column 7) != 1
            def filterType(param):
                if (param[7]==1):
                    return True
                else:
                    return False
            gt_filtered = list(filter(filterType, gt_sorted))
            #-------------------------------
            # drop rows whose not_ignored flag (column 6) is 0
            def filterIgnore(param):
                if (param[6]==1):
                    return True
                else:
                    return False
            gt_filtered2 = list(filter(filterIgnore, gt_filtered))
            self.gt = np.array(gt_filtered2)

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Only report the exception; resources (capture/writer) are not
        # explicitly released here.
        if exc_type:
            print(exc_type, exc_value, exc_traceback)

    # The corresponding function inside deep_sort did not work correctly,
    # so I adapted it and rewrote it here as a method.
    # input: frame image, bbox matrix in xywh format (shape = #ofDetections, 4)
    # output: the xyxy-format equivalent of the xywh matrix
    def my_xywh_to_xyxy(self,ori_img, bbox_xywh):
        x,y,w,h = bbox_xywh[:,0],bbox_xywh[:,1],bbox_xywh[:,2],bbox_xywh[:,3]
        x = x.reshape((x.size,1))
        y = y.reshape((y.size,1))
        w = w.reshape((w.size,1))
        h = h.reshape((h.size,1))
        # get the frame dimensions
        height, width = ori_img.shape[:2]
        # (x, y) is the box center; clip corners to the frame.
        x1 = np.maximum(np.int_(x-w/2),0)
        x2 = np.minimum(np.int_(x+w/2),width-1)
        y1 = np.maximum(np.int_(y-h/2),0)
        y2 = np.minimum(np.int_(y+h/2),height-1)
        arr = np.concatenate((x1,y1,x2,y2),axis=1)
        return arr

    # Convert top-left(xy)wh boxes (as found in gt.txt) to center-xywh
    # (the format the detector produces).
    def my_tlwh_to_xywh(self,ori_img, bbox_tlwh):
        x,y,w,h = bbox_tlwh[:,0],bbox_tlwh[:,1],bbox_tlwh[:,2],bbox_tlwh[:,3]
        x = x.reshape((x.size,1))
        y = y.reshape((y.size,1))
        w = w.reshape((w.size,1))
        h = h.reshape((h.size,1))
        # get the frame dimensions
        height, width = ori_img.shape[:2]
        # center = top-left + half extent, clipped to the frame.
        x1 = np.minimum(np.int_(x+w/2),width-1)
        y1 = np.minimum(np.int_(y+h/2),height-1)
        arr = np.concatenate((x1,y1,w,h),axis=1)
        return arr

    # top-left(xy)wh >> xyxy converter.
    # In gt the data is given as tl(xy)wh; YOLO produces xywh (xy = center).
    def my_tlwh_to_xyxy(self,ori_img, bbox_tlwh):
        x,y,w,h = bbox_tlwh[:,0],bbox_tlwh[:,1],bbox_tlwh[:,2],bbox_tlwh[:,3]
        x = x.reshape((x.size,1))
        y = y.reshape((y.size,1))
        w = w.reshape((w.size,1))
        h = h.reshape((h.size,1))
        # get the frame dimensions
        height, width = ori_img.shape[:2]
        x1 = np.maximum(np.int_(x),0)
        x2 = np.minimum(np.int_(x+w),width-1)
        y1 = np.maximum(np.int_(y),0)
        y2 = np.minimum(np.int_(y+h),height-1)
        arr = np.concatenate((x1,y1,x2,y2),axis=1)
        return arr

    def run(self):
        """Main loop: detect, (optionally) substitute gt, track, draw, save."""
        results = []
        idx_frame = 0
        while self.vdo.grab():
            idx_frame += 1
            # Skip frames according to --frame_interval.
            if idx_frame % self.args.frame_interval:
                continue

            start = time.time()
            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            #print(im.shape)  # (video_height, video_width, 3)

            # do detection
            bbox_xywh, cls_conf, cls_ids = self.detector(im)  # bbox_xywh, confidence, labels

            # section that reads boxes from gt and feeds them instead of YOLO
            if (self.args.gt):  # if --gt was passed on the command line
                if(idx_frame == 1 or idx_frame == 2 or idx_frame == 3):  # for the first three frames, gt data replaces YOLO output
                    # extract the rows of the filtered gt for the current frame
                    gt_curr_frame = self.gt[self.gt[:,0]==idx_frame].astype('float64')
                    gt_curr_frame = gt_curr_frame[:,2:6]  # take the tlwh columns
                    #print(gt_curr_frame)
                    #print(self.my_tlwh_to_xywh(im, gt_curr_frame))
                    bbox_xywh = self.my_tlwh_to_xywh(im, gt_curr_frame)  # gt bboxes instead of YOLO
                    cls_conf = np.ones((bbox_xywh.shape[0],), dtype=int)  # instead of YOLO confidences (all scores 1)
                    cls_ids = np.zeros(bbox_xywh.shape[0])  # all bboxes get class id 0 (person)
                    ori_im = draw_boxes(ori_im, self.my_tlwh_to_xyxy(im,gt_curr_frame))  # draw the gt bboxes
                    print("yolo yerine gt kullanıldı, frame: ",idx_frame)

                    # For testing: deliberately feed wrong boxes to study the
                    # effect of the initial data on the tracker.
                    """
                    bbox_xywh = np.array([[100,200,400.1,600.1],[500,600.1,600.1,800.1]]) #test amaçlı bilerek yanlış vermek için
                    cls_conf = np.ones((bbox_xywh.shape[0],), dtype=int) #test amaçlı bilerek yanlış vermek için
                    cls_ids = np.zeros(bbox_xywh.shape[0]) #test amaçlı bilerek yanlış vermek için
                    ori_im = draw_boxes(ori_im, bbox_xywh)
                    """

            """
            labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
            """

            # select person class   0-people 22-zebra 20-elephant
            #mask = (cls_ids == 20) + (cls_ids == 22)
            mask = cls_ids == 0

            bbox_xywh = bbox_xywh[mask]
            # bbox dilation just in case bbox too small, delete this line if using a better pedestrian detector
            bbox_xywh[:, 3:] *= 1.2
            cls_conf = cls_conf[mask]

            # do tracking
            outputs = self.deepsort.update(bbox_xywh, cls_conf, im)  # im.shape = video_height, video_width, 3
            #print(bbox_xywh)  # number_of_detection, 4
            #print(cls_conf)  # number_of_detection,

            # draw boxes for visualization
            if len(outputs) > 0:
                bbox_tlwh = []
                bbox_xyxy = outputs[:, :4]
                identities = outputs[:, -1]

                # my own code that draws the raw detections on screen
                #ori_im = draw_boxes(ori_im, self.my_xywh_to_xyxy(im,bbox_xywh))

                # original code that draws the confirmed matches on screen
                ori_im = draw_boxes(ori_im, bbox_xyxy, identities)

                for bb_xyxy in bbox_xyxy:
                    bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))

                results.append((idx_frame - 1, bbox_tlwh, identities))

            end = time.time()

            if self.args.display:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)

            if self.args.save_path:
                self.writer.write(ori_im)

            # save results
            write_results(self.save_results_path, results, 'mot')

            # logging
            self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                             .format(end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))


def parse_args():
    """Build and parse the command-line arguments for the tracker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("VIDEO_PATH", type=str)
    parser.add_argument("--config_detection", type=str, default="./configs/yolov3.yaml")
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    # parser.add_argument("--ignore_display", dest="display", action="store_false", default=True)
    parser.add_argument("--display", action="store_true")
    parser.add_argument("--gt", action="store_true")  # if we want to use data taken from gt
    parser.add_argument("--frame_interval", type=int, default=1)
    parser.add_argument("--display_width", type=int, default=800)
    parser.add_argument("--display_height", type=int, default=600)
    parser.add_argument("--save_path", type=str, default="./output/")
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    parser.add_argument("--camera", action="store", dest="cam", type=int, default="-1")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)

    with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
        vdo_trk.run()
12,263
4,463
import numpy as onp
import casadi as cas


def array(object, dtype=None):
    """Array constructor that works for both NumPy and CasADi contents.

    Tries to build a NumPy array first; if the entries are not plain
    numerics (e.g. CasADi symbols, which NumPy stores as dtype object),
    falls back to stacking the rows into a CasADi matrix.

    Args:
        object: nested sequence (rows) of numbers or CasADi expressions.
        dtype: optional dtype forwarded to ``numpy.array``.

    Returns:
        ``numpy.ndarray`` for numeric input, otherwise a CasADi matrix.
    """
    try:
        a = onp.array(object, dtype=dtype)
        if a.dtype == "O":
            # Object dtype means NumPy couldn't make a homogeneous numeric
            # array; treat the contents as CasADi expressions instead.
            raise TypeError("Entries are not plain numerics.")
        return a
    except Exception:
        # If this occurs, it needs to be a CasADi type.
        def make_row(row):
            # Concatenate one row horizontally; non-iterable entries
            # (scalars, CasADi MX) pass through unchanged.
            try:
                return cas.horzcat(*row)
            except Exception:
                return row

        return cas.vertcat(
            *[
                make_row(row)
                for row in object
            ]
        )


def length(array) -> int:
    """
    Returns the length of an 1D-array-like object.

    Args:
        array: a list/ndarray/CasADi matrix, or a plain scalar.

    Returns:
        Number of leading elements; 1 for a scalar.
    """
    try:
        return len(array)
    except TypeError:
        # No len() -> either float, int, or a CasADi type with .shape.
        try:
            if len(array.shape) >= 1:
                return array.shape[0]
            else:
                raise AttributeError
        except AttributeError:
            # No shape attribute -> plain float or int scalar.
            return 1
1,172
325
# -*- coding: utf-8 -*-
"""Expose the package's scene classes as its public wildcard-import API."""

__author__ = 'eeneku'

from main_menu import MainMenu
from world_map import WorldMap
from local_map import LocalMap

# BUG FIX: __all__ entries must be *names* (strings), not the class objects
# themselves — non-string entries make `from <package> import *` fail.
__all__ = ['MainMenu', 'WorldMap', 'LocalMap']
181
64
# Generated by Django 2.2.5 on 2019-10-22 07:03 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Instagram', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=20)), ('email', models.EmailField(max_length=254)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('photo', models.ImageField(blank=True, upload_to='')), ('bio', models.CharField(max_length=120)), ], options={ 'ordering': ['photo'], }, ), migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image_image', models.ImageField(default='images/default.jpeg', upload_to='images/')), ('name', models.CharField(max_length=100)), ('caption', models.CharField(max_length=150)), ('comments', models.TextField()), ('pub_date', models.DateTimeField(default=django.utils.timezone.now)), ('like', models.ManyToManyField(blank=True, related_name='likes', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.Profile')), ], ), migrations.CreateModel( name='Comments', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('comments', models.TextField()), ('image', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, related_name='image_comments', to='instagram.Image')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), ], ), ]
2,502
706
""" Payin API """ from dataclasses import dataclass from typing import Union from api_payin_model import ( PayinAdjustPaymentRequest, PayinCancelRequest, PayinCancelResponse, PayinCaptureRequest, PayinCaptureResponse, PayinMandateRequest, PayinMandateResponse, PayinOrderDetailsRequest, PayinOrderDetailsResponse, PayinPaymentDetailsRequest, PayinPaymentDetailsResponse, PayinPaymentIframeRequest, PayinPaymentIframeResponse, PayinPaymentMethodsRequest, PayinPaymentMethodsResponse, PayinPaymentRequest, PayinPaymentResponse, PayinRefundRequest, PayinRefundResponse, PayinTicketRequest, PayinTicketResponse, ) from base import BaseRequest from model import Response @dataclass class ApiPayin(BaseRequest): """Payin API requests""" def payment( self, payload: PayinPaymentRequest ) -> Union[PayinPaymentResponse, Response]: """Submit a payment""" return self.request("POST", "/payin/payment", payload) def payment_details( self, payload: PayinPaymentDetailsRequest ) -> Union[PayinPaymentDetailsResponse, Response]: """Submit additionnal payment details""" return self.request("POST", "/payin/paymentDetails", payload) def payment_methods( self, payload: PayinPaymentMethodsRequest ) -> Union[PayinPaymentMethodsResponse, Response]: """Submit an order/get payment methods""" return self.request("POST", "/payin/paymentMethods", payload) def capture( self, payload: PayinCaptureRequest ) -> Union[PayinCaptureResponse, Response]: """Capture a transaction/order""" return self.request("POST", "/payin/capture", payload) def cancel( self, payload: PayinCancelRequest ) -> Union[PayinCancelResponse, Response]: """Cancel a transaction/order""" return self.request("POST", "/payin/cancel", payload) def order_details( self, payload: PayinOrderDetailsRequest ) -> Union[PayinOrderDetailsResponse, Response]: """Get all the order details""" return self.request("GET", "/payin/orderDetails", payload) def adjust_payment(self, payload: PayinAdjustPaymentRequest) -> Response: """Adjust the amount of the 
payment/change the breakdown of the payment""" return self.request("POST", "/payin/adjustPayment", payload) def payment_iframe( self, payload: PayinPaymentIframeRequest ) -> Union[PayinPaymentIframeResponse, Response]: """Submit an order/get an authent code""" return self.request("POST", "/payin/paymentIframe", payload) def refund( self, payload: PayinRefundRequest ) -> Union[PayinRefundResponse, Response]: """Refund a transaction/order""" return self.request("POST", "/payin/refund", payload) def mandate( self, payload: PayinMandateRequest ) -> Union[PayinMandateResponse, Response]: """Get signed mandate file""" return self.request("GET", "/payin/mandate", payload) def ticket( self, payload: PayinTicketRequest ) -> Union[PayinTicketResponse, Response]: """Get card payment ticket""" return self.request("GET", "/payin/ticket", payload)
3,283
925
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase

from fdadb.models import MedicationName, MedicationNDC, MedicationStrength


class APITests(APITestCase):
    """End-to-end tests of the names / strengths / NDCs endpoints."""

    def setUp(self):
        # Fixture: 4 medication names x 3 strengths x 2 manufacturers.
        # Each strength record stores per-substance strength/unit; each
        # NDC code is built from name prefix + strength + manufacturer.
        for name in ("DrugName", "OtherDrugName", "DruuuugName", "NamedDrug"):
            medication_name = MedicationName.objects.create(
                name=name, active_substances=[name + " Substance 1", name + " Substance 2"]
            )
            for strength in (1, 2, 3):
                medication_strength = MedicationStrength.objects.create(
                    medication_name=medication_name,
                    strength={
                        name + " Substance 1": {"strength": strength, "unit": "mg/l"},
                        name + " Substance 2": {"strength": strength + 5, "unit": "mg/l"},
                    },
                )
                for manufacturer in ("M1", "M2"):
                    MedicationNDC.objects.create(
                        medication_strength=medication_strength,
                        ndc=name[:5] + str(strength) + manufacturer,
                        manufacturer=manufacturer,
                    )

    def test_names_api(self):
        # Unfiltered listing returns all 4 names; ?q= filters by substring.
        url = reverse("fdadb-medications-names")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 4)
        self.assertEqual(response.data["results"][0]["name"], "DrugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["DrugName Substance 1", "DrugName Substance 2"]
        )

        response = self.client.get(url + "?q=Druuu")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["name"], "DruuuugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["DruuuugName Substance 1", "DruuuugName Substance 2"]
        )

    def test_strengths_api(self):
        # All 3 strengths of one medication; ?q= filters by strength value.
        url = reverse("fdadb-medications-strengths", kwargs={"medication_name": "NamedDrug"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 3)
        self.assertEqual(response.data["results"][0]["name"], "NamedDrug")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["NamedDrug Substance 1", "NamedDrug Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "NamedDrug Substance 1": {"strength": 1, "unit": "mg/l"},
                "NamedDrug Substance 2": {"strength": 6, "unit": "mg/l"},
            },
        )

        response = self.client.get(url + "?q=3")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["name"], "NamedDrug")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["NamedDrug Substance 1", "NamedDrug Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "NamedDrug Substance 1": {"strength": 3, "unit": "mg/l"},
                "NamedDrug Substance 2": {"strength": 8, "unit": "mg/l"},
            },
        )

    def test_ndcs_api(self):
        # NDCs for one strength; ?q= filter on manufacturer is case-insensitive.
        strength = MedicationStrength.objects.filter(medication_name__name="OtherDrugName").first()
        url = reverse("fdadb-medications-ndcs", kwargs={"medication_name": "OtherDrugName", "strength_id": strength.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 2)
        self.assertEqual(response.data["results"][0]["name"], "OtherDrugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["OtherDrugName Substance 1", "OtherDrugName Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "OtherDrugName Substance 1": {"strength": 1, "unit": "mg/l"},
                "OtherDrugName Substance 2": {"strength": 6, "unit": "mg/l"},
            },
        )
        self.assertEqual(response.data["results"][0]["manufacturer"], "M1")
        self.assertEqual(response.data["results"][0]["ndc"], "Other1M1")

        strength = MedicationStrength.objects.filter(medication_name__name="OtherDrugName").first()
        url = reverse("fdadb-medications-ndcs", kwargs={"medication_name": "OtherDrugName", "strength_id": strength.pk})
        response = self.client.get(url + "?q=m2")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["name"], "OtherDrugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["OtherDrugName Substance 1", "OtherDrugName Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "OtherDrugName Substance 1": {"strength": 1, "unit": "mg/l"},
                "OtherDrugName Substance 2": {"strength": 6, "unit": "mg/l"},
            },
        )
        self.assertEqual(response.data["results"][0]["manufacturer"], "M2")
        self.assertEqual(response.data["results"][0]["ndc"], "Other1M2")
5,516
1,765
#!/usr/bin/env python
"""
Copyright 2020 Zhao HG

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Code generator: reads UnicodeData.txt and emits C++ lookup tables plus a
# generated test for the canonical combining class (UnicodeData field 3).

# Run-length compress field 3 (canonical combining class): record the first
# code point of each run of identical class values.
with open('UnicodeData.txt', 'r') as reader:
    last, indices, canonicals, classes = '', [], [], {}
    for line in reader:
        parts = line.strip().split(';')
        if parts[3] != last:
            last = parts[3]
            indices.append(parts[0])        # run start (hex code point)
            canonicals.append(parts[3])     # class value for the run
            classes[parts[3]] = parts[0]    # one representative per class, for the test

# Append the extern declarations to the shared header (note: append mode —
# the header is built up by several generator scripts).
with open('include/unicode_data.h', 'a') as writer:
    writer.write('/** The total number of indices used to store the canonical combing class. */\n')
    writer.write('const int32_t CANONICAL_COMBINING_NUM = {};\n'.format(len(indices)))
    writer.write('/** The indices of the first character that have a different type. */\n')
    writer.write('extern const int32_t CANONICAL_COMBINING_INDEX[];\n')
    writer.write('/** The canonical combining class data. */\n')
    writer.write('extern const int32_t CANONICAL_COMBINING_CLASS[];\n\n')

# Emit the table definitions, 8 entries per line.
with open('src/canonical_combining_class.cpp', 'w') as writer:
    with open('copyright.txt', 'r') as reader:
        writer.write(reader.read())
    writer.write('#include "unicode_data.h"\n\n')
    writer.write('namespace unicode {\n\n')
    writer.write('\nconst int32_t CANONICAL_COMBINING_INDEX[] = {')
    for i, index in enumerate(indices):
        if i == 0:
            writer.write('\n ')
        elif i % 8 == 0:
            writer.write(',\n ')
        else:
            writer.write(', ')
        writer.write('0x' + index)
    writer.write('\n};\n')
    writer.write('\nconst int32_t CANONICAL_COMBINING_CLASS[] = {')
    for i, canonical in enumerate(canonicals):
        if i == 0:
            writer.write('\n ')
        elif i % 8 == 0:
            writer.write(',\n ')
        else:
            writer.write(', ')
        writer.write(canonical)
    writer.write('\n};\n\n')
    writer.write('} // namespace unicode\n')

# Emit one assertion per distinct class value, using the representative
# code point recorded above.
with open('tests/test_canonical_combining_class_gen.cpp', 'w') as writer:
    with open('copyright.txt', 'r') as reader:
        writer.write(reader.read())
    writer.write('#include "test.h"\n')
    writer.write('#include "unicode_char.h"\n\n')
    writer.write('namespace test {\n\n')
    writer.write('class CanonicalCombiningClassGenTest : public UnitTest {};\n\n')
    writer.write('__TEST_U(CanonicalCombiningClassGenTest, test_classes) {\n')
    for canonical, code in classes.items():
        writer.write(' __ASSERT_EQ({}, unicode::getCanonicalCombiningClass({}));\n'.format(
            canonical, '0x' + code
        ))
    writer.write('}\n\n')
    writer.write('} // namespace test\n')
3,626
1,209
from flask import Flask, request
from twilio.twiml.voice_response import VoiceResponse, Gather
import datetime
import os
import json
import http.client

app = Flask(__name__)

# Wall-clock deadline until which the buzzer lets visitors in;
# updated via POST /buzzer/state.
allowUntil = datetime.datetime.now()

# Fetch env vars
whitelisted_numbers = os.environ['WHITELISTED_NUMBERS'].split(",")  # Numbers allowed to dial into the system
forward_number = os.environ['FORWARD_NUMBER']  # Number that will be forwarded to if not whitelisted
forward_number_from = os.environ['FORWARD_NUMBER_FROM']  # Number that will be forwarded to if not whitelisted
buzzcode = os.environ['BUZZCODE']  # Digits to dial to let them in
minutes = int(os.environ['MINUTES'])  # Number of minutes to unlock the system
slack_path = os.environ['SLACK_PATH']  # Slack path for slack message
say_message = os.environ['SAY_MESSAGE']  # The message to be said to the dialer


# Buzzer
##########################################################################
@app.route("/buzzer/webhook", methods=['GET', 'POST'])
def voice():
    """Respond to incoming phone calls with TwiML."""
    resp = VoiceResponse()
    incoming_number = request.values['From']

    # If an unknown number, filter out robo callers and forward to cell
    if incoming_number not in whitelisted_numbers:
        gather = Gather(num_digits=1, action='/buzzer/forward')
        gather.say('Press 1 to continue')
        resp.append(gather)
        return str(resp)

    # Tell the user a nice message that they are not permitted to enter
    if not allowed_to_buzz():
        resp.say("The system cannot let you in. Did you dial the right buzzcode?")
        send_message("A visitor was just rejected as the buzzer system was not unlocked")
        return str(resp)

    # Otherwise, unlock the door by playing the buzz code as DTMF digits.
    resp.say(say_message, language='zh-CN')
    resp.play(digits=buzzcode)
    send_message("A visitor was just let in")
    return str(resp)


@app.route("/buzzer/forward", methods=['GET', 'POST'])
def forward():
    """Forward a non-whitelisted caller (who pressed 1) to the owner's phone."""
    resp = VoiceResponse()
    incoming_number = request.values['From']
    send_message("About to forward a call from " + str(incoming_number))
    resp.say("Please note your call may be recorded for the benefit of both parties")
    resp.dial(forward_number, caller_id=forward_number_from)
    return str(resp)


@app.route("/buzzer/state", methods=['POST'])
def change_state():
    """Tells the buzzer to unlock the door for the next `minutes` minutes."""
    global allowUntil
    c = request.json
    if "active" not in c:
        return "missing \"active\" field", 400
    if c["active"] == "true":
        allowUntil = datetime.datetime.now() + datetime.timedelta(minutes=minutes)
    if c["active"] == "false":
        # Expire immediately.
        allowUntil = datetime.datetime.now()
    return "OK", 200


@app.route("/buzzer/state", methods=['GET'])
def status():
    """Fetches whether the system will buzz people in."""
    return json.dumps({"is_active": str(allowed_to_buzz()).lower()}), 200


def allowed_to_buzz():
    """Fetches whether the system is allowed to buzz somebody in."""
    global allowUntil
    return allowUntil > datetime.datetime.now()


def send_message(message):
    """Post *message* to Slack; failures must never break call handling.

    Fixes over the previous version: the bare ``except:`` (which also
    swallowed SystemExit/KeyboardInterrupt) is narrowed to ``Exception``,
    the JSON payload is built with ``json.dumps`` so quotes/backslashes in
    the message cannot corrupt it, and the connection is always closed.
    """
    conn = None
    try:
        conn = http.client.HTTPSConnection("hooks.slack.com")
        payload = json.dumps({"text": message})
        headers = {
            'content-type': "application/json",
        }
        conn.request("POST", slack_path, payload, headers)
        conn.getresponse()
    except Exception as e:
        print("error sending message: {}".format(e))
    finally:
        if conn is not None:
            conn.close()


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080)
3,676
1,102
import tkinter as tk from tkinter import font as tkfont, ttk import logging as log import sys from cx_Oracle import DatabaseError from GUI_Pages.BasicPage import TitlePage from Utilities.Cipher import Cipher, get_hash FORMAT = '[%(asctime)s] [%(levelname)s] : %(message)s' log.basicConfig(stream=sys.stdout, level=log.DEBUG, format=FORMAT) class LoginPage(TitlePage): def __init__(self, parent, controller): super().__init__(parent, controller) self.init() def init(self): width_label = 10 width_entry = 25 text_font = tkfont.Font(family='Helvetica', size=13) button_font = tkfont.Font(family='Helvetica', size=10) login_frame = tk.Frame(master=self, bg='gold') login_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True) login_frame.grid_rowconfigure(0, weight=1) login_frame.grid_columnconfigure(0, weight=1) login_label_frame = tk.LabelFrame(login_frame, bg='gray80') login_label_frame.grid(row=0, column=0) tk.Label(login_label_frame, text='username', font=text_font, bg=login_label_frame['bg'], fg='red', width=width_label).grid(row=0, column=0, padx=5, pady=10) self.username_entry = tk.Entry(login_label_frame, width=width_entry) self.username_entry.grid(row=0, column=1,) tk.Label(login_label_frame, text='password', font=text_font, bg=login_label_frame['bg'], fg='red', width=width_label).grid(row=1, column=0, padx=5, pady=10) self.password_entry = tk.Entry(login_label_frame, show="*", width=width_entry) self.password_entry.grid(row=1, column=1, padx=5, pady=10) self.login_button = tk.Button(login_label_frame, text='Login', font=button_font, command=self.on_login, bg='green', fg='white') self.login_button.grid(row=2, column=1, padx=5, pady=5) self.sign_up_button = tk.Button(login_label_frame, text='Sign Up', font=button_font, command=self.on_sign_up, bg='blue', fg='white') self.sign_up_button.grid(row=2, column=0, padx=5, pady=5) def set_states(self, user_level): if user_level == 'admin': return else: 
self.controller.set_state(self.controller.frames['HomePage'].advanced_options_button) self.controller.set_state(self.controller.frames['ShopPage'].insert_frame) self.controller.set_state(self.controller.frames['ShopPage'].update_frame) self.controller.set_state(self.controller.frames['ShopPage'].delete_frame) self.controller.set_state(self.controller.frames['ProductPage'].insert_frame) self.controller.set_state(self.controller.frames['ProductPage'].update_frame) self.controller.set_state(self.controller.frames['ProductPage'].delete_frame) self.controller.set_state(self.controller.frames['ShippingPage'].insert_frame) self.controller.set_state(self.controller.frames['ShippingPage'].update_frame) self.controller.set_state(self.controller.frames['ShippingPage'].delete_frame) if user_level == 'admin_shop': self.controller.set_state(self.controller.frames['ShopPage'].insert_frame, 'normal') self.controller.set_state(self.controller.frames['ShopPage'].update_frame, 'normal') self.controller.set_state(self.controller.frames['ShopPage'].delete_frame, 'normal') self.controller.set_state(self.controller.frames['ProductPage'].insert_frame, 'normal') self.controller.set_state(self.controller.frames['ProductPage'].update_frame, 'normal') self.controller.set_state(self.controller.frames['ProductPage'].delete_frame, 'normal') if user_level == 'admin_ship': self.controller.set_state(self.controller.frames['ShippingPage'].insert_frame, 'normal') self.controller.set_state(self.controller.frames['ShippingPage'].update_frame, 'normal') self.controller.set_state(self.controller.frames['ShippingPage'].delete_frame, 'normal') def on_login(self): username = self.username_entry.get() password = self.password_entry.get() #-------Use encryption when sending data across internet pass_encrypted = Cipher.encrypt(password) log.info("Password encrypted: {}".format(pass_encrypted.decode())) password = Cipher.decrypt(pass_encrypted) log.info("Password decrypted: {}".format(password)) #end encryption 
and decryption part # --------Get hash of password password = get_hash(password) log.info("Password Hash: {}".format(password)) try: user_account_var = self.controller.get_complex_type_var('AUTH_PKG.USER_ACCOUNT') self.controller.run_procedure('auth_pkg.login', [username, password, user_account_var]) except DatabaseError as e: log.info("Login Failed Incorect username as password") if e.args[0].code == 20100: from tkinter import messagebox messagebox.showinfo("Login Failed", "Wrong username or password") return user_info = self.controller.get_dict_from_oracle_object(user_account_var) self.controller.user_info = user_info self.controller.re_create_frames() self.set_states(user_info['user_level']) self.controller.frames["HomePage"].home_page_welcome_label_var.set("Welcome {}".format(user_info['first_name'])) self.controller.frames["HomePage"].populate_the_table_with_all_values() self.controller.show_frame("HomePage") def on_sign_up(self): self.controller.show_frame("SignUpPage")
5,738
1,733
from bert_hierarchy_extractor.datasets.train_dataset import TrainHierarchyExtractionDataset from bert_hierarchy_extractor.datasets.utils import cudafy from bert_hierarchy_extractor.logging.utils import log_metrics import numpy as np from torch.utils.data import DataLoader from transformers import AdamW, get_linear_schedule_with_warmup import torch import time from tqdm import tqdm from comet_ml import Experiment def placeholder_num_correct(x, y, print_result=False): result = torch.argmax(x, dim=1) result = result.view(-1) y2 = y.view(-1) mask = (y2 != -1) y2 = y2[mask] result = result[mask] if print_result: print('*************') y1mask = (y[0] != -1) print(y[0][y1mask]) print('-------------') rez = torch.argmax(x[0], dim=0) print(rez[y1mask]) print('**************') total_correct = (result == y2).sum().detach().cpu().numpy() total = result.shape[0] return total_correct, total class BertExtractorTrainer: def __init__( self, experiment: Experiment, model, data_path: str, base_model: str, bsz: int, num_workers: int, lr: float, weight_decay: float, warmup_updates: int, max_updates: int, accumulation_steps: int, validate_interval: int, save_metric: str, save_min: bool, device: str, seed=1, num_correct=placeholder_num_correct, ): """ :param model: Initialized model :param dataset_path: Path to dataset :param base_model: Path to base model :param bsz: Batch size :param num_workers: Num workers available :param lr: Learning rate :param weight_decay: weight decay :param warmup_updates: number of samples to warmup learning rate :param max_updates: max number of samples :param accumulation_steps: Number of batches to accumulate loss over before running an update :param validate_interval: num updates before validating :param save_metric: metric to use to save best model :param save_min: Whether we're looking to minimize or maximize the save metric :param seed: Random seed for iteration """ torch.manual_seed(seed) self.experiment = experiment self.device = device print(device) 
self.model = model.to(device) self.max_accumulation = accumulation_steps print("Loading training dataset") self.train_dataset = TrainHierarchyExtractionDataset(data_path) num_classes = len(self.train_dataset.label_map)-1 class_counts = np.zeros(num_classes) for i in range(len(self.train_dataset)): _, l = self.train_dataset[i] for cl in l: class_counts[cl] += 1 effective_num = 1.0 - np.power(0.9999, class_counts) weights = (1.0 - 0.9999) / np.array(effective_num) weights = weights / np.sum(weights * num_classes) self.weights = torch.FloatTensor(weights).to(device) print(self.weights) #print("Loading validation dataset") #self.val_dataset = TrainHierarchyExtractionDataset(data_path, base_model, "val") self.train_dataloader = DataLoader( self.train_dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, shuffle=True, collate_fn=TrainHierarchyExtractionDataset.collate, ) self.val_dataloader = DataLoader( self.train_dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, shuffle=True, collate_fn=TrainHierarchyExtractionDataset.collate, ) self.bsz = bsz self.optimizer = AdamW(model.parameters(), lr=lr, weight_decay=0.01) self.scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=warmup_updates, num_training_steps=max_updates, ) self.max_updates = max_updates self.validate_interval = validate_interval self.num_correct = num_correct self.save_metric = save_metric self.current_best_metric = float('inf') def validate(self, validate_cap=None, best_save_metric=None): self.model.eval() val_cap = validate_cap if validate_cap is not None else len(self.val_dataloader) with tqdm(total=val_cap) as pbar: total_loss = 0 total_correct = 0 total_instances = 0 for ind, batch in enumerate(self.val_dataloader): if ind > val_cap: break xs, labels = cudafy(batch) loss, logits = self.model(xs, labels=labels, weights=self.weights) nc, t = self.num_correct(logits, labels, print_result=True if ind < 5 else False) total_correct += nc total_instances 
+= t total_loss += loss.detach().cpu().numpy() pbar.update(1) loss_per_sample = total_loss / val_cap / self.bsz accuracy = total_correct / total_instances metrics = {} metrics["val_loss"] = loss_per_sample metrics["val_accuracy"] = accuracy metrics["val_per_sample_loss"] = total_loss if best_save_metric is not None: if metrics[best_save_metric] <= self.current_best_metric: self.model.save_pretrained('best') self.current_best_metric = metrics[best_save_metric] return metrics def train(self): """ """ start_time = time.time() # Verify forward pass using validation loop metrics = self.validate(validate_cap=5) self.model.train() with tqdm(total=self.max_updates, desc='Number of updates') as pbar: total_updates = 1 val_updates = 1 while total_updates < self.max_updates: accumulation_steps = 0 accumulation_loss = None for batch in self.train_dataloader: xs, labels = cudafy(batch) loss, _ = self.model(xs, labels=labels, weights=self.weights) if accumulation_loss is None: accumulation_steps += 1 accumulation_loss = loss elif accumulation_steps > self.max_accumulation: self.optimizer.zero_grad() accumulation_loss.backward() torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0) self.optimizer.step() self.scheduler.step() pbar.update(1) total_updates += 1 accumulation_steps = 0 accumulation_loss = loss l = loss.detach().cpu().numpy() metrics = {} metrics["train_update_loss"] = l metrics["train_per_sample_loss"] = l / self.bsz # TODO: Accuracy, f1, etc metrics log_metrics(self.experiment, metrics, total_updates) if total_updates % self.validate_interval == 0: metrics = self.validate(validate_cap=100, best_save_metric=self.save_metric) val_updates += 1 log_metrics(self.experiment, metrics, val_updates) else: accumulation_steps += 1 accumulation_loss += loss metrics = self.validate(validate_cap=1000) print(f"Final validation metrics: {metrics}") torch.save(self.model.state_dict(), 'last.pt') val_updates += 1 log_metrics(self.experiment, metrics, val_updates) end_time = 
time.time() total_time = end_time - start_time print(f"Total train time: {total_time}")
8,151
2,358
# Level 2 of pythonchallenge.com!
# Challenge: within the source code of this level, there is a
# set of jumbled characters. Within these characters, find the
# letters and join them together to find the correct url.

from solution_framework import Solution
import requests  # to view source code
import re  # to use regular expressions

# Pull down the page hosting this challenge; the jumbled block lives in the
# second HTML comment.
response = requests.get("http://www.pythonchallenge.com/pc/def/ocr.html")
hidden_block = response.text.split("<!--")[2]

# Keep every word character except underscores and glue them into the answer.
url_result = "".join(
    character
    for character in re.findall(r'\w', hidden_block)
    if character != '_'
)

Solution(url_result)
786
254
# Generated by Django 3.1.7 on 2021-05-10 07:38 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import saleor.core.utils.json_serializer class Migration(migrations.Migration): initial = True dependencies = [ ('store', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Social', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('private_metadata', models.JSONField(blank=True, default=dict, encoder=saleor.core.utils.json_serializer.CustomJsonEncoder, null=True)), ('metadata', models.JSONField(blank=True, default=dict, encoder=saleor.core.utils.json_serializer.CustomJsonEncoder, null=True)), ('follow', models.BooleanField(default=True)), ('store', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='socials', to='store.store')), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ('pk',), 'permissions': (('manage_socials', 'Manage social.'),), }, ), ]
1,444
440
"""Register a student (ID, name, email, roll number) in the SQLite database and
capture 30 face samples from the webcam into the dataset folder."""

import cv2
import os
import sqlite3
import dlib
import re
import time
from playsound import playsound
import pyttsx3

# --- Camera / detector setup -------------------------------------------------
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

face_detector = cv2.CascadeClassifier('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Face recognition/HaarCascade/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()  # was initialised twice; once is enough

# init function to get an engine instance for the speech synthesis
engine1 = pyttsx3.init()
engine2 = pyttsx3.init()

# For each person, enter one numeric face id.
# Raw string: '\w' and '\.' are regex escapes, not Python string escapes.
regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
Id = int(input("Enter ID:"))
fullname = input("Enter FullName : ")
email = input("Enter Email:")
match = re.match(regex, email)
if match is None:  # `is None` instead of `== None` (identity check is the idiom)
    print('Invalid Email')
    raise ValueError('Invalid Email')
rollno = int(input("Enter Roll Number : "))

print("\n [INFO] Initializing face capture. Look the camera and wait ...")
# say method on the engine that passing input text to be spoken
playsound('sound.mp3')
engine1.say('User Added Successfully')
# BUG FIX: the utterance was queued on engine1 but engine2.runAndWait() was
# called, so the confirmation was never actually spoken.
engine1.runAndWait()

# --- Persist the student record ----------------------------------------------
connects = sqlite3.connect("C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Face recognition/sqlite3/Studentdb.db")  # connecting to the database
c = connects.cursor()
# NOTE(review): the trailing "STATUS TEXT DATE TIMESTAMP" column definition
# looks malformed (two column names, no separating comma) — confirm the
# intended schema before changing it; existing databases already use it.
c.execute('CREATE TABLE IF NOT EXISTS Student (ID INT NOT NULL UNIQUE PRIMARY KEY, FULLNAME TEXT NOT NULL, EMAIL NOT NULL, ROLLNO INT UNIQUE NOT NULL , STATUS TEXT DATE TIMESTAMP)')
# Parameterized insert keeps user input out of the SQL text.
c.execute("INSERT INTO Student(ID, FULLNAME, EMAIL,ROLLNO) VALUES(?,?,?,?)", (Id, fullname, email, rollno))
print('Record entered successfully')
connects.commit()  # commiting into the database
c.close()
connects.close()  # closing the connection

# --- Capture 30 face samples ---------------------------------------------------
count = 0  # individual sampling face count
while True:
    ret, img = cam.read()
    img = cv2.flip(img, 1)  # flip video image vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(Id) + '.' + str(count) + ".jpg", gray[y:y + h, x:x + w])
        cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break
    elif count >= 30:  # Take 30 face sample and stop video
        playsound('sound.mp3')
        engine2.say('DataSets Captured Successfully')
        # run and wait method, it processes the voice commands.
        engine2.runAndWait()
        break

# Doing a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
2,900
1,028
from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import * from sqlalchemy import * from sqlalchemy.sql import func, or_ from sqlalchemy.types import TIMESTAMP from sqlalchemy.ext.hybrid import hybrid_property from time import time import markupsafe from sqlalchemy.ext.associationproxy import association_proxy #from auth import * Base = declarative_base() #metadata = Base.metadata #session = StackedObjectProxy() #database_type = "MySQL" class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) username = Column(String(255)) fullname = Column(String(255)) email = Column(String(255)) website = Column(String(255)) apikey = Column(String(255)) _password = Column('password', String(128), nullable=False) access = Column(Integer()) disabled = Column(Integer()) last_seen = Column(Integer()) create_date = Column(Integer()) validation = Column(String(255)) def _set_password(self, value): if value is None: self._password = None return import hashlib encoder = hashlib.new('sha512') encoder.update(value) self._password = encoder.hexdigest() password = synonym('_password', descriptor=property(lambda self: self._password, _set_password)) @classmethod def authenticate(cls, identifier, password=None, force=False): if not force and not password: return None try: user = cls.get(identifier) except: return None if force: return user.id, user import hashlib encoder = hashlib.new('sha512') encoder.update(password) if user.password is None or user.password != encoder.hexdigest(): return None return user.id, user class Account(Base): __tablename__ = 'accounts' __repr__ = lambda self: "Account(%s, '%s')" % (self.id, self.name) id = Column(String(32), primary_key=True) name = Column(Unicode(255), nullable=False) _password = Column('password', String(128)) def _set_password(self, value): if value is None: self._password = None return import hashlib encoder = hashlib.new('sha512') encoder.update(value) self._password = encoder.hexdigest() 
password = synonym('_password', descriptor=property(lambda self: self._password, _set_password)) groups = association_proxy('_groups', 'id') @property def permissions(self): perms = [] for group in self._groups: for perm in group.permissions: perms.append(perm) return set(perms) @classmethod def lookup(cls, identifier): user = session.query(cls).filter(cls.id==identifier).one() return user @classmethod def authenticate(cls, identifier, password=None, force=False): if not force and not password: return None try: #user = cls.get(identifier) user = session.query(cls).filter(cls.name==identifier).one() except: return None if force: return user.id, user import hashlib encoder = hashlib.new('sha512') encoder.update(password) if user.password is None or user.password != encoder.hexdigest(): return None return user.id, user account_groups = Table('account_groups', Base.metadata, Column('account_id', String(32), ForeignKey('accounts.id')), Column('group_id', Unicode(32), ForeignKey('groups.id')) ) class Group(Base): __tablename__ = 'groups' __repr__ = lambda self: "Group(%s, %r)" % (self.id, self.name) __str__ = lambda self: str(self.id) __unicode__ = lambda self: self.id id = Column(String(32), primary_key=True) description = Column(Unicode(255)) members = relation(Account, secondary=account_groups, backref='_groups') permissions = association_proxy('_permissions', 'id') group_permissions = Table('group_perms', Base.metadata, Column('group_id', Unicode(32), ForeignKey('groups.id')), Column('permission_id', Unicode(32), ForeignKey('permissions.id')) ) class Permission(Base): __tablename__ = 'permissions' __repr__ = lambda self: "Permission(%s)" % (self.id, ) __str__ = lambda self: str(self.id) __unicode__ = lambda self: self.id id = Column(String(32), primary_key=True) description = Column(Unicode(255)) groups = relation(Group, secondary=group_permissions, backref='_permissions') #def ready(sessionmaker): # global session # session = sessionmaker # 
request.environ['catalogs'] = session.query(SiteOptions).limit(1).one()
4,847
1,460
import multiprocessing as mp # mp.set_start_method('spawn') import math import os import pickle import random from glob import glob from os import path import albumentations as alb import cv2 import numpy as np import skimage import torch import imageio from albumentations.pytorch import ToTensorV2 from skimage.color import gray2rgb from torch.utils.data import Dataset from conet.config import get_cfg # https://github.com/albumentations-team/albumentations/pull/511 # Fix grid distortion bug. #511 # GridDistortion bug修复..... train_size_aug = alb.Compose([ # alb.RandomSizedCrop(min_max_height=(300, 500)), alb.PadIfNeeded(min_height=100, min_width=600, border_mode=cv2.BORDER_REFLECT101), alb.Rotate(limit=6), alb.RandomScale(scale_limit=0.05,), alb.ElasticTransform(), # alb.GridDistortion(p=1, num_steps=20, distort_limit=0.5), # alb.GridDistortion(num_steps=10, p=1), # alb.OneOf([ # alb.OpticalDistortion(), # ]), # alb.MaskDropout(image_fill_value=0, mask_fill_value=-1,p=0.3), alb.HorizontalFlip(), # alb.VerticalFlip(), # alb.RandomBrightness(limit=0.01), alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101), alb.RandomCrop(224, 512), # alb.Normalize(), # alb.pytorch.ToTensor(), # ToTensorV2() ]) train_content_aug = alb.Compose([ # alb.MedianBlur(3), # alb.GaussianBlur(3), alb.RGBShift(r_shift_limit=5, g_shift_limit=5, b_shift_limit=5), alb.RandomBrightnessContrast(brightness_limit=0.05), alb.Normalize(), # ToTensorV2() ]) val_aug = alb.Compose([ # alb.PadIfNeeded(512, border_mode=cv2.BORDER_REFLECT101), # alb.Normalize(), # alb.Resize(512, 512), alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101), alb.CenterCrop(224, 512), # ToTensorV2(), ]) val_c_aug = alb.Compose([ alb.Normalize(), # ToTensorV2() ]) # train_aug_f = alb.Compose([ # # alb.RandomSizedCrop(min_max_height=(300, 500)), # alb.RandomScale(), # # alb.HorizontalFlip(), # alb.VerticalFlip(), # alb.RandomBrightness(limit=0.01), # alb.Rotate(limit=30), # 
# 224 548 # alb.PadIfNeeded(min_height=224, min_width=548, border_mode=cv2.BORDER_REFLECT101), # alb.RandomCrop(224, 512), # alb.Normalize(), # # alb.pytorch.ToTensor(), # ToTensorV2() # ]) # val_aug_f = alb.Compose([ # alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101), # alb.Normalize(), # # alb.Resize(512, 512), # alb.CenterCrop(224, 512), # ToTensorV2(), # ]) class DukeOctFlatSPDataset(Dataset): def __init__(self, split='train', n_seg=0): cfg = get_cfg() self.cfg = cfg self.data_dir = path.join(cfg.dme_flatten_sp, str(n_seg)) print(f'Load data from {self.data_dir}') # with open(path.join(cfg.data_dir, 'split.dp'), 'rb') as infile: # self.d_split = pickle.load(infile) self.split = split data_files = glob(path.join(self.data_dir, '*.jpg')) # img_bname = ['_'.join(path.basename(x).split('_')[:-1]) for x in img_files] data_bnames = [path.basename(x).split('.')[0] for x in data_files] # self.data_bnames = data_bnames subject_ids = [int(x.split('_')[1]) for x in data_bnames] if split == 'train': self.bnames = [data_bnames[i] for i in range(len(data_files)) if subject_ids[i] < 6] else: self.bnames = [data_bnames[i] for i in range(len(data_files)) if subject_ids[i] >= 6] if split == 'train': self.b_aug = train_size_aug self.c_aug = train_content_aug elif split == 'val': self.b_aug = val_aug self.c_aug = val_c_aug else: raise NotImplementedError self.cache = [] for idx in range(len(self)): bname = self.bnames[idx] img_fp = path.join(self.data_dir, f'{bname}.jpg') label_fp = path.join(self.data_dir, f'{bname}_label.npy') softlabel_fp = path.join(self.data_dir, f'{bname}_softlabel.npy') img = imageio.imread(img_fp) label = np.load(label_fp) softlabel = np.load(softlabel_fp) self.cache.append((img_fp, img, label, softlabel)) def __len__(self): # return len(self.d_basefp) return len(self.bnames) def __getitem__(self, idx): # carr = np.load(path.join(self.data_dir, self.d_basefp[idx])) # carr = np.load(self.bnames[idx]) # if idx in 
self.cache.keys(): # img_fp, img, label, soft_label = self.cache[idx] # else: # bname = self.bnames[idx] # img_fp = path.join(self.data_dir, f'{bname}.jpg') # label_fp = path.join(self.data_dir, f'{bname}_label.npy') # softlabel_fp = path.join(self.data_dir, f'{bname}_softlabel.npy') # img = imageio.imread(img_fp) # label = np.load(label_fp) # softlabel = np.load(softlabel_fp) # self.cache[idx] = (img_fp, img, label, softlabel) img_fp, img, label, softlabel = self.cache[idx] img_fp, img, label, softlabel = img_fp, img.copy(), label.copy(), softlabel.copy() # img = gray2rgb(img) # if self.split == 'train': # auged = train_aug_f(image=img, mask=label) # else: # auged = val_aug_f(image=img, mask=label) # auged['fname'] = img_fp # auged['softlabel'] = torch.tensor(0.) # return auged # img = np.transpose(img, (1, 2, 0)) softlabel = np.transpose(softlabel, (1, 2, 0)) img = np.expand_dims(img, axis=-1) img_a = np.concatenate([img, softlabel], axis=-1) # img = gray2rgb(img) # grid_distortion 可能不支持负数 label[label == -1] = 255 auged = self.b_aug(image=img_a, mask=label) img = auged['image'] label = auged['mask'] label[label == 255] = -1 softlabel = img[:, :, 1:] image = img[:, :, 0] # print(image.shape, image.max(), image.min()) image = np.clip(image, 0, 255).astype('uint8') # image = skimage.img_as_ubyte(image) image = gray2rgb(image) image = self.c_aug(image=image)['image'] # normi # image = alb.Normalize()(image)['image'] image = np.transpose(image, (2, 0, 1)) softlabel = np.transpose(softlabel, (2, 0, 1)) loss_mask = (label !=-1).astype("float") image = torch.from_numpy(image) softlabel = torch.from_numpy(softlabel).float() label = torch.from_numpy(label) loss_mask = torch.from_numpy(loss_mask) # img = auged['image'] # print(img.shape) return { 'image': image, 'softlabel': softlabel, 'mask': label, 'fname': img_fp, 'loss_mask': loss_mask } if __name__ == "__main__": from skimage import segmentation, color, filters, exposure import skimage import os from os import path 
import imageio from matplotlib import pyplot as plt from torch.utils.data import DataLoader import random np.random.seed(42) random.seed(42) save_dir = '/data1/hangli/oct/debug' os.makedirs(save_dir, exist_ok=True) cmap = plt.cm.get_cmap('jet') n_seg = 1200 training_dataset = DukeOctFlatSPDataset(split='train', n_seg=n_seg) # val_dataset = DukeOctFlatSPDataset(split='val', n_seg=n_seg) data_loader = DataLoader(training_dataset, batch_size=16, shuffle=False, num_workers=8, pin_memory=False) # val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False, num_workers=2, pin_memory=True) for t in range(40): for bidx, batch in enumerate(data_loader): data = batch['image'] target = batch['mask'] for b_i in range(len(data)): img = data[b_i] img = img.permute(1, 2, 0).cpu().numpy() img = (img - img.min()) / (img.max() - img.min()) img = skimage.img_as_ubyte(img) mask = target[b_i] # mask_color = cmap(mask) mask_color = color.label2rgb(mask.cpu().numpy()) mask_color = skimage.img_as_ubyte(mask_color) print(img.shape, mask_color.shape) save_img = np.hstack((img, mask_color)) p = path.join(save_dir, f'{t}_{bidx}_{b_i}.jpg') print(f'=> {p}') imageio.imwrite(p, save_img)
8,753
3,280
from fractions import Fraction

# Demonstrate exact rational arithmetic: multiplying 1/3 by 1/7 keeps the
# result as an exact fraction instead of a lossy float.
num1 = Fraction(1, 3)
num2 = Fraction(1, 7)

num1 * num2  # evaluates to Fraction(1, 21); the value itself is discarded
107
51
from io import StringIO
from pathlib import Path

import pytest

from toxn.config import from_toml


@pytest.mark.asyncio
async def test_load_from_io():
    """from_toml on an in-memory stream parses [build-system] and [tool.toxn]
    and reports no source filename."""
    content = StringIO("""
[build-system]
requires = ['setuptools >= 38.2.4']
build-backend = 'setuptools:build_meta'

[tool.toxn]
default_tasks = ['py36']
""")
    build, project, filename = await from_toml(content)
    assert build.backend == 'setuptools:build_meta'
    assert build.requires == ['setuptools >= 38.2.4']
    assert project == {'default_tasks': ['py36']}
    # No on-disk source, so the reported config path is None.
    assert filename is None


@pytest.mark.asyncio
async def test_load_from_path(tmpdir):
    """from_toml on a real file parses the same content and echoes the path back."""
    filename: Path = Path(tmpdir) / 'test.toml'
    with open(filename, 'wt') as f:
        f.write("""
[build-system]
requires = ['setuptools >= 38.2.4']
build-backend = 'setuptools:build_meta'

[tool.toxn]
default_tasks = ['py36']
""")
    build, project, config_path = await from_toml(filename)
    assert build.backend == 'setuptools:build_meta'
    assert build.requires == ['setuptools >= 38.2.4']
    assert project == {'default_tasks': ['py36']}
    # The path-based loader must report where the config came from.
    assert filename == config_path
1,109
404
from constantMonthlyModel import ConstantMonthlyModel from constantModel import ConstantModel from twoParameterModel import TwoParameterModel from threeParameterModel import ThreeParameterModel from anyModel import AnyModelFactory from schoolModel import SchoolModel, SchoolModelFactory from recurrentModel import RecurrentModel, RecurrentModelFactory from weeklyModel import WeeklyModel, WeeklyModelFactory from monthlyModel import MonthlyModel, MonthlyModelFactory from nanModel import NanModel from profile import ConsumptionProfile
536
112
host = 'https://api.gotinder.com' #leave tinder_token empty if you don't use phone verification tinder_token = "0bb19e55-5f12-4a23-99df-8e258631105b" # Your real config file should simply be named "config.py" # Just insert your fb_username and fb_password in string format # and the fb_auth_token.py module will do the rest!
326
125
from django.test.runner import DiscoverRunner
from io import StringIO
from logging import StreamHandler, getLogger
from unittest import TextTestRunner, TextTestResult


class SimoneTestRunner(TextTestRunner):
    """TextTestRunner that always buffers stdout/stderr during tests."""

    def __init__(self, *args, **kwargs):
        kwargs['buffer'] = True
        super().__init__(*args, **kwargs)


class SimoneTestResult(TextTestResult):
    """Result class that captures log output per-test and appends it to
    failure tracebacks, while suppressing output mirroring."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Single reusable buffer; the root logger is redirected into it for
        # the duration of each test.
        self._stream = StringIO()
        self._stream_handlers = [StreamHandler(self._stream)]
        self._root_logger = getLogger()
        self._original_handlers = self._root_logger.handlers

    def startTest(self, test):
        # Reset the capture buffer. Rewind before truncating: truncate(0)
        # alone leaves the write position where it was, so the next write
        # would NUL-pad up to the old offset.
        self._stream.seek(0)
        self._stream.truncate(0)
        self._root_logger.handlers = self._stream_handlers
        return super().startTest(test)

    def stopTest(self, test):
        self._root_logger.handlers = self._original_handlers
        # BUG FIX: this previously called super().startTest(test), which
        # re-started buffering and double-counted the test instead of
        # finishing it.
        return super().stopTest(test)

    # i don't like overriding a _ property, but otherwise we'd have to
    # reimplement ~3 add* methods, one of which is non-trivial, which seems
    # more likely to be flakey. Essentially we don't want ot mirror the output
    # during the test runs when _restoreStdout is called so this effectively
    # disables the property that gets set to cause that to happen.
    @property
    def _mirrorOutput(self):
        return False

    @_mirrorOutput.setter
    def _mirrorOutput(self, val):
        pass

    # same here :-(. This is so that we can include any logging for the failed
    # tests.
    def _exc_info_to_string(self, err, test):
        return (
            super()._exc_info_to_string(err, test)
            + '\nLogging:\n'
            + self._stream.getvalue()
        )


class SimoneRunner(DiscoverRunner):
    """Django test runner wiring in the buffering runner/result classes."""

    test_runner = SimoneTestRunner

    def get_resultclass(self):
        # DiscoverRunner returns a class for --debug-sql/--pdb modes; only
        # fall back to our capturing result when it has no opinion.
        ret = super().get_resultclass()
        if ret is None:
            ret = SimoneTestResult
        return ret
1,958
569
# Public API: `from <this package> import *` exposes only the `fibo` name.
__all__ = ["fibo"]
18
10
# Offsets of the 8 neighbours of a cell (N, NE, E, SE, S, SW, W, NW order).
row = [-1, -1, -1, 0, 0, 1, 1, 1]
col = [-1, 0, 1, -1, 1, -1, 0, 1]


def isValid(x, y, mat):
    """Return True if (x, y) is inside matrix `mat`."""
    return 0 <= x < len(mat) and 0 <= y < len(mat[0])


def findMaxLength(mat, x, y, previous, _memo=None):
    """Return the length of the longest consecutive-character path starting
    at cell (x, y), given that the previous cell held `previous`.

    A step is valid only when mat[x][y] is exactly the successor character of
    `previous`, so characters strictly increase along a path and no cell can
    be revisited. That also makes the result depend only on (x, y, previous),
    which `_memo` (an optional shared dict) exploits to avoid the exponential
    blow-up of the naive recursion.
    """
    if _memo is None:
        _memo = {}
    if not isValid(x, y, mat) or chr(ord(previous) + 1) != mat[x][y]:
        return 0
    key = (x, y, previous)
    if key in _memo:
        return _memo[key]

    max_len = 0
    for dx, dy in zip(row, col):
        length = findMaxLength(mat, x + dx, y + dy, mat[x][y], _memo)
        max_len = max(max_len, 1 + length)
    _memo[key] = max_len
    return max_len


def findMaximumLength(mat, ch):
    """Return the length of the longest path of consecutive characters in
    `mat` starting from any occurrence of character `ch` (0 if absent)."""
    if not mat or not mat[0]:
        return 0

    M, N = len(mat), len(mat[0])
    memo = {}  # shared across all starting cells
    max_len = 0
    for x in range(M):
        for y in range(N):
            if mat[x][y] == ch:
                for dx, dy in zip(row, col):
                    length = findMaxLength(mat, x + dx, y + dy, ch, memo)
                    max_len = max(max_len, 1 + length)
    return max_len


if __name__ == '__main__':
    mat = [
        ['D', 'E', 'H', 'X', 'B'],
        ['A', 'O', 'G', 'P', 'E'],
        ['D', 'D', 'C', 'F', 'D'],
        ['E', 'B', 'E', 'A', 'S'],
        ['C', 'D', 'Y', 'E', 'N']
    ]

    ch = 'C'

    print("The length of the longest path with consecutive characters starting from "
          "character", ch, "is", findMaximumLength(mat, ch))
1,340
558
__all__ = ('get_session_list', 'get_animal_list', 'get_event', 'get_tag_pattern', 'get_pattern_animalList', 'get_current_animals') import datetime import logging from .. import Root from .. import File from .. import Profile from ..Profile import EventProfile from .singleAnimal import * def get_session_list(root: Root, animalList: list = None, profile: Profile = None): """ This function returns list of sessions with certain 'profile' for all the animals in animalList. if animalList=Nonr, it will search all the animals. """ if profile is None: profile = Profile(root=root) if animalList is None or animalList == '' or animalList == []: animalList = root.get_all_animals() profileOut = Profile(root=root) for animal in animalList: tagFile = File(root, animal) sessionProfile = tagFile.get_profile_session_list(profile) profileOut += sessionProfile return profileOut def get_animal_list(root: Root, profile: Profile = None): """ this function returns list of animals with at least one session matching the "profile" """ if profile is None: profile = Profile(root=root) allProfiles = get_session_list(root, animalList=None, profile=profile) sessionList = allProfiles.Sessions animalList = [] for session in sessionList: animalList.append(session[:len(profile._prefix) + 3]) animalList = list(set(animalList)) return sorted(animalList) def get_event(root: Root, profile1: Profile, profile2: Profile, badAnimals: list = None): """ This function finds the animals that match both profile1 and profile2 IN SUCCESSION I.E., when the conditions changed """ if badAnimals is None: badAnimals = [] animalList1 = get_animal_list(root, profile1) animalList2 = get_animal_list(root, profile2) animalList0 = set(animalList1).intersection(set(animalList2)) animalList0 = [animal for animal in animalList0 if animal not in badAnimals] # remove bad animals from animalList0 animalList0.sort() eventProfile = EventProfile(profile1, profile2) for animal in animalList0: sessionProfile1 = get_session_list(root, 
animalList=[animal], profile=profile1) sessionProfile2 = get_session_list(root, animalList=[animal], profile=profile2) sessionTotal = get_session_list(root, animalList=[animal], profile=root.get_profile()) try: index = sessionTotal.Sessions.index(sessionProfile1.Sessions[-1]) if sessionProfile2.Sessions[0] == sessionTotal.Sessions[index + 1]: # Two profiles succeed, meaning the Event happended. eventProfile.append(sessionProfile1.Sessions, sessionProfile2.Sessions) except Exception: pass return eventProfile def get_tag_pattern(root: Root, animalList: list = None, tagPattern: str = '*'): """ applies 'get_pattern_session_list' to a list of animals """ if animalList is None or animalList == []: animalList = root.get_all_animals() profileDict = root.get_profile() for animal in animalList: tagFile = File(root, animal) profileDict += tagFile.get_pattern_session_list(tagPattern=tagPattern) return profileDict def get_pattern_animalList(root: Root, tagPattern: str): """ this function returns list of animals with at least one session matching the 'tagPattern' """ allProfile = get_tag_pattern(root, animalList=None, tagPattern=tagPattern) sessionList = allProfile.Sessions animalList = [] for session in sessionList: animalList.append(session[:len(root.prefix) + 3]) animalList = list(set(animalList)) return sorted(animalList) def get_current_animals(root: Root, days_passed: int = 4): """ this function returns the list of animals with a new session within the last few ('days_passed') days """ now = datetime.datetime.now() all_animals = root.get_all_animals() if all_animals == []: logging.warning('No animal found!') return [] animalList = [] for animal in all_animals: animalTag = File(root, animal) sessionList = animalTag.get_all_sessions() if not sessionList: continue lastSessionDate = animalTag.get_session_date(sessionList[-1]) if (now - lastSessionDate).days <= days_passed: animalList.append(animal) return animalList
4,653
1,314
from random import choice from copy import deepcopy from game_data import GameData from agents import Agent import numpy as np import random import pickle import pandas as pd class IsaacAgent(Agent): def __init__(self, max_time=2, max_depth=300): self.max_time = max_time self.max_depth = max_depth # self.heuristic = [ # [0], [0], [0], [0], [0], [0], [0], # [0], [0], [0], [0], [0], [0], [0], # [0], [0], [0], [0], [0], [0], [0], # [0], [0], [0], [0], [0], [0], [0], # ... # [0], [0], [-1], [-1], [-1], [0], [0], # odd player # [0], [1, -1], [0], [0], [0], [1, -1], [0] # even player # ] self.heuristic = [ [0], [0], [0], [0], [0], [0], [0], [0], [0], [1, -1], [2, -2], [1, -1], [0], [0], [0], [0], [1, -2], [2, -2], [1, -2], [0], [0], [0], [0], [3, -2], [3, -2], [3, -2], [0], [0], [0], [0], [2, -3], [2, -3], [2, -3], [0], [0], [0], [1, -1], [3, -3], [4, -4], [3, -3], [1, -1], [0] ] self.game_data = None self.model = pickle.load(open("./c4model.sav", 'rb')) def get_name(self) -> str: return "IsaacAgent" def get_move(self, game_data) -> int: self.game_data = game_data rows_reversed_connect4_board = [] for row in list(game_data.game_board): rows_reversed_connect4_board.append(row[::-1]) connect4_board = list(np.concatenate(rows_reversed_connect4_board).flat)[::-1] for sn, sv in enumerate(connect4_board): if sv == 0: connect4_board[sn] = ' ' elif sv == 1: connect4_board[sn] = 'R' else: connect4_board[sn] = 'B' # self.print_board(connect4_board) turn = self.player(connect4_board) actions = self.actions(connect4_board) best_action = random.choice(actions) if turn == 'R': # max player local_best_min_v = -float('inf') for action in actions: self.current_depth = 0 min_v = self.min_value(self.result(connect4_board, action)) # print(f"Action: {action + 1}, Min Value: {min_v}") if min_v > local_best_min_v: local_best_min_v = min_v best_action = action else: # min player local_best_max_v = float('inf') for action in actions: self.current_depth = 0 max_v = 
self.max_value(self.result(connect4_board, action)) # print(f"Action: {action + 1}, Max Value: {max_v}") if max_v < local_best_max_v: local_best_max_v = max_v best_action = action return best_action def print_board(self, board): for l in range(0, 42, 7): row = ''.join([board[l + i] + '|' for i in range(7)]) print(row[:13]) print('-+-+-+-+-+-+-') def player(self, board): return 'B' if board.count('R') > board.count('B') else 'R' def is_tie(self, board): return len([sq for sq in board if sq == ' ']) == 0 def utility(self, board): return 0 if self.is_tie(board) else -1000 if self.player(board) == "R" else 1000 def terminal(self, board): # use modulo 7 to detect new row row = 0 for sq in range(42): if sq % 7 == 0: row += 1 distance_to_new_row = 7 * row - (sq + 1) distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0] if board[sq] == ' ': continue # 4 horizontally if distance_to_new_row >= 3 and board[sq] == board[sq + 1] and board[sq] == board[sq + 2] and board[sq] == board[sq + 3]: return True # 4 vertically elif distance_to_column_end > 2 and board[sq] == board[sq + 7] and board[sq] == board[sq + 14] and board[sq] == board[sq + 21]: return True # 4 diagonally elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 24 < len(board) and board[sq] == board[sq + 8] and board[sq] == board[sq + 16] and board[sq] == board[sq + 24]: return True elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 18 < len(board) and board[sq] == board[sq - 6] and board[sq] == board[sq - 12] and board[sq] == board[sq - 18]: return True return self.is_tie(board) def actions(self, board): return [sn for sn in range(7) if board[sn] == ' '] def result(self, board, action): result = board[:] for r in range(6): current_sq = board[action + 35 - r * 7] if current_sq == ' ': result[action + 35 - r * 7] = self.player(board) break return result def count_two_in_row(self, board, player): two_in_row = 0 row = 0 for sq in range(42): if sq % 7 == 0: 
row += 1 distance_to_new_row = 7 * row - (sq + 1) distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0] if board[sq] != player or board[sq].isdigit() or board[sq] == ' ': continue # 4 horizontally if distance_to_new_row >= 3 and board[sq] == board[sq + 1]: two_in_row += 1 # 4 vertically elif distance_to_column_end > 2 and board[sq] == board[sq + 7]: two_in_row += 1 # 4 diagonally elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 8 < len(board) and board[sq] == board[sq + 8]: two_in_row += 1 elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 6 < len(board) and board[sq] == board[sq - 6]: two_in_row += 1 return two_in_row def count_three_in_row(self, board, player): three_in_row = 0 row = 0 for sq in range(42): if sq % 7 == 0: row += 1 distance_to_new_row = 7 * row - (sq + 1) distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0] if board[sq] != player or board[sq].isdigit() or board[sq] == ' ': continue # 4 horizontally if distance_to_new_row >= 3 and board[sq] == board[sq + 1] and board[sq] == board[sq + 2]: three_in_row += 1 # 4 vertically elif distance_to_column_end > 2 and board[sq] == board[sq + 7] and board[sq] == board[sq + 14]: three_in_row += 1 # 4 diagonally elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 16 < len(board) and board[sq] == board[sq + 8] and board[sq] == board[sq + 16]: three_in_row += 1 elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 12 < len(board) and board[sq] == board[sq - 6] and board[sq] == board[sq - 12]: three_in_row += 1 return three_in_row def evaluate(self, board): """ Heuristic: - Squares value: [0, 0, -1, -1, -1, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, -2, -2, -2, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, -3, -3, -3, 0, 0, 0, 0, 1, 1, 1, 0, 0] - Include win squares of each player and where they are located. 
Heuristic based off Odd-Even strategy: https://www.youtube.com/watch?v=YqqcNjQMX18 """ total_score = 0 for vn, values in enumerate(self.heuristic): for value in values: if value < 0 and board[vn] == 'B': total_score += value elif value > 0 and board[vn] == 'R': total_score += value # three_in_row_modifier = 10 # total_score += self.count_three_in_row(board, 'R') * three_in_row_modifier # total_score -= self.count_three_in_row(board, 'B') * three_in_row_modifier # total_score += self.count_two_in_row(board, 'R') * three_in_row_modifier # total_score -= self.count_two_in_row(board, 'B') * three_in_row_modifier # divisor = 5 # for i in range(7): # action_result = self.result(board, i) # if self.terminal(action_result): # total_score += self.utility(action_result) / divisor # print(total_score) # multiplier = 2 # r_win_states = 0 # b_win_states = 0 # for i in range(7): # action_result = self.result(board, i) # if self.terminal(action_result): # if self.utility(action_result) == 1000: # r_win_states += 1 # else: # b_win_states += 1 # total_score += r_win_states * multiplier # total_score -= b_win_states * multiplier # if r_win_states >= 2: # total_score += 400 # elif b_win_states >= 2: # total_score -= 400 # print(f"Red Win States: {r_win_states}, Blue Win States: {b_win_states}") # multiplier = 30 # conv_data = [] # for sq in board: # if sq.isdigit() or sq == ' ': # conv_data.append(0) # elif sq == 'R': # conv_data.append(1) # else: # conv_data.append(-1) # c4_board = pd.Series(conv_data, index=[f"pos_{sn + 1}" for sn, sv in enumerate(board)]) # total_score += self.model.predict([c4_board])[0][0] return total_score def min_value(self, board): if self.terminal(board): return self.utility(board) if self.current_depth > self.max_depth: return self.evaluate(board) self.current_depth += 1 v = float('inf') for action in self.actions(board): max_v = self.max_value(self.result(board, action)) v = min(v, max_v) return v def max_value(self, board): if self.terminal(board): 
return self.utility(board) if self.current_depth > self.max_depth: return self.evaluate(board) self.current_depth += 1 v = -float('inf') for action in self.actions(board): min_v = self.min_value(self.result(board, action)) v = max(v, min_v) return v
10,747
3,767
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler from SocketServer import ThreadingMixIn from os import curdir, sep import threading import urlparse import mimetypes PORT_NUMBER = 8080 VERSION_NUMBER = '1.0.0' class Handler(BaseHTTPRequestHandler): def do_GET(self): #Parse path into dictionary and process url = urlparse.urlparse(self.path) url_dict = urlparse.parse_qs(url.query) if self.path=='/': self.path="index.html" self.respond('text/html') return mimetype = mimetypes.guess_type(self.path) mimetype = mimetype[0] try: self.respond(mimetype) except IOError: self.send_error(404,'File Not Found: %s' % self.path) return def respond(self, mimetype): #Open the static file requested and send it f = open(curdir + sep + self.path) self.send_response(200) self.send_header('Content-type',mimetype) self.end_headers() self.wfile.write(f.read()) f.close() return class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): """Handle Requests in a seperate thread.""" if __name__ == '__main__': server = ThreadedHTTPServer(('', PORT_NUMBER), Handler) print 'Starting Server on port ' + str(PORT_NUMBER) print 'Version Code: ' + VERSION_NUMBER print 'Author @dannyb648 | danbeglin.co.uk' server.serve_forever()
1,288
494
from pyteal import * ADMIN_KEY = Bytes("admin") WHITELISTED_KEY = Bytes("whitelisted") REQUESTS_BALANCE_KEY = Bytes("requests_balance") MAX_BUY_AMOUNT = Int(1000000000) MIN_BUY_AMOUNT = Int(10000000) REQUESTS_SELLER = Addr("N5ICVTFKS7RJJHGWWM5QXG2L3BV3GEF6N37D2ZF73O4PCBZCXP4HV3K7CY") MARKET_EXCHANGE_NOTE = Bytes("algo-oracle-app-4") def approval_program(): on_creation = Seq( [ Assert(Txn.application_args.length() == Int(0)), App.localPut(Int(0), ADMIN_KEY, Int(1)), Return(Int(1)) ] ) is_contract_admin = App.localGet(Int(0), ADMIN_KEY) # set/remove an admin for this contract admin_status = Btoi(Txn.application_args[2]) set_admin = Seq( [ Assert( And( is_contract_admin, Txn.application_args.length() == Int(3), Txn.accounts.length() == Int(1), ) ), App.localPut(Int(1), ADMIN_KEY, admin_status), Return(Int(1)), ] ) register = Seq( [ App.localPut(Int(0), WHITELISTED_KEY, Int(0)), Return(Int(1)) ] ) # Depending on what you do, you should always consider implementing a whitelisting to # control who access your app. This will allow you to process offchain validation before # allowing an account to call you app. # You may also consider case by case whitelisting to allow access to specific business methods. whitelist = Seq( [ Assert( And( is_contract_admin, Txn.application_args.length() == Int(2), Txn.accounts.length() == Int(1) ) ), App.localPut(Int(1), WHITELISTED_KEY, Int(1)), Return(Int(1)) ] ) # This should be added to the checklist of business methods. is_whitelisted = App.localGet(Int(0), WHITELISTED_KEY) # An admin can increase the request balance of a user. 
requests_amount = Btoi(Txn.application_args[1]) allocate_requests = Seq( [ Assert( And( is_contract_admin, # Sent by admin Txn.application_args.length() == Int(3), # receiver and amount are provided Txn.accounts.length() == Int(1), App.localGet(Int(1), WHITELISTED_KEY), # receiver is whitelisted ) ), App.localPut( Int(1), REQUESTS_BALANCE_KEY, App.localGet(Int(1), REQUESTS_BALANCE_KEY) + requests_amount ), Return(Int(1)) ] ) # a client can buy requests buy_requests = Seq( [ Assert( And( is_whitelisted, Global.group_size() == Int(2), # buying requests must be done using an atomic transfer Gtxn[0].type_enum() == TxnType.Payment, # the first transaction must be a payment... Gtxn[0].receiver() == REQUESTS_SELLER, # ...to our address Gtxn[0].amount() >= MIN_BUY_AMOUNT, # we don't sell for less than 10... Gtxn[0].amount() <= MAX_BUY_AMOUNT, # ...or more than 1000 ALGO Txn.group_index() == Int(1), # call to the contract is the second transaction Txn.application_args.length() == Int(2), Txn.accounts.length() == Int(1) # the address which will use the requests must be provided ) ), App.localPut( Int(1), REQUESTS_BALANCE_KEY, App.localGet(Int(1), REQUESTS_BALANCE_KEY) + (Gtxn[0].amount() / Int(100000)), ), Return(Int(1)) ] ) market_exchange_rate_request = Seq( [ Assert( And( is_whitelisted, Txn.note() == MARKET_EXCHANGE_NOTE, Txn.application_args.length() == Int(4), Txn.accounts.length() == Int(0), App.localGet(Int(0), REQUESTS_BALANCE_KEY) >= Int(1) ) ), App.localPut( Int(0), REQUESTS_BALANCE_KEY, App.localGet(Int(0), REQUESTS_BALANCE_KEY) - Int(1), ), Return(Int(1)) ] ) # Implement other oracle methods... 
program = Cond( [Txn.application_id() == Int(0), on_creation], [Txn.on_completion() == OnComplete.DeleteApplication, Return(is_contract_admin)], [Txn.on_completion() == OnComplete.UpdateApplication, Return(is_contract_admin)], [Txn.on_completion() == OnComplete.CloseOut, Return(Int(1))], [Txn.on_completion() == OnComplete.OptIn, register], [Txn.application_args[0] == Bytes("set_admin"), set_admin], [Txn.application_args[0] == Bytes("whitelist"), whitelist], [Txn.application_args[0] == Bytes("allocate_requests"), allocate_requests], [Txn.application_args[0] == Bytes("buy_requests"), buy_requests], [Txn.application_args[0] == Bytes("get_market_exchange_rate"), market_exchange_rate_request] ) return program def clear_state_program(): program = Seq( [ Return(Int(1)) ] ) return program if __name__ == "__main__": with open("algorand_oracle_approval.teal", "w") as f: compiled = compileTeal(approval_program(), mode=Mode.Application, version=5) f.write(compiled) with open("algorand_oracle_clear_state.teal", "w") as f: compiled = compileTeal(clear_state_program(), mode=Mode.Application, version=5) f.write(compiled)
5,872
1,885
""" Support for Shelly smart home devices. For more details about this component, please refer to the documentation at https://home-assistant.io/components/shelly/ """ # pylint: disable=broad-except, bare-except, invalid-name, import-error from datetime import timedelta import logging import time import asyncio import voluptuous as vol from homeassistant.const import ( CONF_DEVICES, CONF_DISCOVERY, CONF_ID, CONF_NAME, CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP) from homeassistant import config_entries from homeassistant.helpers import discovery from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity import Entity from homeassistant.helpers.script import Script from homeassistant.util import slugify from .const import * from .configuration_schema import CONFIG_SCHEMA REQUIREMENTS = ['pyShelly==0.1.16'] _LOGGER = logging.getLogger(__name__) __version__ = "0.1.6.b6" VERSION = __version__ BLOCKS = {} DEVICES = {} BLOCK_SENSORS = [] DEVICE_SENSORS = [] #def _get_block_key(block): # key = block.id # if not key in BLOCKS: # BLOCKS[key] = block # return key def get_block_from_hass(hass, discovery_info): """Get block from HASS""" if SHELLY_BLOCK_ID in discovery_info: key = discovery_info[SHELLY_BLOCK_ID] return hass.data[SHELLY_BLOCKS][key] def _dev_key(dev): key = dev.id + "-" + dev.device_type if dev.device_sub_type is not None: key += "-" + dev.device_sub_type return key #def _get_device_key(dev): # key = _dev_key(dev) # if not key in DEVICES: # DEVICES[key] = dev # return key def get_device_from_hass(hass, discovery_info): """Get device from HASS""" device_key = discovery_info[SHELLY_DEVICE_ID] return hass.data[SHELLY_DEVICES][device_key] async def async_setup(hass, config): """Set up this integration using yaml.""" if DOMAIN not in config: return True hass.data[DOMAIN] = config hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": 
config_entries.SOURCE_IMPORT}, data={} ) ) return True async def async_setup_entry(hass, config_entry): """Setup Shelly component""" _LOGGER.info("Starting shelly, %s", __version__) config = hass.data[DOMAIN] conf = config.get(DOMAIN, {}) #todo! hass.data[SHELLY_CONFIG] = conf hass.data[SHELLY_DEVICES] = DEVICES hass.data[SHELLY_BLOCKS] = BLOCKS if conf.get(CONF_WIFI_SENSOR) is not None: _LOGGER.warning("wifi_sensor is deprecated, use rssi in sensors instead.") if conf.get(CONF_WIFI_SENSOR) and SENSOR_RSSI not in conf[CONF_SENSORS]: conf[CONF_SENSORS].append(SENSOR_RSSI) if conf.get(CONF_UPTIME_SENSOR) is not None: _LOGGER.warning("uptime_sensor is deprecated, use uptime in sensors instead.") if conf.get(CONF_UPTIME_SENSOR) and SENSOR_UPTIME not in conf[CONF_SENSORS]: conf[CONF_SENSORS].append(SENSOR_UPTIME) hass.data["SHELLY_INSTANCE"] = ShellyInstance(hass, config_entry, conf) #def update_status_information(): # pys.update_status_information() #for _, block in pys.blocks.items(): # block.update_status_information() #async def update_domain_callback(_now): # """Update the Shelly status information""" # await hass.async_add_executor_job(update_status_information) #if conf.get(CONF_ADDITIONAL_INFO): # hass.helpers.event.async_track_time_interval( # update_domain_callback, update_interval) return True class ShellyInstance(): """Config instance of Shelly""" def __init__(self, hass, config_entry, conf): self.hass = hass self.config_entry = config_entry self.platforms = {} self.pys = None self.conf = conf self.discover = conf.get(CONF_DISCOVERY) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._stop) hass.loop.create_task( self.start_up() ) async def start_up(self): conf = self.conf if conf.get(CONF_LOCAL_PY_SHELLY): _LOGGER.info("Loading local pyShelly") #pylint: disable=no-name-in-module from .pyShelly import pyShelly else: from pyShelly import pyShelly additional_info = conf.get(CONF_ADDITIONAL_INFO) update_interval = conf.get(CONF_SCAN_INTERVAL) self.pys = 
pys = pyShelly() _LOGGER.info("pyShelly, %s", pys.version()) pys.cb_block_added.append(self._block_added) pys.cb_device_added.append(self._device_added) pys.cb_device_removed.append(self._device_removed) pys.username = conf.get(CONF_USERNAME) pys.password = conf.get(CONF_PASSWORD) pys.cloud_auth_key = conf.get(CONF_CLOUD_AUTH_KEY) pys.cloud_server = conf.get(CONF_CLOUD_SEREVR) pys.tmpl_name = conf.get(CONF_TMPL_NAME, pys.tmpl_name) if additional_info: pys.update_status_interval = update_interval pys.only_device_id = conf.get(CONF_ONLY_DEVICE_ID) pys.igmp_fix_enabled = conf.get(CONF_IGMPFIX) pys.mdns_enabled = conf.get(CONF_MDNS) pys.host_ip = conf.get(CONF_HOST_IP, '') pys.start() pys.discover() discover_by_ip = conf.get(CONF_DISCOVER_BY_IP) for ip_addr in discover_by_ip: pys.add_device_by_ip(ip_addr, 'IP-addr') if conf.get(CONF_VERSION): attr = {'version': VERSION, 'pyShellyVersion': pys.version()} self._add_device("sensor", attr) fake_block = { 'id' : "694908", 'fake_block': True, 'info_values': {'temperature':5}, 'cb_updated' : [], } attr = {'sensor_type':'temperature', 'itm': fake_block} self._add_device("sensor", fake_block) async def _stop(self, _): """Stop Shelly.""" _LOGGER.info("Shutting down Shelly") self.pys.close() def _get_specific_config_root(self, key, *ids): item = self._get_specific_config(key, None, *ids) if item is None: item = self.conf.get(key) return item def _find_device_config(self, device_id): device_conf_list = self.conf.get(CONF_DEVICES) for item in device_conf_list: if item[CONF_ID].upper() == device_id: return item return None def _get_device_config(self, device_id, id_2=None): """Get config for device.""" item = self._find_device_config(device_id) if item is None and id_2 is not None: item = self._find_device_config(id_2) if item is None: return {} return item def _get_specific_config(self, key, default, *ids): for device_id in ids: item = self._find_device_config(device_id) if item is not None and key in item: return item[key] return 
default def _get_sensor_config(self, *ids): sensors = self._get_specific_config(CONF_SENSORS, None, *ids) if sensors is None: sensors = self.conf.get(CONF_SENSORS) if SENSOR_ALL in sensors: return [*SENSOR_TYPES.keys()] if sensors is None: return {} return sensors def _add_device(self, platform, dev): self.hass.add_job(self._async_add_device(platform, dev)) async def _async_add_device(self, platform, dev): if platform not in self.platforms: self.platforms[platform] = asyncio.Event() await self.hass.config_entries.async_forward_entry_setup( self.config_entry, platform) self.platforms[platform].set() await self.platforms[platform].wait() async_dispatcher_send(self.hass, "shelly_new_" + platform \ , dev, self) def _block_updated(self, block): hass_data = block.hass_data if hass_data['discover']: if hass_data['allow_upgrade_switch']: has_update = block.info_values.get('has_firmware_update', False) update_switch = getattr(block, 'firmware_switch', None) if has_update: if update_switch is None: attr = {'firmware': True, 'block':block} self._add_device("switch", attr) elif update_switch is not None: update_switch.remove() #block_key = _get_block_key(block) for key, _value in block.info_values.items(): ukey = block.id + '-' + key if not ukey in BLOCK_SENSORS: BLOCK_SENSORS.append(ukey) for sensor in hass_data['sensor_cfg']: if SENSOR_TYPES[sensor].get('attr') == key: attr = {'sensor_type':key, 'itm': block} self._add_device("sensor", attr) def _block_added(self, block): self.hass.add_job(self._async_block_added(block)) async def _async_block_added(self, block): block.cb_updated.append(self._block_updated) discover_block = self.discover \ or self._get_device_config(block.id) != {} block.hass_data = { 'allow_upgrade_switch' : self._get_specific_config_root(CONF_UPGRADE_SWITCH, block.id), 'sensor_cfg' : self._get_sensor_config(block.id), 'discover': discover_block } #Config block if block.unavailable_after_sec is None: block.unavailable_after_sec \ = 
self._get_specific_config_root(CONF_UNAVALABLE_AFTER_SEC, block.id) #if conf.get(CONF_ADDITIONAL_INFO): #block.update_status_information() # cfg_sensors = conf.get(CONF_SENSORS) # for sensor in cfg_sensors: # sensor_type = SENSOR_TYPES[sensor] # if 'attr' in sensor_type: # attr = {'sensor_type':sensor_type['attr'], # SHELLY_BLOCK_ID : block_key} # discovery.load_platform(hass, 'sensor', DOMAIN, attr, # config) def _device_added(self, dev, _code): self.hass.add_job(self._async_device_added(dev, _code)) async def _async_device_added(self, dev, _code): device_config = self._get_device_config(dev.id, dev.block.id) if not self.discover and device_config == {}: return if dev.device_type == "ROLLER": self._add_device("cover", dev) if dev.device_type == "RELAY": if device_config.get(CONF_LIGHT_SWITCH): self._add_device("light", dev) else: self._add_device("switch", dev) elif dev.device_type == 'POWERMETER': sensor_cfg = self._get_sensor_config(dev.id, dev.block.id) if SENSOR_POWER in sensor_cfg: self._add_device("sensor", dev) elif dev.device_type == 'SWITCH': sensor_cfg = self._get_sensor_config(dev.id, dev.block.id) if SENSOR_SWITCH in sensor_cfg: self._add_device("sensor", dev) elif dev.device_type in ["SENSOR"]: #, "INFOSENSOR"]: self._add_device("sensor", dev) elif dev.device_type in ["LIGHT", "DIMMER"]: self._add_device("light", dev) def _device_removed(self, dev, _code): dev.shelly_device.remove() try: pass #key = _dev_key(dev) #del DEVICES[key] except KeyError: pass class ShellyBlock(Entity): """Base class for Shelly entities""" def __init__(self, block, instance, prefix=""): conf = instance.conf id_prefix = conf.get(CONF_OBJECT_ID_PREFIX) self._unique_id = slugify(id_prefix + "_" + block.type + "_" + block.id + prefix) self.entity_id = "." + self._unique_id entity_id = instance._get_specific_config(CONF_ENTITY_ID , None, block.id) if entity_id is not None: self.entity_id = "." 
+ slugify(id_prefix + "_" + entity_id + prefix) self._unique_id += "_" + slugify(entity_id) #self._name = None #block.type_name() #if conf.get(CONF_SHOW_ID_IN_NAME): # self._name += " [" + block.id + "]" self.fake_block = isinstance(block, dict) #:'fake_block' in block self._show_id_in_name = conf.get(CONF_SHOW_ID_IN_NAME) self._block = block self.hass = instance.hass self.instance = instance self._block.cb_updated.append(self._updated) block.shelly_device = self self._name = instance._get_specific_config(CONF_NAME, None, block.id) self._name_ext = None self._is_removed = False self.hass.add_job(self.setup_device(block)) async def setup_device(self, block): dev_reg = await self.hass.helpers.device_registry.async_get_registry() dev_reg.async_get_or_create( config_entry_id=self.entity_id, identifiers={(DOMAIN, block.id)}, manufacturer="Shelly", name=block.friendly_name(), model=block.type_name(), sw_version="0.0.1", ) @property def name(self): """Return the display name of this device.""" if self.fake_block: name = 'Fake' if self._name is None: name = self._block.friendly_name() else: name = self._name if self._name_ext: name += ' - ' + self._name_ext if self._show_id_in_name: name += " [" + self._block.id + "]" return name def _updated(self, _block): """Receive events when the switch state changed (by mobile, switch etc)""" if self.entity_id is not None and not self._is_removed: self.schedule_update_ha_state(True) @property def device_state_attributes(self): """Show state attributes in HASS""" if self.fake_block: return {} attrs = {'ip_address': self._block.ip_addr, 'shelly_type': self._block.type_name(), 'shelly_id': self._block.id, 'discovery': self._block.discovery_src } room = self._block.room_name() if room: attrs['room'] = room if self._block.info_values is not None: for key, value in self._block.info_values.items(): attrs[key] = value return attrs @property def device_info(self): return { 'identifiers': { (DOMAIN, self._block.id) } # 'name': self.name, # 
'manufacturer': "Shelly", # 'model': self._block.type, # 'sw_version': '0.0.1', # #'via_device': (hue.DOMAIN, self.api.bridgeid), } def remove(self): self._is_removed = True self.hass.add_job(self.async_remove) class ShellyDevice(Entity): """Base class for Shelly entities""" def __init__(self, dev, instance): conf = instance.conf id_prefix = conf.get(CONF_OBJECT_ID_PREFIX) self._unique_id = id_prefix + "_" + dev.type + "_" + dev.id self.entity_id = "." + slugify(self._unique_id) entity_id = instance._get_specific_config(CONF_ENTITY_ID, None, dev.id, dev.block.id) if entity_id is not None: self.entity_id = "." + slugify(id_prefix + "_" + entity_id) self._unique_id += "_" + slugify(entity_id) self._show_id_in_name = conf.get(CONF_SHOW_ID_IN_NAME) #self._name = dev.type_name() #if conf.get(CONF_SHOW_ID_IN_NAME): # self._name += " [" + dev.id + "]" # 'Test' #light.name self._dev = dev self.hass = instance.hass self.instance = instance self._dev.cb_updated.append(self._updated) dev.shelly_device = self self._name = instance._get_specific_config(CONF_NAME, None, dev.id, dev.block.id) self._sensor_conf = instance._get_sensor_config(dev.id, dev.block.id) self._is_removed = False def _updated(self, _block): """Receive events when the switch state changed (by mobile, switch etc)""" if self.entity_id is not None and not self._is_removed: self.schedule_update_ha_state(True) if self._dev.info_values is not None: for key, _value in self._dev.info_values.items(): ukey = self._dev.id + '-' + key if not ukey in DEVICE_SENSORS: DEVICE_SENSORS.append(ukey) for sensor in self._sensor_conf: if SENSOR_TYPES[sensor].get('attr') == key: attr = {'sensor_type':key, 'itm':self._dev} conf = self.hass.data[SHELLY_CONFIG] #discovery.load_platform(self.hass, 'sensor', # DOMAIN, attr, conf) @property def name(self): """Return the display name of this device.""" if self._name is None: name = self._dev.friendly_name() else: name = self._name if self._show_id_in_name: name += " [" + self._dev.id + 
"]" return name @property def device_state_attributes(self): """Show state attributes in HASS""" attrs = {'ip_address': self._dev.ip_addr, 'shelly_type': self._dev.type_name(), 'shelly_id': self._dev.id, 'discovery': self._dev.discovery_src } room = self._dev.room_name() if room: attrs['room'] = room if self._dev.block.info_values is not None: for key, value in self._dev.block.info_values.items(): attrs[key] = value if self._dev.info_values is not None: for key, value in self._dev.info_values.items(): attrs[key] = value if self._dev.sensor_values is not None: for key, value in self._dev.sensor_values.items(): attrs[key] = value return attrs @property def device_info(self): return { 'identifiers': { # Serial numbers are unique identifiers within a specific domain (DOMAIN, self._dev.block.id) }, # 'name': self._dev.block.friendly_name(), # 'manufacturer': "Shelly", # 'model': self._dev.block.type_name(), # 'sw_version': '0.0.1', #'via_device': (hue.DOMAIN, self.api.bridgeid), } @property def unique_id(self): """Return the ID of this device.""" return self._unique_id @property def available(self): """Return true if switch is available.""" return self._dev.available() def remove(self): self._is_removed = True self.hass.add_job(self.async_remove) @property def should_poll(self): """No polling needed.""" return False
20,198
6,720
"""remove unique constraint Revision ID: 36745fa33987 Revises: 6b7ad8fd60f9 Create Date: 2022-01-06 08:31:55.141039 """ from alembic import op # revision identifiers, used by Alembic. revision = "36745fa33987" down_revision = "6b7ad8fd60f9" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint("species_name_key", "species", type_="unique") # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_unique_constraint("species_name_key", "species", ["name"]) # ### end Alembic commands ###
664
257
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo.addons.website_event_track_live.controllers.track_live import EventTrackLiveController class EventTrackLiveQuizController(EventTrackLiveController): def _prepare_track_suggestion_values(self, track, track_suggestion): res = super(EventTrackLiveQuizController, self)._prepare_track_suggestion_values(track, track_suggestion) res['current_track']['show_quiz'] = bool(track.quiz_id) and not track.is_quiz_completed return res
564
180
embed <drac2> GVARS = load_json(get_gvar("c1ee7d0f-750d-4f92-8d87-70fa22c07a81")) CLASSES = [load_json(get_gvar(gvar)) for gvar in GVARS] DISPLAY = { "acrobatics": "Acrobatics", "animalhandling": "Animal Handling", "athletics": "Athletics", "arcana": "Arcana", "deception": "Deception", "dex": "Dexterity", "dexterity": "Dexterity", "cha": "Charisma", "charisma": "Charisma", "con": "Constitution", "constitution": "Constitution", "history": "History", "investigation": "Investigation", "insight": "Insight", "int": "Intelligence", "intelligence": "Intelligence", "intimidation": "Intimidation", "medicine": "Medicine", "nature": "Nature", "perception": "Perception", "performance": "Performance", "persuasion": "Persuasion", "religion": "Religion", "sleightofhand": "Sleight of Hand", "survival": "Survival", "stealth": "Stealth", "str": "Strength", "strength": "Strength", "wis": "Wisdom", "wisdom": "Wisdom" } char = character() ret_name = get("_retainerName") ret_class = get("_retainerClass") ret_level = int(get("_retainerLevel", 0)) ret_hp = char.get_cc("Retainer HP") if char and char.cc_exists("Retainer HP") else 0 title = f"{char.name} doesn't have a retainer!" if ret_name and ret_class and ret_level: title = f"{char.name} has {ret_name} a level {ret_level} {ret_class} retainer!" 
cl_info = [c for c in CLASSES if c["name"] == ret_class] cl_info = cl_info[0] if cl_info else None fields = "" if cl_info: fields += f"""-f "HP|{ret_hp}/{ret_level}|inline" """ fields += f"""-f "AC|{cl_info["ac"]}|inline" """ fields += f"""-f "Primary Ability|{DISPLAY[cl_info["primary"]]}|inline" """ fields += f"""-f "Saves|{", ".join(DISPLAY[x] for x in cl_info["saves"])}|inline" """ fields += f"""-f "Skills|{", ".join(DISPLAY[x] for x in cl_info["skills"])}|inline" """ attack_text = [node for node in cl_info["attack"]["automation"] if node["type"] == "text"] fields += f"""-f "{cl_info["attack"]["name"]}|{attack_text[0]["text"] if attack_text else ""}" """ for action in cl_info["actions"]: if ret_level < action["level"]: continue attack_text = [node for node in action["attack"]["automation"] if node["type"] == "text"] fields += f"""-f "{action["attack"]["name"]} ({action["cc_max"]}/Day)|{attack_text[0]["text"] if attack_text else ""} {char.cc_str(action["cc"]) if char and action["cc"] and char.cc_exists(action["cc"]) else ""}" """ </drac2> -title "{{title}}" {{fields}} -footer "!retainer | kbsletten#5710" -color <color> -thumb {{get("_retainerImage")}}
2,640
1,080
################################################################################
##### For Bloomberg ------------------------------------------------------------
##### Can't use this if you're on a Mac :(
################################################################################
from __future__ import print_function
from __future__ import absolute_import

from optparse import OptionParser

import os
import platform as plat
import sys

# blpapi ships its native DLLs separately; on Python 3.8+ under Windows the
# DLL directory must be registered explicitly before the import can succeed.
if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
    # pylint: disable=no-member
    with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
        import blpapi
else:
    import blpapi

from utils import date_to_str
import pandas as pd


def parseCmdLine():
    """Parse -a/--ip (server host) and -p (server port) options.

    Defaults are localhost:8194, the standard Bloomberg Desktop API endpoint.
    Returns the optparse options object; positional args are discarded.
    """
    parser = OptionParser(description="Retrieve reference data.")
    parser.add_option("-a", "--ip", dest="host",
                      help="server name or IP (default: %default)",
                      metavar="ipAddress", default="localhost")
    parser.add_option("-p", dest="port", type="int",
                      help="server port (default: %default)",
                      metavar="tcpPort", default=8194)
    (options, args) = parser.parse_args()
    return options


def req_historical_data(bbg_identifier, startDate, endDate):
    """Fetch daily OPEN/HIGH/LOW/PX_LAST/VWAP history for one security.

    Results are cached in bbg_data/<bbg_identifier>.csv. If the cache fully
    covers [startDate, endDate], the cached slice is returned without any
    Bloomberg query; if it partially covers it, only the missing side of the
    range is requested and merged into the cache.

    :param bbg_identifier: Bloomberg security identifier string.
    :param startDate: start date (any form accepted by date_to_str).
    :param endDate: end date; must be strictly after startDate.
    :return: pandas DataFrame with columns Date/Open/High/Low/Close/VWAP,
        or None if the session/service could not be opened.
    """
    # Recast start & end dates in Bloomberg's format
    startDate = date_to_str(startDate, "%Y%m%d")
    endDate = date_to_str(endDate, "%Y%m%d")

    if(pd.to_datetime(startDate) >= pd.to_datetime(endDate)):
        sys.exit(
            "in req_historical_data in 'bloomberg_functions.py': " + \
            "specified startDate is later than endDate!"
        )

    # First, check to see if there is already a local .p data file with the
    # data you need for bbg_identifier. If it's not there, create it.
    if not os.path.isdir("bbg_data"):
        os.makedirs("bbg_data")
        print("created the 'bbg_data' folder.")

    if (bbg_identifier + ".csv") in os.listdir("bbg_data"):
        old_bbg_data = pd.read_csv("bbg_data/" + bbg_identifier + ".csv")
        # Cached and requested date ranges, as date objects for comparison.
        first_old = pd.to_datetime(min(old_bbg_data['Date'])).date()
        last_old = pd.to_datetime(max(old_bbg_data['Date'])).date()
        first_new = pd.to_datetime(startDate).date()
        last_new = pd.to_datetime(endDate).date()

        if first_old <= first_new and last_old >= last_new:
            # Don't need to make a query; have all data we need.
            histdata = old_bbg_data[[
                (pd.to_datetime(x).date() <= last_new) & (
                    pd.to_datetime(x).date() >= first_new
                ) for x in old_bbg_data['Date']
            ]]
            histdata.reset_index(drop=True, inplace=True)
            return histdata

        if first_old > first_new and last_old < last_new:
            # do nothing for now, just requery the bigger dataset. Can refine
            # this case later.
            print(
                "overwriting old data with date range: " + startDate + \
                " to " + endDate
            )
        else:
            # Requested window sticks out on one side only: shrink the query
            # to the missing piece; the merge below stitches it to the cache.
            if first_new < first_old:
                endDate = date_to_str(first_old, "%Y%m%d")
            else:
                startDate = date_to_str(last_old, "%Y%m%d")
            # Debug: show the (possibly adjusted) query start date.
            print(startDate)

    options = parseCmdLine()

    # Fill SessionOptions
    sessionOptions = blpapi.SessionOptions()
    sessionOptions.setServerHost(options.host)
    sessionOptions.setServerPort(options.port)

    print("Connecting to %s:%s" % (options.host, options.port))

    # Create a Session
    session = blpapi.Session(sessionOptions)

    # Start a Session
    if not session.start():
        print("Failed to start session.")
        return

    try:
        # Open service to get historical data from
        if not session.openService("//blp/refdata"):
            print("Failed to open //blp/refdata")
            return

        # Obtain previously opened service
        refDataService = session.getService("//blp/refdata")

        # Create and fill the request for the historical data
        request = refDataService.createRequest("HistoricalDataRequest")
        request.getElement("securities").appendValue(bbg_identifier)
        request.getElement("fields").appendValue("OPEN")
        request.getElement("fields").appendValue("HIGH")
        request.getElement("fields").appendValue("LOW")
        request.getElement("fields").appendValue("PX_LAST")
        request.getElement("fields").appendValue("EQY_WEIGHTED_AVG_PX")
        request.set("periodicityAdjustment", "ACTUAL")
        request.set("periodicitySelection", "DAILY")
        request.set("startDate", startDate)
        request.set("endDate", endDate)
        request.set("maxDataPoints", 1400)  # Don't adjust please :)

        print("Sending Request:", request)

        # Send the request
        session.sendRequest(request)

        # Process received events
        # NOTE(review): if no RESPONSE event ever arrives this loop spins
        # forever (nextEvent(500) yields TIMEOUT events, which are ignored);
        # and if the RESPONSE contains no HistoricalDataResponse message,
        # `histdata` below is unbound — consider guarding both. TODO confirm.
        while (True):
            # We provide timeout to give the chance for Ctrl+C handling:
            ev = session.nextEvent(500)
            for msg in ev:
                if str(msg.messageType()) == "HistoricalDataResponse":
                    histdata = []
                    for fd in msg.getElement("securityData").getElement(
                            "fieldData").values():
                        histdata.append([fd.getElementAsString("date"), \
                            fd.getElementAsFloat("OPEN"), fd.getElementAsFloat(
                                "HIGH"), fd.getElementAsFloat("LOW"), \
                            fd.getElementAsFloat("PX_LAST"), \
                            fd.getElementAsFloat("EQY_WEIGHTED_AVG_PX")])
                    histdata = pd.DataFrame(histdata,
                                            columns=["Date", "Open", "High",
                                                     "Low", "Close", "VWAP"])
            if ev.eventType() == blpapi.Event.RESPONSE:
                # Response completely received, so we could exit
                if 'old_bbg_data' in locals():
                    # Merge freshly queried rows with the cached file,
                    # de-duplicating on Date.
                    histdata = pd.concat([histdata, old_bbg_data], axis=0)
                    histdata = histdata.drop_duplicates('Date')
                    histdata = histdata.sort_values('Date')
                    histdata.reset_index(drop=True, inplace=True)
                pd.DataFrame.to_csv(
                    histdata, "bbg_data/" + bbg_identifier + ".csv",
                    index=False
                )
                return histdata
    finally:
        # Stop the session
        session.stop()


__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:  The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""

####### End of Bloomberg Section -----------------------------------------------
################################################################################
8,351
2,414
import os
from typing import Sequence, Union

import numpy as np
import tifffile
from deepcell.applications import Mesmer
from imctools.io.ometiff.ometiffparser import OmeTiffParser
from skimage import measure
from sqlalchemy.orm import Session

from histocat.core.acquisition import service as acquisition_service
from histocat.core.dataset.models import DatasetModel
from histocat.core.errors import SegmentationError
from histocat.core.segmentation.dto import SegmentationSubmissionDto
from histocat.core.utils import timeit


def normalize_by_minmax(img: np.ndarray) -> np.ndarray:
    """Rescale each channel of a (channels, y, x) stack to the [0, 1] range.

    NaNs are ignored when computing the per-channel extrema.
    NOTE(review): a channel with constant intensity gives a zero denominator
    and yields NaN/inf output — confirm upstream guarantees variation.
    (Fixed annotation: was `Sequence[Union[np.ndarray, np.ndarray]]`, a
    redundant self-Union; the axis=(1, 2) reductions show a 3-D array.)
    """
    channel_mins = np.nanmin(img, axis=(1, 2), keepdims=True)
    channel_maxs = np.nanmax(img, axis=(1, 2), keepdims=True)
    img = (img - channel_mins) / (channel_maxs - channel_mins)
    return img


def normalize_by_zscore(img: np.ndarray) -> np.ndarray:
    """Standardize each channel of a (channels, y, x) stack to zero mean,
    unit variance, ignoring NaNs.

    NOTE(review): a constant channel has zero std and yields NaN output —
    confirm upstream guarantees variation.
    """
    channel_means = np.nanmean(img, axis=(1, 2), keepdims=True)
    channel_stds = np.nanstd(img, axis=(1, 2), keepdims=True)
    img = (img - channel_means) / channel_stds
    return img


@timeit
def process_acquisition(
    db: Session, acquisition_id: int, params: SegmentationSubmissionDto, model, dataset: DatasetModel
):
    """Segment one acquisition with Mesmer and extract per-cell features.

    Loads the acquisition's OME-TIFF, builds a two-channel (nuclei, cytoplasm)
    mean image, runs Mesmer segmentation, writes the label mask into the
    dataset folder, and returns object labels, centroids, and per-channel
    mean intensities.

    :param db: database session used to resolve the acquisition.
    :param acquisition_id: id of the acquisition to process.
    :param params: segmentation submission parameters (channels,
        normalization, compartment, pre/post-processing settings).
    :param model: pre-loaded Mesmer model passed to the application wrapper.
    :param dataset: dataset whose location receives the mask file.
    :raises SegmentationError: if the acquisition id is unknown.
    :return: dict with acquisition, mask_location, object_numbers,
        centroids_x/centroids_y, mean_intensities (per channel), and
        channel_names.
    """
    acquisition = acquisition_service.get_by_id(db, acquisition_id)
    if not acquisition:
        raise SegmentationError(f"Acquisition id:{acquisition_id} not found")

    parser = OmeTiffParser(acquisition.location)
    acquisition_data = parser.get_acquisition_data()

    nuclei_channels = acquisition_data.get_image_stack_by_names(params.nuclei_channels)
    cytoplasm_channels = acquisition_data.get_image_stack_by_names(params.cytoplasm_channels)

    # Optional per-channel normalization before averaging.
    if params.preprocessing.channels_normalization == "minmax":
        nuclei_channels = normalize_by_minmax(nuclei_channels)
        cytoplasm_channels = normalize_by_minmax(cytoplasm_channels)
    elif params.preprocessing.channels_normalization == "zscore":
        nuclei_channels = normalize_by_zscore(nuclei_channels)
        cytoplasm_channels = normalize_by_zscore(cytoplasm_channels)

    # Collapse each stack to a single mean image per compartment.
    nuclei_channels = np.nanmean(nuclei_channels, axis=0)
    cytoplasm_channels = np.nanmean(cytoplasm_channels, axis=0)

    # Combined together and expand to 4D (batch, y, x, channel).
    im = np.stack((nuclei_channels, cytoplasm_channels), axis=-1)
    im = np.expand_dims(im, 0)

    app = Mesmer(model)
    segmentation_predictions = app.predict(
        im,
        batch_size=1,
        image_mpp=1.0,
        compartment=params.compartment,
        preprocess_kwargs=params.preprocessing.dict(),
        postprocess_kwargs_whole_cell=params.postprocessing.dict(),
        postprocess_kwargs_nuclear=params.postprocessing.dict(),
    )

    # Single-batch, single-compartment label mask.
    mask = segmentation_predictions[0, :, :, 0]
    mask_filename = os.path.basename(acquisition.location).replace("ome.tiff", "mask.tiff")
    mask_location = os.path.join(dataset.location, mask_filename)
    tifffile.imwrite(mask_location, mask)

    output = {
        "acquisition": acquisition,
        "mask_location": mask_location,
        "object_numbers": None,
        "centroids_x": None,
        "centroids_y": None,
        "mean_intensities": {},
        "channel_names": acquisition_data.channel_names,
    }

    for c in acquisition_data.channel_names:
        d = measure.regionprops_table(
            label_image=mask,
            intensity_image=acquisition_data.get_image_by_name(c),
            properties=("label", "centroid", "mean_intensity"),
        )
        if output["object_numbers"] is None:
            # Labels and centroids are identical for every channel; keep the
            # first set only.
            output["object_numbers"] = d.get("label")
            output["centroids_x"] = d.get("centroid-1")
            output["centroids_y"] = d.get("centroid-0")
        output["mean_intensities"][c] = d.get("mean_intensity")

    return output
3,767
1,269
import math
import numpy as np
import random
import os
from PIL import Image
import pyttsx3


class TopError(Exception):
    '''Raised when the image center's imaginary part is not below `top`.'''
    pass


class OddResolutionError(Exception):
    '''Raised when either image dimension is odd (an exact integer center
    pixel is required).'''
    pass


class Fractal:
    '''
    Makes images of the Mandelbrot set given a center coordinate and the
    imaginary coordinate of the top row of pixels.
    '''
    # If the image directory doesn't exist, create it.
    IMAGE_DIR = os.path.dirname(os.path.realpath(__file__))
    IMAGE_DIR = os.path.join(IMAGE_DIR, 'PNG')
    if not os.path.exists(IMAGE_DIR):
        os.makedirs(IMAGE_DIR)

    # String for image progress.
    P_STRING = 'Your {cr}+{ci}i image is {p} percent complete.'

    def __init__(self):
        '''
        Initiate the TTS engine. These take a while to render, so I found,
        for while I wait, to hear the progress of the image helped.
        '''
        self.tts = pyttsx3.init()

    def mandelbrot(self, image_size, colors, center=None, top=1.6,
                   magnification=1, divergence_iterations=1600, speak=True,
                   image_number=None):
        '''
        Render one Mandelbrot image and write it as a PNG under IMAGE_DIR.

        image_size: (width, height) in pixels; both must be even.
        colors: dict with keys 'm' (set color) and 'd' (divergent color);
            values are mutated in place into 1x3 NumPy arrays.
        center: (real, imaginary) coordinate of the image center.
        top: imaginary coordinate of the top row of pixels.
        magnification: subpixel samples per pixel (brightness averaging).
        divergence_iterations: iterations before declaring set membership.
        speak: announce progress via TTS as well as printing it.
        image_number: optional numeric prefix for the output filename.
        '''
        # Speech toggle.
        self.SPEAK = speak

        # Since the resolution of the image is divided by two, an even number is
        # required.
        if image_size[0]%2 == 1 or image_size[1]%2 == 1:
            raise OddResolutionError

        # This center with the default top place the whole Mandelbrot
        # visualisation centered in the image.
        if center is None:
            center = (-0.66, 0)

        # If the center of the image is set to the same value as the top of the
        # image, raise an error.
        if center[1] >= top:
            raise TopError

        '''
        Magnification is how many subpixels each pixel is divided into. This
        defines the brightness of the pixel. Divergence iterations define how
        many iterations the code executes until saying whether a coordinate
        diverges or not. Divergence levels are gradations of color for those
        coordinates who do end up diverging from the set.
        '''
        self.MAGNIFICATION = magnification
        self.DIVERGENCE_ITERATIONS = divergence_iterations
        self.DIVERGENCE_LEVELS = [self.DIVERGENCE_ITERATIONS//x for x in \
            reversed(range(1, 256))]

        # Reshape each color to a 1x3 row so np.append can stack them below.
        for key, value in colors.items():
            colors[key] = np.array(value).reshape(1, 3)

        # Initiate a few variables for the image.
        self.IMAGE_SIZE = image_size
        self.CENTER = center
        self.TOP = top
        self.CENTER_PIXEL = (int(self.IMAGE_SIZE[0] / 2),
                             int(self.IMAGE_SIZE[1] / 2))
        image_array = np.zeros((self.IMAGE_SIZE[1], self.IMAGE_SIZE[0], 3),
                               dtype=np.uint8)

        # Variables for percent complete.
        i = 0
        p_complete = 0
        print(self.P_STRING.format(cr=self.CENTER[0], ci=self.CENTER[1],
                                   p=p_complete))
        if self.SPEAK:
            self.tts.say(self.P_STRING.format(cr=self.CENTER[0],
                                              ci=self.CENTER[1],
                                              p=p_complete))
            self.tts.runAndWait()

        '''
        Test each pixel of the image for divergence or inclusion in the
        Mandelbrot set.
        '''
        for pixel_y in range(self.IMAGE_SIZE[1]):
            for pixel_x in range(self.IMAGE_SIZE[0]):
                # Count for subpixels.
                tally = {'mandelbrot': 0, 'divergence': 0}
                # Reflects how many iterations a divergent coordinate hangs on.
                divergence_multipliers = []
                # NOTE(review): this loop variable shadows the `magnification`
                # parameter, and the subpixels sampled lie along a diagonal
                # of the pixel rather than on a grid — confirm intended.
                for magnification in range(1, self.MAGNIFICATION+1):
                    # Subpixel float values.
                    x = pixel_x + magnification/self.MAGNIFICATION
                    y = pixel_y + magnification/self.MAGNIFICATION
                    # Find the coordinates for a subpixel.
                    real, imaginary = self.pixels_to_coordinates(x, y)
                    # "Good" is defined as not already diverged from the circle
                    # of radius 2 that contains the Mandelbrot set so as to not
                    # spend time calculating what is already known.
                    if self.coordinates_good(real, imaginary):
                        # Iterate the equation to test for divergence.
                        c = real + imaginary * 1j
                        z = None
                        diverges = False
                        for d_i in range(self.DIVERGENCE_ITERATIONS):
                            z = self.next_mandelbrot(z, c)
                            if not self.coordinates_good(z.real, z.imag):
                                # Count for divergence.
                                tally['divergence'] += 1
                                # The divergence multiplier determines the
                                # brightness of a pixel in the image. If the
                                # divergent coordinate hangs on for more
                                # iterations, it gets a brighter color.
                                d_min = min([x for x in self.DIVERGENCE_LEVELS
                                             if d_i+1 <= x])
                                divergence_multiplier = \
                                    self.DIVERGENCE_LEVELS.index(d_min)
                                divergence_multipliers.append(
                                    divergence_multiplier)
                                diverges = True
                                break
                        if not diverges:
                            # Count for the Mandelbrot set.
                            tally['mandelbrot'] += 1
                    else:
                        # Already outside the radius-2 circle: divergent with
                        # the dimmest multiplier.
                        tally['divergence'] += 1
                        divergence_multipliers.append(0)
                '''
                Make a NumPy array with one of each color value for each
                tally. If there are 3 subpixels in the Mandelbrot set and 1
                subpixel divergent, then the array has three RGB values of the
                Mandelbrot color and one RGB value for divergent subpixels.
                The divergent subpixel RGB value is brighter if the subpixel
                remained in the circle for more iterations. The average of
                these RGB values is what determines the RGB value for the
                entire pixel.
                '''
                color = np.empty((0, 3))
                if tally['mandelbrot'] > 0:
                    for _ in range(tally['mandelbrot']):
                        color = np.append(color, colors['m'], axis=0)
                if tally['divergence'] > 0:
                    for _ in range(tally['divergence']):
                        divergence_multiplier = np.average(
                            divergence_multipliers)
                        divergence_multiplier /= len(self.DIVERGENCE_LEVELS)
                        color = np.append(color,
                                          colors['d'] * divergence_multiplier,
                                          axis=0)
                color = np.average(color, axis=0)
                image_array[pixel_y, pixel_x] = color

                # The calculations are complete for this iteration, so show the
                # percent complete if the percent complete is a multiple of 10.
                i += 1
                percent_complete = (i+1) / (self.IMAGE_SIZE[0] * \
                    self.IMAGE_SIZE[1])
                p_complete_ = int(percent_complete*100)//10*10
                if p_complete_ > p_complete:
                    p_complete = p_complete_
                    print(self.P_STRING.format(cr=self.CENTER[0],
                                               ci=self.CENTER[1],
                                               p=p_complete))
                    if self.SPEAK:
                        self.tts.say(self.P_STRING.format(cr=self.CENTER[0],
                                                          ci=self.CENTER[1],
                                                          p=p_complete))
                        self.tts.runAndWait()

        # Finally, write the image.
        IMAGE_F = 'mandelbrot center=' + str(self.CENTER[0]) + '+' + \
            str(self.CENTER[1]) + 'i, top=' + str(self.TOP) + 'i, ' + \
            str(self.IMAGE_SIZE[0]) + '×' + str(self.IMAGE_SIZE[1]) + \
            '.png'
        if image_number is not None:
            IMAGE_F = "%04d" % image_number + ' ' + IMAGE_F
        IMAGE_PATH = os.path.join(self.IMAGE_DIR, IMAGE_F)
        self.write_image(image_array, IMAGE_PATH)

    def write_image(self, image_array, IMAGE_PATH):
        '''
        Write the image array to an image.
        '''
        image = Image.fromarray(image_array)
        image.save(IMAGE_PATH)

    def next_mandelbrot(self, z, c):
        '''
        Iterate the Mandelbrot equation (z -> z**2 + c; None seeds z = 0).
        '''
        if z is None:
            z = complex(0 + 0j)
        return z**2 + c

    def pixels_to_coordinates(self, pixel_x, pixel_y):
        '''
        Find the coordinate of a pixel or subpixel in the complex plane.
        '''
        # Scale: complex units per pixel, from the distance between the image
        # center row and the top row.
        unit_per_pixel = (self.TOP - self.CENTER[1]) / (self.IMAGE_SIZE[1] / 2)
        real = (pixel_x - self.CENTER_PIXEL[0]) * unit_per_pixel + \
            self.CENTER[0]
        # NOTE(review): at pixel_y == CENTER_PIXEL[1] this evaluates to
        # -CENTER[1] rather than +CENTER[1], so the image is mirrored about
        # the real axis whenever CENTER[1] != 0 (invisible for the default
        # center since the Mandelbrot set is conjugation-symmetric) — confirm
        # whether this should be CENTER[1] - (pixel_y - CENTER_PIXEL[1]) *
        # unit_per_pixel instead.
        imaginary = (pixel_y - self.CENTER_PIXEL[1]) * unit_per_pixel - \
            self.CENTER[1]
        return (real, imaginary)

    def coordinates_good(self, real, imaginary):
        '''
        Return True if the coordinates are in a circle of radius 2 with the
        center at the origin of the complex plane and False if otherwise.
        '''
        return math.sqrt(real**2 + imaginary**2) <= 2


if __name__ == '__main__':
    pass
9,800
2,802