seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
37325991579
import numpy as np from zipline.pipeline import CustomFactor from zipline.pipeline.data import USEquityPricing class CCI(CustomFactor): """ Commodity Channel Index Momentum indicator **Default Inputs:** USEquityPricing.close, USEquityPricing.high, USEquityPricing.low **Default Window Length:** 14 http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci """ inputs = [USEquityPricing.high, USEquityPricing.low, USEquityPricing.close] window_length = 14 + 1 outputs = ['CCI_t_1', 'CCI_t'] def compute(self, today, assets, out, high, low, close): # typical price matrix typical_prices = (high + low + close) / 3. # mean of each column mean_typical_t = np.nanmean(typical_prices[-1:0:-1], axis=0) mean_typical_t_1 = np.nanmean(typical_prices[-2::-1], axis=0) # mean deviation mean_deviation_t = np.sum( np.abs(typical_prices[-1:0:-1] - np.tile(mean_typical_t, (len(typical_prices) - 1, 1))), axis=0) / ( self.window_length - 1) mean_deviation_t_1 = np.sum( np.abs(typical_prices[-2::-1] - np.tile(mean_typical_t_1, (len(typical_prices) - 1, 1))), axis=0) / ( self.window_length - 1) # CCI out.CCI_t[:] = (typical_prices[-1] - mean_typical_t) / (.015 * mean_deviation_t) out.CCI_t_1[:] = (typical_prices[-2] - mean_typical_t_1) / (.015 * mean_deviation_t_1)
ahmad-emanuel/quant_trading_system
Indicators/CCI_self.py
CCI_self.py
py
1,542
python
en
code
1
github-code
36
24019764012
import numpy as np import random from matplotlib import pyplot as plt from matplotlib.patches import Circle from matplotlib.patches import Rectangle # ========================== CONSTANTS ============================ L = 7 SHAPES = ['CUBE', 'SPHERE', 'EMPTY'] COLORS = ['R', 'G', 'B'] def get_shape_pattern(i_start, i_end, j_start, j_end): shape1 = random.choice([0,1]) shape2 = random.choice([0,1,2]) thickness = random.choice([1,2,3]) def shape1_pred(i,j): is_wall = [ (i - i_start) < thickness, (j - j_start) < thickness, (i_end - i) < thickness, (j_end - j) < thickness, ] return any(is_wall) def pattern(i,j): if shape1_pred(i,j): return shape1 else: return shape2 return pattern def get_color_pattern(i_start, i_end, j_start, j_end): pattfs = [lambda x : 0, lambda x : 1, lambda x : 2, lambda x : 0 if x % 2 else 1, lambda x : 1 if x % 2 else 2, lambda x : 2 if x % 2 else 0, ] indexs = ['i', 'j', 'i+j'] pattfunc = random.choice(pattfs) patt_id = random.choice(indexs) def pattern(i,j): return pattfunc(eval(patt_id)) return pattern # renders the program into a dictionary of (i,j) => (shape, color) def render_shapes(): def gen_range(): start = random.choice([_ for _ in range(L)]) end = random.choice([_ for _ in range(L)]) if start + 2 <= end: return (start, end) else: return gen_range() i_start, i_end = gen_range() j_start, j_end = gen_range() shape_fun = get_shape_pattern(i_start, i_end, j_start, j_end) color_fun = get_color_pattern(i_start, i_end, j_start, j_end) ret = dict() for i in range(i_start, i_end+1): for j in range(j_start, j_end+1): shape = SHAPES[shape_fun(i,j)] color = COLORS[color_fun(i,j)] if shape != 'EMPTY': ret[(i,j)] = (shape, color) return ret # draws the shapes onto a canvas def draw(shapes, name): R = 0.9 / 2 / L plt.figure() currentAxis = plt.gca(aspect='equal') for coord in shapes: shape, color = shapes[coord] x,y = coord if shape == 'CUBE': currentAxis.add_patch(Rectangle((x/L, y/L), 2*R,2*R, facecolor=color)) if shape == 'SPHERE': currentAxis.add_patch(Circle((x/L+R, y/L+R), R, facecolor=color)) plt.savefig(f'drawings/{name}.png') plt.close() # generate a legal program, where legality is defined loosely def gen_legal_shape(): shapes = render_shapes() if len(shapes) >= 1: return shapes else: return gen_legal_shape() # turn shape into a cononical repr so to keep duplicate programs out def shape_to_repr(shapes): return tuple(sorted(list(shapes.items()))) def unrepr_shape(shape_repr): return dict(shape_repr) if __name__ == '__main__': shapes = gen_legal_shape() # print (shapes) # print (shape_to_repr(shapes)) draw(shapes, 'prog')
evanthebouncy/program_synthesis_pragmatics
version_space/grid.py
grid.py
py
3,191
python
en
code
7
github-code
36
3119120370
''' 参考资料:https://blog.csdn.net/weixin_45971950/article/details/122331273 ''' import cv2 def is_inside(o, i): ox, oy, ow, oh = o ix, iy, iw, ih = i return ox > ix and oy > iy and ox+ow < ix+iw and oy+oh < iy+ih def draw_person(image, person): x, y, w, h = person cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 1) # 读取摄像头 cap = cv2.VideoCapture('20220827-093000-100000.mp4') # 视频的帧率FPS fps = cap.get(cv2.CAP_PROP_FPS) # 视频的总帧数 total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT) # 视屏读取速度,1为1倍速,8为8倍速 speed = 8 # 使用opencv的hog特征进行行人检测 hog = cv2.HOGDescriptor() hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector()) for i in range(int(total_frame)): ret, frame = cap.read() if not ret: break if i % (int(fps)*speed) == 0: # 检测代码 found, w = hog.detectMultiScale(frame) #print(found, w) foundList = [] for ri, r in enumerate(found): flag = 0 for qi, q in enumerate(found): if ri != qi and is_inside(r, q): flag = 1 if (flag == 0): foundList.append(r) for person in foundList: draw_person(frame, person) cv2.imshow("face", frame) if ord('q') == cv2.waitKey(1): break # 释放内存 cv2.destroyAllWindows() # 释放摄像头 cap.release()
ryan6liu/demo
facedetect/demo/demo_movedetect_hog_svm.py
demo_movedetect_hog_svm.py
py
1,477
python
en
code
0
github-code
36
1122067056
# Sweet home Alabama class Node: def __init__(self, input_info): self.__info = input_info self.__children = [] # End of Node class class Tree: def __init__(self): self.root = Node( {"id": 1, "Name": "Root", "Family Name": "Root", "Birthday": "Eternal"} ) self.node_counter = 2 def displayNodes(self, node, indent=""): if node is not None: print( f"{indent}{node.info['id']}) {node.info['Name']} {node.info['Family Name']}" ) for child in node.children: self.displayNodes(child, indent + " ") def showNodes(self): print("\nCurrent Family Members:") self.displayNodes(self.root) def search(self, data, node): if node is None: return None if data == node.info["name"]: print(node.info["name"], "found") return node for child in node.children: found_node = self.search(data, child) if found_node is not None: print(found_node.info["name"], "found") return found_node def addChild(self, parent_node_id, child_data): parent_node = self.findNodeById(parent_node_id, self.root) if parent_node is not None: child_data["id"] = self.node_counter self.node_counter += 1 parent_node.children.append(Node(child_data)) print(f"{child_data['name']} added to the family tree.") else: print("Parent node not found.") def findNodeById(self, node_id, node): if node is None: return None if node.info["id"] == node_id: return node for child in node.children: found_node = self.findNodeById(node_id, child) if found_node is not None: return found_node return None ## End of Tree class def getBirthday(): try: birth_year = int(input("What year was this person born: ")) if birth_year > 2023: print("What are you some sort of psychic?") return birth_month = int(input("What month (1-12): ")) if birth_month < 1 or birth_month > 12: print("Have you ever seen a calendar in your life?") return birth_day = int(input("What day (1-31): ")) if birth_day < 1 or birth_day > 31: print("You're trying to trick me...") return except ValueError: print("Try again...") return birth_year, birth_month, birth_day def addFamilyMember(family_tree, parent_id, first_name, last_name, birthday): try: parent_id = int(parent_id) if parent_id <= 0 or parent_id >= family_tree.node_counter: print("Invalid parent ID. Please choose a valid ID.") return child_data = { "id": family_tree.node_counter, "Name": first_name, "Family Name": last_name, "Birthday": birthday, } family_tree.addChild(parent_id, child_data) except ValueError: print("Invalid input. Please enter valid information.") def main(): family_tree = Tree() while True: print("\nWelcome! Here are your options: ") print("1) Add A Family Member.") print("2) Display Sorted Birthdays. (Oldest to youngest)") print("3) Find Relationship.") print("4) Visualize Family Tree.") print("5) Count Same First Names.") print("6) Exit. (The programm that is)") user_choise = int(input("What would you like to do?")) try: if user_choise == 1: print("Let's add a new member:") first_name = input("Provide a first name: ") last_name = input("Provide a last Name: ") birthday = getBirthday() family_tree.displayNodes() parent_id = "Choose the id of the parents: " addFamilyMember(family_tree, parent_id, first_name, last_name, birthday) elif user_choise == 2: print() elif user_choise == 3: print() elif user_choise == 4: print() elif user_choise == 5: print() elif user_choise == 6: print("Thank you for using me... Reminds me of my ex...") break else: print( "It looks like something is off... Maybe try picking an actual option?" ) except ValueError: print("Please pick a valid option...")
xgiberish/foundations-cs-python
assignment_05_Dina_Fallah.py
assignment_05_Dina_Fallah.py
py
4,706
python
en
code
0
github-code
36
36956278049
from suite_subprocess import suite_subprocess from contextlib import contextmanager from wtscenario import make_scenarios import json, re, wiredtiger, wttest # Shared base class used by verbose tests. class test_verbose_base(wttest.WiredTigerTestCase, suite_subprocess): # The maximum number of lines we will read from stdout in any given context. nlines = 50000 # The JSON schema we expect all messages to follow. Captures all possible fields, detailing # each field's name, associated type and whether we always expect for that field to be # present. expected_json_schema = { 'category': {'type': str, 'always_expected': True }, 'category_id': {'type': int, 'always_expected': True }, 'error_str': {'type': str, 'always_expected': False }, 'error_code': {'type': int, 'always_expected': False }, 'msg': {'type': str, 'always_expected': True }, 'session_dhandle_name': {'type': str, 'always_expected': False }, 'session_err_prefix': {'type': str, 'always_expected': False }, 'session_name': {'type': str, 'always_expected': False }, 'thread': {'type': str, 'always_expected': True }, 'ts_sec': {'type': int, 'always_expected': True }, 'ts_usec': {'type': int, 'always_expected': True }, 'verbose_level': {'type': str, 'always_expected': True }, 'verbose_level_id': {'type': int, 'always_expected': True }, } # Validates the JSON schema of a given event handler message, ensuring the schema is consistent and expected. def validate_json_schema(self, json_msg): expected_schema = dict(self.expected_json_schema) for field in json_msg: # Assert the JSON field is valid and expected. self.assertTrue(field in expected_schema, 'Unexpected field "%s" in JSON message: %s' % (field, str(json_msg))) # Assert the type of the JSON field is expected. self.assertEqual(type(json_msg[field]), expected_schema[field]['type'], 'Unexpected type of field "%s" in JSON message, expected "%s" but got "%s": %s' % (field, str(expected_schema[field]['type']), str(type(json_msg[field])), str(json_msg))) expected_schema.pop(field, None) # Go through the remaining fields in the schema and ensure we've seen all the fields that are always expected be present # in the JSON message for field in expected_schema: self.assertFalse(expected_schema[field]['always_expected'], 'Expected field "%s" in JSON message, but not found: %s' % (field, str(json_msg))) # Validates the verbose category (and ID) in a JSON message is expected. def validate_json_category(self, json_msg, expected_categories): # Assert the category field is in the JSON message. self.assertTrue('category' in json_msg, 'JSON message missing "category" field') self.assertTrue('category_id' in json_msg, 'JSON message missing "category_id" field') # Assert the category field values in the JSON message are expected. self.assertTrue(json_msg['category'] in expected_categories, 'Unexpected verbose category "%s"' % json_msg['category']) self.assertTrue(json_msg['category_id'] == expected_categories[json_msg['category']], 'The category ID received in the message "%d" does not match its expected definition "%d"' % (json_msg['category_id'], expected_categories[json_msg['category']])) def create_verbose_configuration(self, categories): if len(categories) == 0: return '' return 'verbose=[' + ','.join(categories) + ']' @contextmanager def expect_verbose(self, categories, patterns, expect_json, expect_output = True): # Clean the stdout resource before yielding the context to the execution block. We only want to # capture the verbose output of the using context (ignoring any previous output up to this point). self.cleanStdout() # Create a new connection with the given verbose categories. verbose_config = self.create_verbose_configuration(categories) # Enable JSON output if required. if expect_json: verbose_config += ",json_output=[message]" conn = self.wiredtiger_open(self.home, verbose_config) # Yield the connection resource to the execution context, allowing it to perform any necessary # operations on the connection (for generating the expected verbose output). yield conn # Read the contents of stdout to extract our verbose messages. output = self.readStdout(self.nlines) # Split the output into their individual messages. We want validate the contents of each message # to ensure we've only generated verbose messages for the expected categories. verbose_messages = output.splitlines() if expect_output: self.assertGreater(len(verbose_messages), 0) else: self.assertEqual(len(verbose_messages), 0) if len(output) >= self.nlines: # If we've read the maximum number of characters, its likely that the last line is truncated ('...'). In this # case, trim the last message as we can't parse it. verbose_messages = verbose_messages[:-1] # Test the contents of each verbose message, ensuring it satisfies the expected pattern. verb_pattern = re.compile('|'.join(patterns)) # To avoid truncated messages, slice out the last message string in the for line in verbose_messages: # Check JSON validity if expect_json: try: json.loads(line) except Exception as e: self.prout('Unable to parse JSON message: %s' % line) raise e self.assertTrue(verb_pattern.search(line) != None, 'Unexpected verbose message: ' + line) # Close the connection resource and clean up the contents of the stdout file, flushing out the # verbose output that occurred during the execution of this context. conn.close() self.cleanStdout() # test_verbose01.py # Verify basic uses of the verbose configuration API work as intended i.e. passing # single & multiple valid and invalid verbose categories. These tests are mainly focused on uses # of the interface prior to the introduction of verbosity levels, ensuring 'legacy'-style # uses of the interface are still supported. class test_verbose01(test_verbose_base): format = [ ('flat', dict(is_json=False)), ('json', dict(is_json=True)), ] scenarios = make_scenarios(format) collection_cfg = 'key_format=S,value_format=S' # Test use cases passing single verbose categories, ensuring we only produce verbose output for the single category. @wttest.skip_for_hook("tiered", "FIXME-WT-9809 - Fails for tiered") def test_verbose_single(self): # Close the initial connection. We will be opening new connections with different verbosity settings throughout # this test. self.close_conn() # Test passing a single verbose category, 'api'. Ensuring the only verbose output generated is related to # the 'api' category. with self.expect_verbose(['api'], ['WT_VERB_API'], self.is_json) as conn: # Perform a set of simple API operations (table creations and cursor operations) to generate verbose API # messages. uri = 'table:test_verbose01_api' session = conn.open_session() session.create(uri, self.collection_cfg) c = session.open_cursor(uri) c['api'] = 'api' c.close() session.close() # Test passing another single verbose category, 'compact'. Ensuring the only verbose output generated is related to # the 'compact' category. with self.expect_verbose(['compact'], ['WT_VERB_COMPACT'], self.is_json) as conn: # Create a simple table to invoke compaction on. We aren't doing anything interesting with the table # such that the data source will be compacted. Rather we want to simply invoke a compaction pass to # generate verbose messages. uri = 'table:test_verbose01_compact' session = conn.open_session() session.create(uri, self.collection_cfg) session.compact(uri) session.close() # Test use cases passing multiple verbose categories, ensuring we only produce verbose output for specified categories. def test_verbose_multiple(self): self.close_conn() # Test passing multiple verbose categories, being 'api' & 'version'. Ensuring the only verbose output generated # is related to those two categories. with self.expect_verbose(['api','version'], ['WT_VERB_API', 'WT_VERB_VERSION'], self.is_json) as conn: # Perform a set of simple API operations (table creations and cursor operations) to generate verbose API # messages. Beyond opening the connection resource, we shouldn't need to do anything special for the version # category. uri = 'table:test_verbose01_multiple' session = conn.open_session() session.create(uri, self.collection_cfg) c = session.open_cursor(uri) c['multiple'] = 'multiple' c.close() # Test use cases passing no verbose categories, ensuring we don't produce unexpected verbose output. def test_verbose_none(self): self.close_conn() # Testing passing an empty set of categories. Ensuring no verbose output is generated. with self.expect_verbose([], [], self.is_json, False) as conn: # Perform a set of simple API operations (table creations and cursor operations). Ensuring no verbose messages # are generated. uri = 'table:test_verbose01_none' session = conn.open_session() session.create(uri, self.collection_cfg) c = session.open_cursor(uri) c['none'] = 'none' c.close() # Test use cases passing invalid verbose categories, ensuring the appropriate error message is # raised. def test_verbose_invalid(self): self.close_conn() self.assertRaisesHavingMessage(wiredtiger.WiredTigerError, lambda:self.wiredtiger_open(self.home, 'verbose=[test_verbose_invalid]'), '/\'test_verbose_invalid\' not a permitted choice for key \'verbose\'/') if __name__ == '__main__': wttest.run()
mongodb/mongo
src/third_party/wiredtiger/test/suite/test_verbose01.py
test_verbose01.py
py
10,688
python
en
code
24,670
github-code
36
2997295714
# Import the socket module from socket import * # Specify the server's port and IP address port = 53 ip = '127.0.0.1' # Create a UDP socket object clientSocket = socket(AF_INET, SOCK_DGRAM) # Enter an infinite loop to allow the user to enter multiple DNS queries while True: # Get a DNS query from the user query = input("Enter DNS query: ") # Send the query to the server using the sendto() method of the socket object clientSocket.sendto(query.encode(), (ip, port)) # Receive the response from the server using the recvfrom() method of the socket object response, address = clientSocket.recvfrom(2048) # Print the response print(response.decode()) # Close the socket connection clientSocket.close()
mananmehtaa/DNS
dns_client.py
dns_client.py
py
766
python
en
code
0
github-code
36
44007123128
""" Django default settings for medfinder project. Crate a local.py in this same folder to set your local settings. """ import requests from os import path from django.utils.translation import ugettext_lazy as _ import environ import datetime import django_heroku root = environ.Path(__file__) - 3 env = environ.Env(DEBUG=(bool, False), ) environ.Env.read_env(env_file=root('.env')) BASE_DIR = root() dirname = path.dirname BASE_DIR = dirname(dirname(dirname(path.abspath(__file__)))) DEBUG = env('DEBUG') ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', []) SECRET_KEY = env('SECRET_KEY') SITE_ID = env('SITE_ID') LOCAL_APPS = ( 'auth_ex', 'medfinder', 'medications', 'public', 'epidemic', 'historic', 'vaccinefinder', ) INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.gis', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.sites', 'django.contrib.staticfiles', 'activity_log', 'corsheaders', 'django_celery_beat', 'django_s3_storage', 'health_check', 'localflavor', 'phonenumber_field', 'rest_registration', 'rest_framework', 'rest_framework_swagger', 'tinymce', ) + LOCAL_APPS AUTH_USER_MODEL = 'auth_ex.User' LOGIN_REDIRECT_URL = '/admin/' # --- STATIC FILES --- STATIC_URL = '/static/' STATIC_ROOT = env('STATIC_ROOT', default=(root - 1)('static')) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # --- MEDIA --- MEDIA_URL = '/media/' MEDIA_ROOT = env('MEDIA_ROOT', default=(root - 1)('media')) TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': ( 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.request', 'django.template.context_processors.static', 'django.template.context_processors.tz', ) } }, ] MIDDLEWARE = ( 'corsheaders.middleware.CorsMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sites.middleware.CurrentSiteMiddleware', 'activity_log.middleware.ActivityLogMiddleware', ) ROOT_URLCONF = 'medfinder.urls' WSGI_APPLICATION = 'medfinder.wsgi.application' USE_TZ = True TIME_ZONE = 'UTC' # --- CORS RULES --- CORS_ORIGIN_ALLOW_ALL = True # --- ACTIVITY LOG --- ACTIVITYLOG_METHODS = ('POST',) NDC_DATABASE_URL = env('NDC_DATABASE_URL', default='') CENSUS_API_KEY = env('CENSUS_API_KEY', default='') GOOGLE_MAP_API_KEY = env('GOOGLE_MAP_API_KEY', default='') # --- LANGUAGES --- USE_I18N = True USE_L10N = True LANGUAGE_CODE = 'en-us' # --- FILE UPLOAD --- DATA_UPLOAD_MAX_MEMORY_SIZE = 104857600 # 100 * 1024 * 1024 # i.e. 100 MB FILE_UPLOAD_MAX_MEMORY_SIZE = 104857600 # 100 * 1024 * 1024 # i.e. 100 MB FILE_UPLOAD_PERMISSIONS = None FILE_UPLOAD_DIRECTORY_PERMISSIONS = None # --- DATABASE --- # --- POSTGRESQL DATABASES = { 'default': env.db( default='postgis://postgres:postgres@postgres:5432/postgres'), 'vaccinedb': { 'CONN_MAX_AGE': 3600, 'ENGINE': 'django.db.backends.mysql', 'HOST': env('VACCINEFINDER_HOST', default=''), 'NAME': env('VACCINEFINDER_NAME', default=''), 'PASSWORD': env('VACCINEFINDER_PASSWORD', default=''), 'USER': env('VACCINEFINDER_USER', default=''), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # --- S3 SETTINGS --- S3_STORAGE_ENABLE = env.bool('S3_STORAGE_ENABLE', default=False) if S3_STORAGE_ENABLE: DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage' STATICFILES_STORAGE = 'django_s3_storage.storage.StaticS3Storage' AWS_REGION = env('AWS_REGION') AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY') AWS_S3_BUCKET_NAME = env('AWS_S3_BUCKET_NAME') AWS_S3_BUCKET_NAME_STATIC = env('AWS_S3_BUCKET_NAME_STATIC') AWS_S3_BUCKET_AUTH = env.bool('AWS_S3_BUCKET_AUTH', default=False) AWS_S3_MAX_AGE_SECONDS = 60 * 60 * 24 * 365 # 1 year. # --- DJANGO COMPRESSOR --- # STATICFILES_FINDERS += ('compressor.finders.CompressorFinder',) # --- DJANGO REGISTRATION REDUX --- ACCOUNT_ACTIVATION_DAYS = 7 REGISTRATION_AUTO_LOGIN = False # --- CELERY --- CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='redis://redis:6379/') CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 60 * 24 # --- CACHE --- CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "{}1".format(CELERY_BROKER_URL), "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", } } } # CELERY_BEAT_SCHEDULE = { # 'import_existing_medications': { # 'task': 'medications.tasks.import_existing_medications', # 'schedule': crontab(day_of_month=15), # 'relative': True, # }, # } # DEBUG TOOLBAR ENABLE_DEBUG_TOOLBAR = env.bool( 'DEBUG', default=False, ) # --- DJANGO REST FRAMEWORK --- REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework_jwt.authentication.JSONWebTokenAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.AllowAny', ), } # --- JWT --- JWT_AUTH = { 'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1), 'JWT_AUTH_HEADER_PREFIX': 'Token', } # --- REST REGISTRATION --- FRONTEND_URL = env('FRONTEND_URL', default='localhost:3000') REST_REGISTRATION = { 'REGISTER_VERIFICATION_ENABLED': False, 'REGISTER_EMAIL_VERIFICATION_ENABLED': False, 'VERIFICATION_FROM_EMAIL': 'no-reply@example.com', 'RESET_PASSWORD_VERIFICATION_URL': '{}/reset-password'.format(FRONTEND_URL), 'USER_HIDDEN_FIELDS': ( 'is_active', 'is_staff', 'is_superuser', 'user_permissions', 'groups', 'date_joined', 'secret', ), } # EMAIL information EMAIL_ENABLE = env.bool('EMAIL_ENABLE', default=True) EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', default=True) EMAIL_HOST = env('EMAIL_HOST', default='') EMAIL_HOST_USER = env('EMAIL_HOST_USER', default='') EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', default='') EMAIL_PORT = env('EMAIL_PORT', default=587) EMAIL_BACKEND = env( 'EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend', ) FROM_EMAIL = env( 'FROM_EMAIL', default='no-reply@example.com' ) DEFAULT_FROM_EMAIL = env( 'DEFAULT_FROM_EMAIL', default='webmaster@localhost', ) if ENABLE_DEBUG_TOOLBAR: INSTALLED_APPS += ( 'debug_toolbar', ) MIDDLEWARE += ( 'debug_toolbar.middleware.DebugToolbarMiddleware', ) INTERNAL_IPS = ('172.18.0.1', '127.0.0.1', 'localhost') DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', ] DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False, 'SHOW_TOOLBAR_CALLBACK': lambda *x: True, } # ---PHONENUMBER FIELD --- PHONENUMBER_DEFAULT_REGION = 'US' DECIMAL_SEPARATOR = '.' # --- STATES, COUNTIES & ZIPCODES --- US_STATES_DATABASE = env( 'US_STATES_DATABASE', default='https://raw.githubusercontent.com/PublicaMundi/MappingAPI/' 'master/data/geojson/us-states.json', ) # Use the {}_{} to format with the correspondent state code and name US_ZIPCODES_DATABASE = env( 'US_ZIPCODES_DATABASE', default='https://raw.githubusercontent.com/OpenDataDE/' 'State-zip-code-GeoJSON/master/{}_{}_zip_codes_geo.min.json', ) US_COUNTIES_DATABASE = env( 'US_COUNTIES_DATABASE', default='http://eric.clst.org/assets/wiki/uploads/' 'Stuff/gz_2010_us_050_00_500k.json', ) GEOJSON_GEOGRAPHIC_CONTINENTAL_CENTER_US = { "type": "Point", "coordinates": [-98.579561, 39.828194], } ZOOM_US = 3 ZOOM_STATE = 7 ZOOM_ZIPCODE = 13 # --- SENTRY --- RAVEN_DSN = env('RAVEN_DSN', default='') if RAVEN_DSN: INSTALLED_APPS += ('raven.contrib.django.raven_compat', ) RAVEN_CONFIG = { 'dsn': RAVEN_DSN, } CELERYD_HIJACK_ROOT_LOGGER = False LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'root': { 'level': 'WARNING', 'handlers': ['sentry'], }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s ' '%(process)d %(thread)d %(message)s' }, }, 'handlers': { 'sentry': { # To capture more than ERROR, change to WARNING, INFO, etc. 'level': 'INFO', 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', # noqa }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose' } }, 'loggers': { 'django.db.backends': { 'level': 'ERROR', 'handlers': ['console'], 'propagate': False, }, 'raven': { 'level': 'DEBUG', 'handlers': ['console'], 'propagate': False, }, 'sentry.errors': { 'level': 'DEBUG', 'handlers': ['console'], 'propagate': False, }, 'celery': { 'level': 'ERROR', 'handlers': ['sentry', 'console'], 'propagate': False, }, }, } # Activate Django-Heroku. django_heroku.settings(locals()) DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis' TYNYMCE_JS_URL=path.join(MEDIA_URL, 'js/tiny_mce')
ninjadevtrack/medifiner-api
medfinder/settings/default.py
default.py
py
11,988
python
en
code
1
github-code
36
10350771272
import pandas as pd from astroquery.simbad import Simbad import astropy.units as u from astropy.coordinates import SkyCoord from astroquery.gaia import Gaia import numpy as np import argparse import sys from time import sleep parser = argparse.ArgumentParser(description='SIXTH: get information from Simbad and GAIA TAP (DR1)') parser.add_argument('-i', nargs=1, default=[0], help='start index', type=int) parser.add_argument('-f', nargs=1, help='file', type=str) args = parser.parse_args() istart = args.i[0] datx=pd.read_csv("../siwiyn/siwiyn.tsv",delimiter="|",comment="#") #_RAJ2000|_DEJ2000|WDS|Name|HD|HIP|Date|PA|Sep|Dmag|Wave|FWHM|f_FWHM|SimbadName|_RA|_DE mask=~datx.duplicated(subset='Name') dat=datx[mask] #dat=dat.set_index("Name") #print(dat["Name"].values[0]) print(len(datx),len(dat)) #sys.exit() #GAIA distance #f=open("siwiyn_position.txt","a") #f.write("System Number|name|RA(2000)|DEC(2000)|Simbad plx|GAIA plx|V|R|J|H|K"+"\n") #f.close() if args.f: namelist=np.loadtxt(args.f[0],dtype=int) else: namelist=range(istart,len(dat)) for i,sysi in enumerate(namelist): f=open("siwiyn_position.txt","a") name=dat["Name"].values[sysi] print(i,name) sleep(1) # if True: try: ra=dat["_RAJ2000"][sysi] dec=dat["_DEJ2000"][sysi] c = SkyCoord(ra+" "+dec, unit=(u.hourangle, u.deg)) width = u.Quantity(5, u.arcsec) height = u.Quantity(5, u.arcsec) #GAIA r = Gaia.query_object_async(coordinate=c, width=width, height=height) plx=None if len(r["parallax"]) == 0: sw = False elif type(r["parallax"][0]) == np.float64: plx=r["parallax"][0] sw = True else: sw = False print("GAIA",plx) Simbad.SIMBAD_URL = "http://simbad.u-strasbg.fr/simbad/sim-script" Simbad.add_votable_fields("parallax","flux(V)","flux(R)","flux(J)","flux(H)","flux(K)") result_table = Simbad.query_region(c, radius='0d0m5s') print(result_table) if result_table is None: plxs=np.nan magcom="|||||" elif len(result_table) == 1: plxs=result_table["PLX_VALUE"].item() V=result_table["FLUX_V"].item() R=result_table["FLUX_R"].item() J=result_table["FLUX_J"].item() H=result_table["FLUX_H"].item() K=result_table["FLUX_K"].item() magcom="|"+str(V)+"|"+str(R)+"|"+str(J)+"|"+str(H)+"|"+str(K) else: plxs=result_table["PLX_VALUE"][0] V=result_table["FLUX_V"][0] R=result_table["FLUX_R"][0] J=result_table["FLUX_J"][0] H=result_table["FLUX_H"][0] K=result_table["FLUX_K"][0] magcom="|"+str(V)+"|"+str(R)+"|"+str(J)+"|"+str(H)+"|"+str(K) #eplx=result_table["PLX_ERROR"].item() if plxs == plxs: f.write(str(sysi)+"|"+name+"|"+str(ra)+"|"+str(dec)+"|"+str(plxs)+"|"+str(plx)+magcom+"\n") else: f.write(str(sysi)+"|"+name+"|"+str(ra)+"|"+str(dec)+"|None|"+str(plx)+magcom+"\n") except: try: ra=dat["_RAJ2000"][sysi] dec=dat["_DEJ2000"][sysi] c = SkyCoord(ra+" "+dec, unit=(u.hourangle, u.deg)) f.write(str(sysi)+"|"+str(ra)+"|"+str(dec)+"|||||||"+"\n") except: f.write(str(sysi)+"|||||||||"+"\n") f.close()
HajimeKawahara/LookAtThis
database/python/siwiyn_parallax.py
siwiyn_parallax.py
py
3,494
python
en
code
0
github-code
36
21883804476
# coding: utf-8 # In[2]: CUDA_VISIBLE_DEVICES = 1 # In[3]: import cv2 import numpy as np import matplotlib.pyplot as plt # In[4]: trainA = [] trainB = [] for i in range(1,701): img = cv2.imread('rain/{}clean.jpg'.format(i)) img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) img = cv2.resize(img,(256,256)) trainA.append(img) img = cv2.imread('rain/{}bad.jpg'.format(i)) img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) img = cv2.resize(img,(256,256)) trainB.append(img) trainA = np.array(trainA) trainB = np.array(trainB) trainA = (trainA - 127.5)/127.5 trainB = (trainB - 127.5)/127.5 # In[ ]: from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, BatchNormalization, Activation, add from keras.models import Model, model_from_json from keras.optimizers import Adam from keras.layers.advanced_activations import ELU, LeakyReLU from keras.utils.vis_utils import plot_model def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), activation='relu', name=None): ''' 2D Convolutional layers Arguments: x {keras layer} -- input layer filters {int} -- number of filters num_row {int} -- number of rows in filters num_col {int} -- number of columns in filters Keyword Arguments: padding {str} -- mode of padding (default: {'same'}) strides {tuple} -- stride of convolution operation (default: {(1, 1)}) activation {str} -- activation function (default: {'relu'}) name {str} -- name of the layer (default: {None}) Returns: [keras layer] -- [output layer] ''' x = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding, use_bias=False)(x) x = BatchNormalization(axis=3, scale=False)(x) if(activation == None): return x x = Activation(activation, name=name)(x) return x def trans_conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(2, 2), name=None): ''' 2D Transposed Convolutional layers Arguments: x {keras layer} -- input layer filters {int} -- number of filters num_row {int} -- number of rows in filters num_col {int} -- number of columns in filters Keyword Arguments: padding {str} -- mode of padding (default: {'same'}) strides {tuple} -- stride of convolution operation (default: {(2, 2)}) name {str} -- name of the layer (default: {None}) Returns: [keras layer] -- [output layer] ''' x = Conv2DTranspose(filters, (num_row, num_col), strides=strides, padding=padding)(x) x = BatchNormalization(axis=3, scale=False)(x) return x def MultiResBlock(U, inp, alpha = 1.67): ''' MultiRes Block Arguments: U {int} -- Number of filters in a corresponding UNet stage inp {keras layer} -- input layer Returns: [keras layer] -- [output layer] ''' W = alpha * U shortcut = inp shortcut = conv2d_bn(shortcut, int(W*0.167) + int(W*0.333) + int(W*0.5), 1, 1, activation=None, padding='same') conv3x3 = conv2d_bn(inp, int(W*0.167), 4, 4, activation='relu', padding='same') conv5x5 = conv2d_bn(conv3x3, int(W*0.333), 4, 4, activation='relu', padding='same') conv7x7 = conv2d_bn(conv5x5, int(W*0.5), 4, 4, activation='relu', padding='same') out = concatenate([conv3x3, conv5x5, conv7x7], axis=3) out = BatchNormalization(axis=3)(out) out = add([shortcut, out]) out = Activation('relu')(out) out = BatchNormalization(axis=3)(out) return out def ResPath(filters, length, inp): ''' ResPath Arguments: filters {int} -- [description] length {int} -- length of ResPath inp {keras layer} -- input layer Returns: [keras layer] -- [output layer] ''' shortcut = inp shortcut = conv2d_bn(shortcut, filters, 1, 1, activation=None, padding='same') out = conv2d_bn(inp, filters, 4, 4, activation='relu', padding='same') out = add([shortcut, out]) out = Activation('relu')(out) out = BatchNormalization(axis=3)(out) for i in range(length-1): shortcut = out shortcut = conv2d_bn(shortcut, filters, 1, 1, activation=None, padding='same') out = conv2d_bn(out, filters, 4, 4, activation='relu', padding='same') out = add([shortcut, out]) out = Activation('relu')(out) out = BatchNormalization(axis=3)(out) return out def MultiResUnet(height, width, n_channels): ''' MultiResUNet Arguments: height {int} -- height of image width {int} -- width of image n_channels {int} -- number of channels in image Returns: [keras model] -- MultiResUNet model ''' inputs = Input((height, width, n_channels)) mresblock1 = MultiResBlock(32, inputs) pool1 = MaxPooling2D(pool_size=(2, 2))(mresblock1) mresblock1 = ResPath(32, 4, mresblock1) mresblock2 = MultiResBlock(32*2, pool1) pool2 = MaxPooling2D(pool_size=(2, 2))(mresblock2) mresblock2 = ResPath(32*2, 3, mresblock2) mresblock3 = MultiResBlock(32*4, pool2) pool3 = MaxPooling2D(pool_size=(2, 2))(mresblock3) mresblock3 = ResPath(32*4, 2, mresblock3) mresblock4 = MultiResBlock(32*8, pool3) up5 = concatenate([Conv2DTranspose( 32*4, (2, 2), strides=(2, 2), padding='same')(mresblock4), mresblock3], axis=3) mresblock6 = MultiResBlock(32*4, up5) up6 = concatenate([Conv2DTranspose( 32*2, (2, 2), strides=(2, 2), padding='same')(mresblock6), mresblock2], axis=3) mresblock7 = MultiResBlock(32*2, up6) up7 = concatenate([Conv2DTranspose( 32, (2, 2), strides=(2, 2), padding='same')(mresblock7), mresblock1], axis=3) mresblock8 = MultiResBlock(32, up7) g = Conv2DTranspose(3, (4,4), strides=(1,1), padding='same')(mresblock8) output1 = Activation('tanh')(g) #second encoder-decoder ############################################################################################## mresblock10 = MultiResBlock(32, output1) pool10 = MaxPooling2D(pool_size=(2, 2))(mresblock10) mresblock10 = ResPath(32, 4, mresblock10) bridge1 = concatenate([Conv2D(32*2,(2,2),strides=(2,2),padding='same')(mresblock10), mresblock7],axis=3) mresblock11 = MultiResBlock(32*2, bridge1) pool11 = MaxPooling2D(pool_size=(2, 2))(mresblock11) mresblock11 = ResPath(32*2, 3, mresblock11) bridge2 = concatenate([Conv2D(32*4,(2,2),strides=(2,2),padding='same')(mresblock11), mresblock6],axis=3) mresblock12 = MultiResBlock(32*4, bridge2) pool12 = MaxPooling2D(pool_size=(2, 2))(mresblock12) mresblock12 = ResPath(32*4, 2, mresblock12) bridge3 = concatenate([Conv2D(32*8,(2,2),strides=(2,2),padding='same')(mresblock12), mresblock4],axis=3) mresblock13 = MultiResBlock(32*8, pool12) up16 = concatenate([Conv2DTranspose( 32*4, (2, 2), strides=(2, 2), padding='same')(mresblock13), mresblock12, mresblock3], axis=3) mresblock16 = MultiResBlock(32*4, up16) up17 = concatenate([Conv2DTranspose( 32*2, (2, 2), strides=(2, 2), padding='same')(mresblock16), mresblock11, mresblock2], axis=3) mresblock17 = MultiResBlock(32*2, up17) up18 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(mresblock17), mresblock10, mresblock1], axis=3) mresblock18 = MultiResBlock(32, up18) g = Conv2DTranspose(3, (4,4), strides=(1,1), padding='same')(mresblock18) output = Activation('tanh')(g) model = Model(inputs,output) return model def main(): # Define the model model = MultiResUnet(256, 256,3) model.summary() if __name__ == '__main__': main() # plot the model #plot_model(model, to_file='generator_model_plot.png', show_shapes=True, show_layer_names=True) # In[ ]: from keras.layers import Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, add, Concatenate from keras.models import Model, model_from_json from keras.optimizers import Adam from keras.layers.advanced_activations import ELU, LeakyReLU from keras.utils.vis_utils import plot_model from keras.initializers import RandomNormal from tensorflow.keras.losses import BinaryCrossentropy def define_discriminator(image_shape): # weight initialization init = RandomNormal(stddev=0.02) # source image input in_src_image = Input(shape=image_shape) # target image input in_target_image = Input(shape=image_shape) # concatenate images channel-wise merged = Concatenate()([in_src_image, in_target_image]) # C64 d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(merged) d = LeakyReLU(alpha=0.2)(d) # C128 d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d) d = BatchNormalization()(d) d = LeakyReLU(alpha=0.2)(d) # C256 d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d) d = BatchNormalization()(d) d = LeakyReLU(alpha=0.2)(d) # second last output layer d = Conv2D(256, (4,4), padding='same', kernel_initializer=init)(d) d = BatchNormalization()(d) d = LeakyReLU(alpha=0.2)(d) # patch output d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d) patch_out = Activation('sigmoid')(d) # define model model = Model([in_src_image, in_target_image], patch_out) # compile model opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss= BinaryCrossentropy(from_logits=True), optimizer=opt, loss_weights=[0.5]) return model # define image shape image_shape = (256,256,3) # create the model model = define_discriminator(image_shape) # summarize the model model.summary() # plot the model #plot_model(model, to_file='/content/drive/My Drive/test/discriminator_model_plot.png', show_shapes=True, show_layer_names=True) # In[ ]: from tensorflow.keras.losses import BinaryCrossentropy def define_gan(g_model, d_model, image_shape): # make weights in the discriminator not trainable d_model.trainable = False # define the source image in_src = Input(shape=image_shape) in_target = Input(shape = image_shape) # connect the source image to the generator input gen_out = g_model(in_src) # connect the source input and generator output to the discriminator input dis_out = d_model([in_src, gen_out]) # src image as input, generated image and classification output model = Model([in_src,in_target], [dis_out, gen_out]) # compile model opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss=[BinaryCrossentropy(from_logits=True), 'mae'], optimizer=opt, loss_weights=[1,100]) return model # In[ ]: def generate_real_samples(n_samples, patch_shape): # unpack dataset # choose random instances ix = np.random.randint(0, trainA.shape[0], n_samples) # retrieve selected images X1, X2 = trainA[ix], trainB[ix] # generate 'real' class labels (1) y = np.ones((n_samples, patch_shape, patch_shape, 1)) return [X1, X2], y # In[1]: def generate_fake_samples(g_model, samples, patch_shape): # generate fake instance X = g_model.predict(samples) # create 'fake' class labels (0) y = np.zeros((len(X), patch_shape, patch_shape, 1)) return X, y # In[18]: # generate samples and save as a plot and save the model def summarize_performance(step, g_model, d_model, gan_model, n_samples=1): # select a sample of input images [X_realA, X_realB], _ = generate_real_samples(n_samples, 1) # generate a batch of fake samples X_fakeB, _ = generate_fake_samples(g_model, X_realA, 1) # scale all pixels from [-1,1] to [0,1] X_realA = (X_realA + 1) / 2.0 X_realB = (X_realB + 1) / 2.0 X_fakeB = (X_fakeB + 1) / 2.0 X_fakeB = 255 * X_fakeB # plot generated target image for i in range(n_samples): plt.subplot(3, n_samples, 1 + n_samples + i) plt.axis('off') plt.imshow(X_fakeB[i]) # save plot to file filename1 = 'test1/plot_%06d.png' % (step+1) cv2.imwrite(filename1,X_fakeB[0]) # save the generator, discriminator and gan models filename2 = 'test1/g_model_%06d.h5' % (step+1) g_model.save(filename2) #filename3 = 'test/d_model_%06d.h5' % (step+1) #d_model.save(filename3) #filename4 = 'test/gan_model_%06d.h5' % (step+1) #gan_model.save(filename4) print('>Saved: %s and %s' % (filename1, filename2)) # In[32]: def train(d_model, g_model, gan_model, n_epochs=200, n_batch=1, n_patch=32): # unpack dataset # calculate the number of batches per training epoch bat_per_epo = int(len(trainA) / n_batch) # calculate the number of training iterations n_steps = bat_per_epo * n_epochs # manually enumerate epochs for i in range(n_steps): # select a batch of real samples [X_realA, X_realB], y_real = generate_real_samples( n_batch, n_patch) # generate a batch of fake samples X_fakeB, y_fake = generate_fake_samples(g_model, X_realB, n_patch) # update discriminator for real samples d_loss1 = d_model.train_on_batch([X_realB, X_realA], y_real) # update discriminator for generated samples d_loss2 = d_model.train_on_batch([X_realB, X_fakeB], y_fake) # update the generator g_loss, _, _ = gan_model.train_on_batch([X_realB,X_realA], [y_fake, X_realA]) # summarize performance print('>%d, d1[%.3f] d2[%.3f] g[%.3f]' % (i+1, d_loss1, d_loss2, g_loss)) # summarize model performance if (i+1) % (bat_per_epo * 1) == 0: summarize_performance(i, g_model,d_model, gan_model) # In[ ]: image_shape = (256,256,3) # define the models d_model = define_discriminator(image_shape) g_model = MultiResUnet(256,256,3) # define the composite model gan_model = define_gan(g_model, d_model, image_shape) # train model train(d_model, g_model, gan_model)
programmer-770/Image_Deraining_GANs
multi_res_unet-Copy1.py
multi_res_unet-Copy1.py
py
13,838
python
en
code
1
github-code
36
35699327063
import os, json, sys import scrapy def get_report(select): anaul_reports = select.xpath('//*[@id="nav-main-backgroundItem"]/div[@tyc-event-ch="CompangyDetail.nianbao"]/div[2]/div/table/tbody/tr') Ltemp = [] for t in anaul_reports: temp = {} temp['企业年报'] = t.xpath('./td[2]/text()').extract_first() temp['年报链接'] = t.xpath('./td[3]/a/@href').extract_first() Ltemp.append(temp) return Ltemp
elysium-amami/tianyancha_spyder
get_anaul_report.py
get_anaul_report.py
py
451
python
en
code
3
github-code
36
18499875935
def sumar(num1, num2): sum = num1+num2 return sum def multiplicar(num1, num2): return num1*num2 def dividir(num1, num2): try: div = num1/num2 return div except Exception as e: print("Error trying to operate: ", e) return 0 def power(num1, num2): return num1**num2 def salida(opcion): if opcion == 1: suma = sumar(num1, num2) ans["suma"] = suma print(suma) elif opcion == 2: mult = multiplicar(num1,num2) ans["multiplicacion"] = mult print(mult) elif opcion == 3: div = dividir(num1, num2) ans["division"] = div print(div) elif opcion == 4: exp = power(num1,num2) ans["exponente"] = exp print(exp) elif opcion == 5: pass elif opcion == 0: print("Bye!") print(ans) else: print("Not able to process the instruction") ans = {} opcion = 2 while(opcion != 0): num1 = int(input("Introduzca numero 1: ")) num2 = int(input("Introduzca numero 2: ")) opcion = int(input("Escriba opcion: \n [0] Salir \n \n [1] Suma \n [2] Multiplicación \n [3] División \n [4] Exponencial \n")) salida(opcion) opcion = int(input("Para continuar pulse 5, para salir, pulse 0")) salida(opcion)
laramruma/Cisco
05_calculator.py
05_calculator.py
py
1,208
python
es
code
0
github-code
36
12782770518
from estimate_explosion_time.shared import get_custom_logger, main_logger_name, pickle_dir import logging logger = get_custom_logger(main_logger_name) logger.setLevel(logging.INFO) logger.debug('logging level is DEBUG') from estimate_explosion_time.analyses.rappid_simulations import rappidDH from estimate_explosion_time.core.fit_data.fitlauncher.fitlauncher import Fitter from estimate_explosion_time.analyses.rappid_simulations.convert_to_pickle_files import \ rappid_pkl_name, write_model_to_pickle, rappid_original_data import os # only include lightcurves with a peak magnitude brighter than this peak_mag = 19 # take the original simulated data and convert it into pickles in the right format # the path to the original data is to be specified in convert_to_pickle_files.py for model_number in [3, 13]: if not os.path.isfile(rappid_pkl_name(model_number, peak_mag)): write_model_to_pickle(model_number, peak_mag) # sepcify where to look for the SED files that were used in the simulation. # That's necesarry for getting the explosion time from the template sed_directory = rappid_original_data + '/SEDs' # get the lightcurves either generated using MOSFiT type 'mosfit' # or using specral templates type 'templates' generated_with = 'mosfit' # get the DataHandler object who takes care of all the book keeping thisDH = rappidDH.get_dhandler(generated_with, sed_directory=sed_directory) # get the explosion times for the simulations thisDH.get_explosion_times_from_template(ncpu=45) # fit the lightcurves with the desired method (only 'mosfit' is good!) method = 'mosfit' fitter = Fitter.get_fitter(method) logger.debug( f'fitter method {fitter.method_name} \n' f'job-id {fitter.job_id}' ) missing_indice_file = f'{pickle_dir}/{thisDH.name}/{fitter.method_name}/missing_indices.txt' fitter.fit_lcs(thisDH, tasks_in_group=100, # missing_indice_file=missing_indice_file # to be used when repeating the fit ) # make a selection of lightcurves based on the available photometry thisDH.select_and_adjust_selection_string() # get the results thisDH.results('mosfit')
JannisNe/ztf_SN-LCs-explosion_time_estimation
estimate_explosion_time/analyses/rappid_simulations/complete_analyses.py
complete_analyses.py
py
2,150
python
en
code
0
github-code
36
3020252175
class Solution: def permute(self, nums: List[int]) -> List[List[int]]: if not nums: return [] elif len(nums) == 1: return [nums] else: res = [] for i in range(len(nums)): copy = list(nums) x = copy.pop(i) perms = self.permute(copy) perms = [[x] + p for p in perms] res.extend(perms) return res
cdluminate/MyNotes
algo/lc.46.py
lc.46.py
py
462
python
en
code
0
github-code
36
34588733658
import os import atexit import secrets import unittest from functools import wraps import augpathlib as aug from sxpyr import sxpyr from pyontutils.utils import Async, deferred # TODO -> asyncd in future from pyontutils.utils_fast import isoformat from sparcur import exceptions as exc from sparcur.utils import GetTimeNow, log from sparcur.paths import PennsieveCache, LocalPath, Path from sparcur.backends import PennsieveRemote from .common import test_organization, test_dataset, _pid from .common import skipif_ci, skipif_no_net import pytest class _TestOperation: _cache_class = PennsieveCache _remote_class = PennsieveRemote @classmethod def tearDownClass(cls): base = aug.AugmentedPath(__file__).parent / f'test-operation-{_pid}' if base.exists(): base.popd() # in case we were inside it pop back out first base.rmtree() def setUp(self): class Cache(self._cache_class): pass Cache._bind_flavours() base = aug.AugmentedPath(__file__).parent / f'test-operation-{_pid}' if base.exists(): base.popd() # in case we were inside it pop back out first base.rmtree() base.mkdir() base.pushd() self.Remote = self._remote_class._new(Cache._local_class, Cache) self.Remote.init(test_organization) self.anchor = self.Remote.dropAnchor(base) self.root = self.anchor.remote self.project_path = self.anchor.local list(self.root.children) # populate datasets self.test_base = [ p for p in self.project_path.children if p.cache.id == test_dataset][0] list(self.test_base.rchildren) # populate test dataset asdf = self.root / 'lol' / 'lol' / 'lol (1)' class Fun(os.PathLike): name = 'hohohohohoho' def __fspath__(self): return '' @property def size(self): return aug.FileSize(len(b''.join(self.data))) @property def data(self): for i in range(100): yield b'0' * 1000 #wat = asdf.bfobject.upload(Fun(), use_agent=False) #breakpoint() @skipif_ci @skipif_no_net class TestDelete(_TestOperation, unittest.TestCase): def test_0(self): assert True def test_1_case(self): # this is an old scenario that happens because of how the old system worked # local working directory | x # local cache directory | o # remote | o pass def make_rand(n, width=80): lw = width + 1 hex_lw = lw // 2 n_lines = n // lw n_accounted = n_lines * lw h_lines = n // (hex_lw * 2) h_accounted = h_lines * (hex_lw * 2) ldiff = n_lines - h_lines adiff = n_accounted - h_accounted accounted = n_accounted missing = n - accounted hex_missing = (missing + 1) // 2 diff = hex_missing * 2 - missing hexstart = width % 2 # almost there fails on 71 # also fails len(make_rand(102, 101)) - 102 log.debug((adiff, ldiff, missing, diff)) string = '\n'.join([secrets.token_hex(hex_lw)[hexstart:] for i in range(n_lines)] + [secrets.token_hex(hex_missing)[diff:-1] + '\n']) return string.encode() @skipif_ci @skipif_no_net @pytest.mark.skip('VERY SLOW') class TestFilenames(_TestOperation, unittest.TestCase): _evil_names = ( # '......................', # this breaks the agent with infinite timeout '!@#$%^&*()[]{}`~;:,', '(╯°□°)╯︵ ┻━┻)', 'הָיְתָהtestالصفحات التّحول', 'הָיְתָהtestالصفحا تالتّحول', # check bucket names '᚛ᚄᚓᚐᚋᚒᚄ ᚑᚄᚂᚑᚏᚅ᚜', 'Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮', # '𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌', # this breaks the agent with ERRORED 'evil file space', 'evil_file underscore', 'evil-file dash', 'evil%20file percent 20', 'hello%20world%20%60~%21%40%23%24%25%5E%26%2A%28%29%5B%5D%7B%7D%27', # the problem is that we don't know whether we can actually # decode a file name, and wthe database stores the encoded filename 'hello%20world', 'hello%20world~', ) @property def _more_evil_names(self): # class scope strikes back! LOL PYTHON return [ name for char in ('\x07', #'/', # have to do this in a different way on unix '\\', '|', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '[', ']', '{', '}', "'", '`', '~', ';', ':', ',', '"', '?', '<', '>', ) for name in (f'prefix{char}', f'prefix{char}suffix', f'{char}suffix',)] @staticmethod def _op(test_folder, fsize, name): test_file_a = test_folder / (name + '.ext') test_file_b = test_folder / (name + '.txe') test_folder_i = test_folder / name for _f in (test_file_a, test_file_b): if _f.exists() or _f.is_broken_symlink(): msg = (f'bad test environment: file/link already exists: {_f}') raise FileExistsError(msg) # FIXME maybe don't straight fail here, but instead # don't upload and just compare the existing name? # the fact that we get an error is a sign that the # name matches actually ... so not getting an error # in subsequent runs is bad ... for test_base at least test_file_a.data = iter((make_rand(fsize),)) test_file_b.data = iter((make_rand(fsize),)) try: remote_a = test_file_a.upload() name_a = remote_a.bfobject.name except Exception as e: name_a = e try: remote_b = test_file_b.upload() name_b = remote_b.bfobject.name except Exception as e: name_b = e try: remote_i = test_folder_i.mkdir_remote() name_i = remote_i.bfobject.name except Exception as e: name_i = e return name_a, name_b, name_i def test_filenames_more_evil(self): return self.test_filenames_evil(self._more_evil_names) def test_filenames_evil(self, names=_evil_names): # XXX warning slow! now = GetTimeNow() local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' remote = local.mkdir_remote() try: # FIXME consider going back to self.test_base instead of local here test_folder = local / 'pandora' test_folder.mkdir_remote() test_folder.__class__.upload = Path.upload fsize = 1024 # needed for uniqueish hashes colloisions will still happen # FIXME this pretty clearly reveals a need for # batching to multiplex the fetch ... SIGH results = Async(rate=10)(deferred(self._op)(test_folder, fsize, name) for name in names) #results = [] #for name in names: # name_a, name_b, name_i = self._op(test_folder, fsize, name) # results.append((name_a, name_b, name_i)) finally: remote.rmdir(force=True) # FIXME crumple fails in refresh since we use rmdir # instead of rmtree (for safety) #remote.cache.refresh() # FIXME @skipif_ci @skipif_no_net class TestUpdate(_TestOperation, unittest.TestCase): @pytest.mark.skip('the question has been answered') def test_process_filesize_limit(self): # so the 1 mb size file works, eventually, something else is wrong test_folder = self.test_base / 'hrm' test_folder.mkdir_remote() test_folder.__class__.upload = Path.upload for i in range(1024 ** 2, 5 * 1024 ** 2, 1024 ** 2): test_file = test_folder / f'size-{i}' if test_file.is_broken_symlink(): test_file.remote.bfobject.package.delete() test_file.unlink() test_file = test_folder / f'size-{i}' # remove stale cache test_file.data = iter((make_rand(i),)) remote = test_file.upload() def test_upload_noreplace(self): for i in range(2): test_file = self.test_base / 'dataset_description.csv' test_file.data = iter((make_rand(100),)) # FIXME temp sandboxing for upload until naming gets sorted test_file.__class__.upload = Path.upload # create some noise remote = test_file.upload(replace=False) print(remote.bfobject.package.name) def test_upload_noreplace_fail(self): # some persistent state from other tests is causing this to fail test_file = self.test_base / 'dataset_description.csv' test_file.data = iter((make_rand(100),)) # FIXME temp sandboxing for upload until naming gets sorted test_file.__class__.upload = Path.upload test_file.upload(replace=False) try: test_file.upload(replace=False) assert False, 'should have failed' except exc.FileHasNotChangedError: pass def test_upload_replace(self): test_file = self.test_base / 'dataset_description.csv' test_file.data = iter((make_rand(100),)) # FIXME temp sandboxing for upload until naming gets sorted test_file.__class__.upload = Path.upload test_file.upload() @skipif_ci @skipif_no_net class TestClone(_TestOperation, unittest.TestCase): # TODO test a variety of clone scenarios # and consider whether testing for and # existing root should be done in dropAnchor def setUp(self): super().setUp() self.alt_project_path = self.project_path.parent / 'alt' / self.project_path.name if self.alt_project_path.parent.exists(): self.alt_project_path.parent.rmtree() self.alt_project_path.mkdir(parents=True) def _do_target(self, target, expect_error_type=None): class Cache(self._cache_class): pass Cache._bind_flavours() BFR = self._remote_class._new(LocalPath, Cache) BFR.init(test_organization) if expect_error_type: try: anchor = BFR.dropAnchor(target.parent) raise AssertionError(f'should have failed with a {expect_error_type}') except expect_error_type as e: pass else: anchor = BFR.dropAnchor(target.parent) def test_1_in_project(self): target = self.project_path / 'some-new-folder' target.mkdir() self._do_target(target) # FIXME succeeds for now, but probably should not? def test_2_project_top_level(self): target = self.project_path self._do_target(target, exc.DirectoryNotEmptyError) def test_3_existing_empty(self): target = self.alt_project_path self._do_target(target) def test_4_existing_has_folder(self): target = self.alt_project_path child = target / 'a-folder' child.mkdir(parents=True) self._do_target(target, exc.DirectoryNotEmptyError) def test_5_existing_has_file(self): target = self.alt_project_path child = target / 'a-file' child.touch() self._do_target(target, exc.DirectoryNotEmptyError) def test_6_existing_has_local_data_dir(self): target = self.alt_project_path child = target / self.anchor._local_data_dir child.mkdir() self._do_target(target, exc.DirectoryNotEmptyError) @skipif_ci @skipif_no_net class TestMkdirRemote(_TestOperation, unittest.TestCase): def test_mkdir_remote_parents_false(self): now = GetTimeNow() local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' / 'some-folder' try: remote = local.mkdir_remote() raise AssertionError('Should have failed since parents=False') except FileNotFoundError: pass def test_0_mkdir_remote_will_be_dataset(self): now = GetTimeNow() local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' remote = local.mkdir_remote() remote.rmdir() remote.cache.refresh() # reminder that remotes are a snapshot in time, NOT dynamic assert not local.exists(), f'should have been deleted {remote}' def test_1_mkdir_remote_will_be_collection(self): now = GetTimeNow() local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' / 'some-folder' remote = local.mkdir_remote(parents=True) parent = remote.parent try: parent.rmdir() # should fail here try: remote.rmdir() # insurance except BaseException as e: log.exception(e) finally: raise AssertionError('remote parent should NOT have rmdired {parent}') except exc.PathNotEmptyError: pass try: remote.rmdir() remote.cache.refresh() assert not local.exists(), f'should have been deleted {remote}' finally: lparent = parent.local parent.cache.refresh() # we just removed the child so the parent is stale parent.rmdir() parent.cache.refresh() assert not lparent.exists(), f'should have been deleted {parent}' class TestMoveFolder(_TestOperation, unittest.TestCase): def test_reparent(self): # TODO this is nowhere near complete with respect to synchronization # but it is sufficient to test the components needed for sync now = GetTimeNow() local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' remote = local.mkdir_remote() try: # FIXME consider going back to self.test_base instead of local here test_folder_1 = local / 'dire-1' test_folder_1.mkdir_remote() test_folder_2 = local / 'dire-2' test_folder_2.mkdir_remote() list(remote.cache.children) # XXX populate with remote data test_folder_2.remote.reparent(test_folder_1.cache_id) test_folder_1.__class__.upload = Path.upload fsize = 1024 test_file_1 = test_folder_1 / 'file-1.ext' test_file_1.data = iter((make_rand(fsize),)) test_file_1.upload() test_file_1.remote.reparent(test_folder_2.cache_id) finally: remote.rmdir(force=True) # FIXME crumple fails in refresh since we use rmdir # instead of rmtree (for safety) #remote.cache.refresh() # FIXME class _ChangesHelper: _local_only = True def _make_ops(self): ops = tuple() # dirs dataset = 'project/dataset' ops += ( #(0, 'mkdir', 'project'), # don't actually need this since we replace it when building paths (0, 'mkdir', dataset), (0, 'mkdir', 'project/dataset/dire-1'), (0, 'mkdir', 'project/dataset/dire-2'), (0, 'mkdir', 'project/dataset/dire-6'), ) # sources d3_1 = 'project/dataset/dire-1/dire-3-1-rn' d3_2 = 'project/dataset/dire-1/dire-3-2-rp' d3_3 = 'project/dataset/dire-1/dire-3-3-np' f1_0 = 'project/dataset/dire-1/file-1-0.ext' f1_1 = 'project/dataset/dire-1/file-1-1-rn.ext' f1_2 = 'project/dataset/dire-1/file-1-2-rp.ext' f1_3 = 'project/dataset/dire-1/file-1-3-np.ext' l1_0 = 'project/dataset/dire-1/link-1-0.ext' l1_1 = 'project/dataset/dire-1/link-1-1-rn.ext' l1_2 = 'project/dataset/dire-1/link-1-2-rp.ext' l1_3 = 'project/dataset/dire-1/link-1-3-np.ext' # targets # TODO need variants of all of these where we lose the metadata probably? ops += ( (0, 'mkdir', d3_1), (0, 'mkdir', d3_2), (0, 'mkdir', d3_3), (0, 'mkfile', f1_0), # nochange (0, 'mkfile', f1_1), (0, 'mkfile', f1_2), (0, 'mkfile', f1_3), (0, 'mklink', l1_0), # nochange (0, 'mklink', l1_1), (0, 'mklink', l1_2), (0, 'mklink', l1_3), # moves: renames, reparents, rename_reparent (1, 'rename', d3_1, 'project/dataset/dire-1/dire-3-1-rn-r'), # rn (1, 'rename', d3_2, 'project/dataset/dire-2/dire-3-2-rp'), # rp (1, 'rename', d3_3, 'project/dataset/dire-2/dire-3-3-np-r'), # rnp (1, 'rename', f1_1, 'project/dataset/dire-1/file-1-1-rn-r.ext'), # rn (1, 'rename', f1_2, 'project/dataset/dire-2/file-1-2-rp.ext'), # rp (1, 'rename', f1_3, 'project/dataset/dire-2/file-1-3-np-r.ext'), # rnp (1, 'rename', l1_1, 'project/dataset/dire-1/link-1-1-rn-r.ext'), # rn (1, 'rename', l1_2, 'project/dataset/dire-2/link-1-2-rp.ext'), # rp (1, 'rename', l1_3, 'project/dataset/dire-2/link-1-3-np-r.ext'), # rnp # add (1, 'mkdir', 'project/dataset/dire-6/dire-7-add'), (1, 'mkfile', 'project/dataset/dire-6/file-4-add.ext'), (2, 'mklink', 'project/dataset/dire-6/link-4-add.ext'), # XXX this causes an error because it looks like the index is out of synx ) # change (only applies to files) f5_1 = 'project/dataset/dire-6/file-5-1-cd_.ext' f5_2 = 'project/dataset/dire-6/file-5-2-c_m.ext' f5_3 = 'project/dataset/dire-6/file-5-3-c_x.ext' f5_4 = 'project/dataset/dire-6/file-5-4-cdm.ext' f5_5 = 'project/dataset/dire-6/file-5-5-cdx.ext' # file_id change ? should be impossible ... ops += ( (0, 'mkfile', f5_1), (0, 'mkfile', f5_2), (0, 'mkfile', f5_3), (0, 'mkfile', f5_4), (0, 'mkfile', f5_5), # TODO probably also change size (1, 'change', f5_1, True, False), # data (1, 'change', f5_2, False, True), # metadata (1, 'change', f5_3, False, None), # no metadata # can handle this from objects cache (1, 'change', f5_4, True, True), # data metadata (1, 'change', f5_5, True, None), # data no metadata ) # remove d9 = 'project/dataset/dire-6/dire-9-rem' f6 = 'project/dataset/dire-6/file-6-rem.ext' l6 = 'project/dataset/dire-6/link-6-rem.ext' ops += ( (0, 'mkdir', d9), (0, 'mkfile', f6), (0, 'mklink', l6), (1, 'remove', d9), (1, 'remove', f6), (1, 'remove', l6), ) # build the indexes so we can do the diff ops += ( (0.5, 'index', dataset), ) return ops def setUp(self): # TODO construct the template we need super().setUp() self.Path = self.Remote._local_class #sigh = list(self.project_path.remote.children) #[s.rmdir(force=True) for s in sigh if '2023' in s.name] #breakpoint() #raise ValueError('cleanup tearDown failure mess') # TODO expected outcome after stage probably if self._local_only: def populate_cache(p, change=False): # FIXME TODO change=True case may need special handling # and probably represents a strange case of some kind return p._cache_class.fromLocal(p) def norm_path(p): return self.project_path / p.replace('project/', '') else: # XXX WARNING extremely slow due to sequentially creating each remote file now = GetTimeNow() local_dataset = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' remote_dataset = local_dataset.mkdir_remote() #self._test_dataset = remote_dataset # tearDown fails to trigger if failure happens in setUp which is useless # so use atexit instead atexit.register(lambda : remote_dataset.rmdir(force=True)) def populate_cache(p, change=False): # FIXME TODO change=True case may need special handling # and probably represents a strange case of some kind if change: return remote = p.create_remote() return remote.cache def norm_path(p): return local_dataset / p.replace('project/dataset', '').strip('/') def mkdir(d, add=False): if not self._local_only and d == local_dataset: # FIXME HACK return d.mkdir() if not add: cache = populate_cache(d) d._cache = cache #d._cache_class.fromLocal(d) def mkfile(f, add=False): f.data = iter((make_rand(100),)) if not add: cache = populate_cache(f) f._cache = cache #f._cache_class.fromLocal(f) def mklink(l): try: l.data = iter((make_rand(100),)) # issue with id and parent_id not being set so use fromLocal since it does it correctly #meta = f.meta # TODO checksum probably? #symlink = meta.as_symlink(local_name=l.name) #cache = l._cache_class.fromLocal(l) cache = populate_cache(l) symlink = cache.meta.as_symlink(local_name=l.name) finally: l.unlink() l.symlink_to(symlink) def rename(path, target): path.rename(target) def change(f, data, metadata): if data: f.data = iter((make_rand(100),)) if metadata is None or metadata: # must set xattrs to nothing if # we want to change metadata otherwise # PrimaryCache._meta_updater will go haywire and # ... try to delete the file ... and it will # actually delete it instead of crumpling it # so definitely a FIXME very dangerous lose your work # kind of scenario between that _meta_updater and # BFPNCache._actually_crumple and change of BFPNCache.crumple [f.delxattr(k) for k in f.xattrs()] if f.xattrs(): breakpoint() if metadata: if not f.exists(): raise FileNotFoundError(f) try: populate_cache(f, change=True) #f._cache_class.fromLocal(f) except Exception as e: breakpoint() raise e def remove(path): if path.is_dir(): path.rmdir() else: path.unlink() def index(ds): if self._local_only: caches = [l.cache for l in ds.rchildren] # XXX reminder, NEVER use ds.cache.rchildren that will pull class fakeremote: def __init__(self, id, name, parent_id, file_id, updated, local): self.id = id self.name = name self._name = name self.parent_id = parent_id self.updated = updated self._lol_local = local if file_id is not None: self.file_id = file_id def is_dir(self): return self._lol_local.is_dir() for c in caches: # FIXME causes other issues ... even while trying to avoid init issues # we should not have to do this cmeta = c.meta c._remote = fakeremote( cmeta.id, cmeta.name, cmeta.parent_id, cmeta.file_id, cmeta.updated, c.local) else: # this is safe at this stage since everything should match upstream caches = [c.cache for c in local_dataset.rchildren] ds._generate_pull_index(ds, caches) fops = { 'mkdir': mkdir, 'mkfile': mkfile, 'mklink': mklink, 'rename': rename, 'change': change, 'remove': remove, 'index': index, } def make_closure(stage, op, obj, args): f = fops[op] if stage > 0 and op in ('mkdir', 'mkfile'): kwargs=dict(add=True) else: kwargs = {} @wraps(f) def inner(): f(path, *args, **kwargs) return inner def cargs(args): for a in args: if isinstance(a, str) and a.startswith('project/'): yield norm_path(a) else: yield a ops = self._make_ops() pops = [(stage, op, norm_path(s), *cargs(args)) for stage, op, s, *args in ops] init = set([path for stage, op, path, *args in pops if stage == 0]) test = set([p for stage, op, path, *args in pops if stage >= 1 for p in (path, *args) if isinstance(p, self.project_path.__class__)]) nochange = init - test add_rename_reparent = test - init change_remove = test - add_rename_reparent cs = [(stage, path, make_closure(stage, op, path, args)) for stage, op, path, *args in pops] scs = sorted(cs, key=(lambda abc: (abc[0], len(abc[1].parts)))) will_fails = [] for stage, path, fun in scs: if stage > 1: will_fails.append(fun) else: fun() self._will_fails = will_fails self.dataset = pops[0][-1] class TestChanges(_ChangesHelper, _TestOperation, unittest.TestCase): def test_changes(self): from dateutil import parser as dateparser dataset = self.dataset dataset_id, id_name, parent_children, name_id, updated_transitive = dataset._read_indexes() # XXX updated_transitive from _read_indexes is a string because that is what # _transitive_changes needs internally and then transforms to a datetime object # when it returns, therefore we don't fiddle with the types here #tc = dataset._transitive_changes() # XXX see sparcur.simple.utils dataset_id, updated_cache_transitive, diff = dataset.diff() blob = { 'dataset-id': dataset_id.id, 'updated-transitive': updated_transitive, 'diff': diff, } pl = sxpyr.python_to_sxpr(blob, str_as_string=True) sxpr = pl._print(sxpyr.configure_print_plist(newline_keyword=False)) print(sxpr) #pl = sxpyr.python_to_sxpr(diff, str_as_string=True) #sxpr = pl._print(sxpyr.configure_print_plist(newline_keyword=False)) breakpoint() class _WorkflowHelper: def _do_workflow(self, paths_to_add): # push button, receive bacon # 0. asumme there are changes to a dataset # 1. click upload button in main window (python get diff) # argv-simple-diff -> sparcur.simple.utils for-racket diff -> path_dataset.diff() # 2. select specific paths for upload (python nothing) # racket side selects the list of files to push (push_list) that goes into paths.sxpr # which the python side then reads in the next step # 3. click confirm selection checkbox (python generate manifest) # (ensure-directory! (push-dir)) -> updated_transitive push_id # write-push-paths -> {:user-cache-path}/{dataset-uuid}/{updated-transitive}/{push-id}/paths.sxpr -> push_list # argv-simple-make-push-manifest -> sparcur.simple.utils for-racket make-push-manifest -> path_dataset.make_mush_manifest() # 4. click push selected to remote (python push from manifest) # argv-simple-push -> sparcur.simple.utils for-racket push -> path_dataset.push_from_manifest() # 5. TODO I think that after remote changes are made we probably want to create # a modified index file that notes the changes so that incremental changes # do not have to be pulled again ... of course if upstream has changed we are # back in the usual world of pain ... path_dataset = self.dataset # given dataset_id = path_dataset.cache.identifier # 1 __dataset_id, updated_transitive, diff = path_dataset.diff() # 2 # write the push_list to paths.sxpr push_id = path_dataset._write_push_list(dataset_id, updated_transitive, diff, paths_to_add) # 3 path_dataset.make_push_manifest(dataset_id, updated_transitive, push_id) # 4 if not self._local_only: # this fails without remote import pennsieve.api.agent try: path_dataset.push_from_manifest(dataset_id, updated_transitive, push_id) except pennsieve.api.agent.AgentError as e: log.exception(e) pytest.skip('pennsieve error') class TestWorkflow(_ChangesHelper, _WorkflowHelper, _TestOperation, unittest.TestCase): def test_workflow(self): # splitting d r l lets us test incremental changes paths_to_add_d = [ 'dire-1/dire-3-1-rn-r', # rn 'dire-2/dire-3-2-rp', # rp 'dire-2/dire-3-3-np-r', # rnp ] paths_to_add_r = [ 'dire-1/file-1-1-rn-r.ext', # rn 'dire-2/file-1-2-rp.ext', # rp 'dire-2/file-1-3-np-r.ext', # rnp ] paths_to_add_l = [ 'dire-1/link-1-1-rn-r.ext', # rn 'dire-2/link-1-2-rp.ext', # rp 'dire-2/link-1-3-np-r.ext', # rnp ] paths_to_add_2 = [ 'dire-6/file-4-add.ext', # should error for now ] self._do_workflow(paths_to_add_d) self._do_workflow(paths_to_add_r) self._do_workflow(paths_to_add_l) try: self._do_workflow(paths_to_add_2) raise AssertionError('should have failed due to forbidden ops') except ValueError: pass class TestWithRemoteWorkflow(TestWorkflow): _local_only = False class TestRemote(_TestOperation, unittest.TestCase): def test_remote_path_does_not_exist(self): new_thing = self.root / 'does not exist' @pytest.mark.skip('Not ready.') def test_cache_path_does_not_exist(self): """ This should not produce an error. Path objects should be able to be instantiated without po.exists() -> True at a point in time prior to instantiation. """ new_thing = self.anchor / 'does not exist' def __test_cache_path_fake_id(self): # FIXME the right way to do this is np = self.project_path / 'new-path' np.mkdir() npm = np.meta # bad way class FakeBase(aug.RemotePath): def __init__(self, id, name, cache=None): super().__init__(id, cache) self.name = name now = GetTimeNow() self.created = now._start_time self.updated = now._start_time self.checksum = 'lolnone' self.chunksize = 4096 self.file_id = 'asdfasdfasdf' @property def meta(self): return PathMeta(size=self.size, created=self.created, updated=self.updated, checksum=self.checksum, chunksize=self.chunksize, id=self.id, file_id=self.file_id) @property def parent(self): return None def _parts_relative_to(self, remote, cache_parent=None): return [self.name] # This takes way too many steps :/ Fake = FakeBase._new(LocalPath, aug.CachePath) fake = Fake('lol', 'double lol') self.anchor / fake
SciCrunch/sparc-curation
test/test_delete.py
test_delete.py
py
32,914
python
en
code
11
github-code
36
34005697680
from socket import socket from squaring_server.server import ADDRESS, PORT, DISCONNECT, decode, encode client = socket() client.connect((ADDRESS, PORT)) while True: msg = input('Your msg: ') client.send(encode(msg)) if msg == DISCONNECT: break print(f'Server: {decode(client.recv(1024))}') client.close()
siriusdevs/rpm_chat_2023
squaring_server/client.py
client.py
py
333
python
en
code
1
github-code
36
42600269199
# Script to mess around with User authenticated spotify API # For some reason, cannot authenticate with Google Chrome, so instead use Firefox # http://spotipy.readthedocs.io/en/latest/ from pathlib import Path from spotipy.oauth2 import SpotifyClientCredentials import json import spotipy import time import sys import spotipy.util as util import pandas as pd # Set up directory and read credentials directory = Path(__file__).resolve().parent infile = open(directory / "credentials.txt", 'r').readlines() username = "liltkrookie" scope = "user-top-read" sort = "tempo" print(infile[0]) # Send crendetials and ping Spotify API token = util.prompt_for_user_token(username,scope,client_id='8d3383fc5c434af5bf40fb7b2915c618',client_secret=infile[0],redirect_uri='http://localhost:8888/callback') client_credentials_manager = SpotifyClientCredentials(client_id='8d3383fc5c434af5bf40fb7b2915c618', client_secret=infile[0]) sp = spotipy.Spotify(auth=token) playlistdata = sp.current_user_top_tracks(limit=50, offset=0, time_range='medium_term') playlist_json = json.dumps(playlistdata,indent=4) track_list = json.loads(playlist_json) num = len(track_list['items']) tid=[] for i in range(0, num): uri =track_list['items'][i]['uri'] tid.append(uri) # Song Audiofeatures analysis = sp.audio_features(tid) sample_json = json.dumps(analysis) data = pd.read_json(sample_json) # print(data) # Song Metadata analysis2 = sp.tracks(tid) sample_json2 = json.dumps(analysis2) data2 = json.loads(sample_json2) songdata=[] songlabels=['song','uri','artist'] for i in range(0, num): name=data2['tracks'][i]['name'] uri =data2['tracks'][i]['uri'] artist =data2['tracks'][i]['album']['artists'][0]['name'] songdata.append([name, uri, artist]) song_metadata = pd.DataFrame.from_records(songdata, columns=songlabels) # print(song_metadata) # DataFrame merge export = pd.merge(song_metadata,data, how = 'outer', on =['uri']) writer = pd.ExcelWriter(directory / 'top_played_songs.xlsx') export.to_excel(writer,'Sheet1') writer.save() dfList = export['uri'].tolist() print ("Completed download and export of top played songs")
tkajikawa/spotify_api
spotify_test.py
spotify_test.py
py
2,133
python
en
code
0
github-code
36
16191137869
import datetime from django.utils import timezone from django.core.paginator import Paginator from django.db import transaction from django.db.models import Q from the_mechanic_backend.apps.accounts.models import Store from the_mechanic_backend.apps.stock.models import Brand, BrandModel, Spare, SpareCustomer, SpareOrder, SpareSold from the_mechanic_backend.v0.stock import serializers from the_mechanic_backend.v0.utils import Utils, CustomBaseClass, AppUtils class BrandList(CustomBaseClass): """ Brand List and create Endpoint """ def get(self, request): """ returns the list of brand :param request: :return: """ try: search = request.GET.get('search', '') if search: brands = self.get_filter_objects(Brand, name__icontains=search) else: brands = self.get_all_objects(Brand) serializer = serializers.BrandSerializer(brands, many=True) return Utils.dispatch_success(request, serializer.data) except Exception as e: return self.internal_server_error(request, e) def post(self, request): """ Creates a new brand :param request: { "name" : "Honda" } :return: """ try: serializer = serializers.BrandSerializer(data=request.data) if serializer.is_valid(): serializer.save() return Utils.dispatch_success(request, serializer.data) return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors) except Exception as e: return self.internal_server_error(request, e) class BrandModelList(CustomBaseClass): """ Brand Model List and create Endpoint """ def get(self, request, brand_id): """ Returnt the list of Models of particular brand :param request: :param brand_id: :return: """ try: search = request.GET.get('search', '') if search: brands = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=search) else: brands = self.get_filter_objects(BrandModel, brand=brand_id) serializer = serializers.BrandModelSerializer(brands, many=True) return Utils.dispatch_success(request, serializer.data) except Exception as e: return self.internal_server_error(request, e) def post(self, request, brand_id): """ Creates a new brand model :param request: { "model_name" : "Unicorn" } :param brand_id: :return: """ try: data = request.data data['brand'] = brand_id serializer = serializers.AddBrandModelSerializer(data=request.data) if serializer.is_valid(): serializer.save() return Utils.dispatch_success(request, serializer.data) return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors) except Exception as e: return self.internal_server_error(request, e) class SpareList(CustomBaseClass): def get(self, request, store_id, brand_model_id): """ return a list of spares for particular model :param request: @query_param search=search_text - to search the spares out_of_stock=true - to get only out of stock Note - we can use both at same time :) :param store_id :param brand_model_id: :return: """ try: search = request.GET.get('search') out_of_stock = request.GET.get('out_of_stock') spare = self.get_filter_objects(Spare, brand_model=brand_model_id, store=store_id) if search: spare = spare.filter(Q(spare_id__icontains=search) | Q(spare_name__icontains=search)) if out_of_stock: spare = spare.filter(quantity=0) serializer = serializers.SpareSerializer(spare, many=True) return Utils.dispatch_success(request, serializer.data) except Exception as e: return self.internal_server_error(request, e) def post(self, request, store_id, brand_model_id): """ Create a spare :param request: { "spare_name": "SIde Mirror", "spare_id": #34545435, "quantity": 10, "per_price": "500", "suppliers": "Glass India", "quality_class": "FIRST" } :param store_id: :param brand_model_id: :return: """ try: data = request.data brand_model = self.get_object(BrandModel, brand_model_id) if not brand_model: return self.object_not_found(request) data['brand'] = brand_model.brand.id data['store'] = store_id data['brand_model'] = brand_model_id serializer = serializers.AddSpareSerializer(data=request.data) if serializer.is_valid(): serializer.save() return Utils.dispatch_success(request, serializer.data) return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors) except Exception as e: return self.internal_server_error(request, e) class SpareDetails(CustomBaseClass): """ particular spare details """ def get(self, request, spare_id): """ Return requested spare :param request: :param spare_id: :return: """ try: spare = self.get_object(Spare, spare_id) if not spare: return self.object_not_found(request) serializer = serializers.SpareSerializer(spare) return Utils.dispatch_success(request, serializer.data) except Exception as e: return self.internal_server_error(request, e) def put(self, request, spare_id): """ Updates the requested spare :param request: # partial fields are also acceptable { "spare_name": "SIde Mirror", "spare_id": #34545435, "quantity": 10, "per_price": "500", "suppliers": "Glass India", "quality_class": "FIRST" } :param spare_id: :return: """ try: spare = self.get_object(Spare, spare_id) if not spare: return self.object_not_found(request) serializer = serializers.AddSpareSerializer(spare, request.data, partial=True) if serializer.is_valid(): serializer.save() return Utils.dispatch_success(request, serializer.data) return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors) except Exception as e: return self.internal_server_error(request, e) def delete(self, request, spare_id): """ Delete the request spare :param request: :param spare_id: :return: """ try: spare = self.get_object(Spare, spare_id) if not spare: return self.object_not_found(request) spare.delete() return Utils.dispatch_success(request, 'SUCCESS') except Exception as e: return self.internal_server_error(request, e) class SpareSearchList(CustomBaseClass): def get(self, request, store_id): """ return a list of spares for all models :param request: @query_param search=search_text - to search the spares out_of_stock=true - to get only out of stock Note - we can use both at same time :) :param store_id :param brand_model_id: :return: """ try: search = request.GET.get('search') out_of_stock = request.GET.get('out_of_stock') spare = self.get_filter_objects(Spare, store=store_id) if search: spare = spare.filter(Q(spare_id__icontains=search) | Q(spare_name__icontains=search)) if out_of_stock: spare = spare.filter(quantity=0) serializer = serializers.SpareSerializer(spare, many=True) return Utils.dispatch_success(request, serializer.data) except Exception as e: return self.internal_server_error(request, e) class SpareOrderList(CustomBaseClass): def get(self, request, store_id, *args, **kwargs): """ Returns the list of Spares based on Store :param request: # params start_date=2019-01-31&& end_date=2019-12-31&& page=1 :param store_id: :param args: :param kwargs: :return: """ try: start_date = request.GET.get('start_date') end_date = request.GET.get('end_date') page = request.GET.get('page', 1) search = request.GET.get('search', None) if search: qs = SpareOrder.objects.filter(store=store_id, order_id__icontains=search) else: if start_date: start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d') end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1) qs = SpareOrder.objects.filter(store=store_id, order_date__range=[start_date, end_date]) paginator = Paginator(qs, per_page=10) serializer = serializers.SpareOrderHistorySerializer(paginator.page(page), many=True) response_data = { "data": serializer.data, "page": int(page), "total_pages": paginator.num_pages } return Utils.dispatch_success(request, response_data) except Exception as e: return self.internal_server_error(request, e) def post(self, request, store_id, *args, **kwargs): """ Create a new Order :param request: { "customer_info": { "name": "Muthu Kumar", "email": "itmemk@gmail.com", "phone_number": "9876543210", "address": "ADDRESSS" }, "order_type": "IN_SOURCE / OUT_SOURCE", "bike_number": "TN41Y5644", "labour_charge": 0.00, "out_source_charge": 0.00, "spares": [ { "spare_id": 1, "spare_price_type": 'MRP / MECHANIC / WHOLESALER / CUSTOMER', "spare_count": 2 }, { "spare_id": 1, "spare_price_type": 'MRP / MECHANIC / WHOLESALER / CUSTOMER', "spare_count": 2 }, { "spare_id": 1, "spare_price_type": 'MRP / MECHANIC / WHOLESALER / CUSTOMER', "spare_count": 2 } ] } :param store_id: :param args: :param kwargs: :return: """ try: data = request.data try: customer = SpareCustomer.objects.get(phone_number=data['customer_info']['phone_number']) except SpareCustomer.DoesNotExist: customer_serializer = serializers.SpareCustomerSerializer(data=data['customer_info']) if customer_serializer.is_valid(): customer_serializer.save() else: return Utils.dispatch_failure(request, "VALIDATION_ERROR", customer_serializer.errors) customer = SpareCustomer.objects.get(id=customer_serializer.data['id']) today = datetime.date.today() today_order_count = SpareOrder.objects.filter(order_date__year=today.year, order_date__month=today.month).count() order_id = 'SPOR{}{:05d}'.format(today.strftime("%Y%m"), today_order_count + 1) with transaction.atomic(): store = self.get_object(Store, store_id) share_message = f"You're successfully purchased following items from {store.name}, {store.branch}.\n" order = SpareOrder(order_id=order_id, store=store, order_type=data['order_type'], customer=customer, total=0.0, sold_by=request.user) order.save() total = 0.0 spares_to_be_created = [] for _spare in data['spares']: spare = self.get_object(Spare, _spare['spare_id']) price_map = { 'MRP': spare.mrp_price, 'MECHANIC': spare.mechanic_price, 'WHOLESALER': spare.wholesaler_price, 'CUSTOMER': spare.customer_price, } sold_spare = SpareSold(order=order, spare=spare, spare_count=_spare['spare_count'], spare_name=spare.spare_name, spare_buying_price=spare.buying_price, spare_price=price_map[_spare['spare_price_type']], spare_price_type=_spare['spare_price_type']) spares_to_be_created.append(sold_spare) current_total = float(sold_spare.spare_count * sold_spare.spare_price) total = total + current_total spare.quantity = spare.quantity - sold_spare.spare_count spare.save() share_message += f"{sold_spare.spare_name} -- {sold_spare.spare_count} x {sold_spare.spare_price} = {current_total}\n" SpareSold.objects.bulk_create(spares_to_be_created) if order.order_type: order.bike_number = data['bike_number'] order.labour_charge = data['labour_charge'] order.out_source_charge = data['out_source_charge'] total = total + order.labour_charge + order.out_source_charge share_message += f"Labour Charge = {order.labour_charge}\n\n" \ f"Out Source Charge = {order.out_source_charge}\n\n" order.total = total order.save() share_message += f"Grand total = {total}.\n\n" \ f"Order ID: {order_id}\n\n" \ f"Date: {today.strftime('%d-%m-%Y')}\n\nThank you for purchasing with us!" return Utils.dispatch_success(request, {'order_id': order.id, 'spareorder_id':order_id, 'share_info': share_message}) except Exception as e: return self.internal_server_error(request, e) class SparesAccountingView(CustomBaseClass): sell_report_type = ['IN_SELL', 'OUT_SELL', 'TOTAL_SELL'] profit_report_type = ['IN_PROFIT', 'OUT_PROFIT', 'TOTAL_PROFIT'] IN_SOURCE = ['IN_SELL', 'IN_PROFIT'] OUT_SOURCE = ['OUT_SELL', 'OUT_PROFIT'] def get_total(self, qs, report_type): """ parms ?start_date=2019-01-31&&end_date=2019-12-31&&stores=16&&report_type=TOTAL_SELL IN_SELL', 'OUT_SELL', 'TOTAL_SELL', 'IN_PROFIT', 'OUT_PROFIT', 'TOTAL_PROFIT :param qs: :param report_type: :return: """ total = 0.00 total_items = 0 spares = [] if report_type in self.IN_SOURCE: qs = qs.filter(order_type=SpareOrder.IN_SOURCE) if report_type in self.OUT_SOURCE: qs = qs.filter(order_type=SpareOrder.OUT_SOURCE) for order in qs: for spare in SpareSold.objects.filter(order=order): if report_type in self.sell_report_type: total = total + float(spare.spare_count * spare.spare_price) if report_type in self.profit_report_type: total = total + float(spare.spare_count * spare.spare_buying_price) spares.append(spare.spare) total_items += spare.spare_count return total, total_items, len(set(spares)) def get(self, request): try: stores = [int(x) for x in request.GET.get('stores', '').split(',')] start_date = request.GET.get('start_date') end_date = request.GET.get('end_date') report_type = request.GET.get('report_type') if start_date: start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d') end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1) profit_map = { "IN_PROFIT": "IN_SELL", "OUT_PROFIT": "OUT_SELL", "TOTAL_PROFIT": "TOTAL_SELL", } qs = self.get_filter_objects(SpareOrder, store__in=stores, order_date__range=[start_date, end_date]) if not qs: return Utils.dispatch_success(request, "DATA_NOT_FOUND") if report_type in self.sell_report_type: selling_total, total_items, total_spares = self.get_total(qs, report_type) response_data = {"selling_total": selling_total, "total_items": total_items, "total_spares": total_spares} return Utils.dispatch_success(request, response_data) if report_type in self.profit_report_type: buying_total, total_items, total_spares = self.get_total(qs, report_type) selling_total, total_items, total_spares = self.get_total(qs, profit_map[report_type]) difference = selling_total - buying_total response_data = {"selling_total": selling_total, "buying_total": buying_total, "profit_total": abs(difference), "status": "LOSS" if difference < 0 else "PROFIT", "total_items": total_items, "total_spares": total_spares} return Utils.dispatch_success(request, response_data) return self.object_not_found(request) except Exception as e: return self.internal_server_error(request, e) class UrgentSpareList(CustomBaseClass): def get(self, request, store_id, *args, **kwargs): """ return list of urgent stock with pagination :param request: :param store_id: :param args: :param kwargs: :return: """ try: qs = self.get_filter_objects(Spare, store=store_id, is_urgent_spare=True) page = request.GET.get('page', 1) paginator = Paginator(qs, per_page=10) serializer = serializers.SpareSerializer(paginator.page(page), many=True) response_data = { "data": serializer.data, "page": int(page), "total_pages": paginator.num_pages } return Utils.dispatch_success(request, response_data) except Exception as e: return self.internal_server_error(request, e) def put(self, request, store_id, *args, **kwargs): """ Updates list of urgent stock with pagination :param request: { "spares":[{"id":23, "quantity": 20}, {"id":30, "quantity": 2}, {"id":11, "quantity": 12}, ] } :param store_id: :param args: :param kwargs: :return: """ try: spares_list = request.data["spares"] for _spare in spares_list: spare = self.get_object(Spare, _spare["id"]) spare.is_urgent_spare = False spare.quantity += _spare["quantity"] spare.save() return Utils.dispatch_success(request, 'SUCCESS') except Exception as e: return self.internal_server_error(request, e) class SpareOrderEmailPdf(CustomBaseClass): def get(self, request, order_id, *args, **kwargs): """ Returns PDF of the invoice or email's user :param request: @param action=email # to send email to customer @param action=download # to Download the invoice copy :param order_id: :param args: :param kwargs: :return: """ try: action = request.GET.get('action') order = self.get_object(SpareOrder, order_id) data = {} store = order.store data['store'] = { 'store_name': store.name.upper(), 'store_branch': store.branch.upper(), 'store_type': store.branch, 'store_address': store.address.replace(',', '\n'), 'store_phone': store.phone, 'store_email': store.email, 'store_website': store.website, } customer = order.customer data['customer'] = { 'name': customer.name, 'email': customer.email, 'phone_number': customer.phone_number, 'address': customer.address.replace(',', '\n') } data['order_id'] = order.order_id data['date'] = order.order_date.strftime('%d-%m-%Y %H:%M:%S') data['total'] = order.total data['type'] = order.order_type data['bike_number'] = order.bike_number data['labour_charge'] = order.labour_charge data['out_source_charge'] = order.out_source_charge data['sold_by'] = order.sold_by.first_name data['sub_total'] = order.total - order.labour_charge - order.out_source_charge response = { 'csv': Utils.generate_csv, 'xls': Utils.generate_xls, 'pdf': Utils.generate_pdf } data['order'] = [] for i, order_spare in enumerate(SpareSold.objects.filter(order=order)): data['order'].append([i + 1, order_spare.spare_name, order_spare.spare_price, order_spare.spare_count, order_spare.spare_price * order_spare.spare_count]) dynamic_data = { 'pdf_template': 'spare_invoice.html', 'filename': f'Invoice_{order.order_id}', 'data': data, 'action': action } if action == 'download': return response.get('pdf')(**dynamic_data) elif action == 'email': if customer.email: AppUtils.send_inovice_email(response.get('pdf')(**dynamic_data), data, f'Invoice_{order.order_id}' ) return Utils.dispatch_success(request, 'SUCCESS') return self.object_not_found(request) except Exception as e: return self.internal_server_error(request, e)
muthukumar4999/the-mechanic-backend
the_mechanic_backend/v0/stock/views.py
views.py
py
23,745
python
en
code
0
github-code
36
8975839640
import cv2 from keras.models import load_model import numpy as np video_capture = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_SIMPLEX # 读取人脸haar模型 face_detection = cv2.CascadeClassifier('model/face_detection/haarcascade_frontalface_default.xml') # 读取性别判断模型 gender_classifier = load_model('model/gender/simple_CNN.81-0.96.hdf5') # 读取情绪判别模型 emotion_classifier = load_model('model/emotion/simple_CNN.530-0.65.hdf5') gender_labels = {0: 'womam', 1: 'man'} emotion_labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'sad', 5: 'surprise', 6: 'neutral'} while True: # 读取摄像头的视频流 _, frame = video_capture.read() # 将视频流转换成灰度 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # 检测人脸,产生坐标值 faces = face_detection.detectMultiScale(gray, 1.3, 5) for (x, y, w, h) in faces: # 将性别判断出来 face = frame[(y - 60):(y + h + 60), (x - 30):(x + w + 30)] face = cv2.resize(face, (48, 48)) face = np.expand_dims(face, 0) face = face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] if gender == gender_labels[0]: gender_color = (255, 0, 0) else: gender_color = (0, 255, 0) gray_face = gray[(y - 40):(y + h + 40), (x - 20):(x + w + 20)] gray_face = cv2.resize(gray_face, (48, 48)) gray_face = gray_face / 255.0 gray_face = np.expand_dims(gray_face, 0) gray_face = np.expand_dims(gray_face, -1) emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face)) emotion = emotion_labels[emotion_label_arg] cv2.rectangle(frame, (x, y), (x + w, y + h), gender_color, 2) cv2.putText(frame, gender, (x, y - 30), font, .7, gender_color, 1, cv2.LINE_AA) cv2.putText(frame, emotion, (x + 90, y - 30), font, .7, gender_color, 1, cv2.LINE_AA) cv2.imshow('face', frame) if cv2.waitKey(30) & ord('q') == 0xFF: break # 销毁视频流 video_capture.release() cv2.destroyAllWindows()
HadXu/machine-learning
face_detection_and_emotion/video_test.py
video_test.py
py
2,250
python
en
code
287
github-code
36
40892450132
import io from pathlib import Path import magic from django.conf import settings from smb.smb_structs import OperationFailure from smb.SMBConnection import SMBConnection def factory(): config = settings.SAMBA connection = SMBConnection( config["user"], config["password"], "abcd", config["server_name"], use_ntlm_v2=True, ) connection.connect(config["server_ip"], int(config["port"])) return connection def store_file_and_create_folders(conn, file_path, file_binary): file_path = Path(file_path) share = file_path.parts[1] folders = file_path.parts[2:-1] last_folder = "/" for folder in folders: last_folder += f"{folder}/" try: conn.createDirectory(share, last_folder) except OperationFailure: pass conn.storeFile(share, f'/{"/".join(file_path.parts[2:])}', file_binary) def list_path(conn, path): path_parts = Path(path).parts share = path_parts[1] folder = "/".join(path_parts[2:]) return conn.listPath(share, folder) def find_file_mime_type(conn, path): path_parts = Path(path).parts share = path_parts[1] file_path = "/".join(path_parts[2:]) # Read first 2048 bytes file_buffer = io.BytesIO() conn.retrieveFileFromOffset(share, file_path, file_buffer, max_length=2048) file_buffer.seek(0) return magic.from_buffer(file_buffer.read(), mime=True) def retrieve_file(conn, path): path_parts = Path(path).parts share = path_parts[1] file_path = "/".join(path_parts[2:]) file_buffer = io.BytesIO() conn.retrieveFile(share, file_path, file_buffer) file_buffer.seek(0) return file_buffer
pierrotlemekcho/exaged
sifapi/planning/samba.py
samba.py
py
1,707
python
en
code
0
github-code
36
74205781223
from typing import Iterable, Iterator from PIL import Image # type: ignore def resize_image_to_height(image: Image.Image, height: int) -> Image.Image: return image.resize(size=(int(image.width * (height / image.height)), height)) def concat_paired_images( left_image: Image.Image, right_image: Image.Image, mode: str ) -> Image.Image: dest_image_width = left_image.width + right_image.width dest_image_height = max(left_image.height, right_image.height) dest_image = Image.new(mode=mode, size=(dest_image_width, dest_image_height)) dest_image.paste(im=left_image, box=(0, 0)) dest_image.paste(im=right_image, box=(left_image.width, 0)) return dest_image def concat_images(images: Iterable[Image.Image], mode: str) -> Image.Image: images_itr = iter(images) return _concat_images(next(images_itr), images_itr, mode=mode) def _concat_images( first_image: Image.Image, rest_images_itr: Iterator[Image.Image], mode: str ) -> Image.Image: try: return concat_paired_images( left_image=first_image, right_image=_concat_images( first_image=next(rest_images_itr), rest_images_itr=rest_images_itr, mode=mode, ), mode=mode, ) except StopIteration: return first_image
yskuniv/python-simple-web-counter
simple_web_counter/utils/image/image.py
image.py
py
1,340
python
en
code
0
github-code
36
34998622413
import sys for line in sys.stdin: # print(line) # break arr=[int(x) for x in line.strip().split()] # print(arr) if arr[0]==0: for c in range(arr[4]): print("%s\t%s" % ((arr[1], c, arr[2] ), arr[3])) if arr[0]==1: for a in range(arr[4]): print("%s\t%s" % ((a, arr[2], arr[1] ), arr[3]))
keshavbnsl102/DS-assign2
2019101019_ass2/2019101019_1/mapper/mapper.py
mapper.py
py
378
python
en
code
0
github-code
36
72079445545
# 17124번 # 정수 배열 A와 B가 있다. A는 총 n개의 서로 다른 양의 정수를 포함하고 B는 총 m개의 서로 다른 양의 정수를 포함한다. A,B를 이용해서 길이가 n인 새로운 배열 C를 만들어보자. # 1 : C[i]는 배열 B에 있는 값중 A[i에 가장 가까운 값(절대값 차이가 가장 작은값)으로 정의된다. # 2 : 만약 이 조건을 만족하는 값들이 여럿 있는 경우, 그 중 가장 크기가 작은 값으로 정의된다. # 예를 들어, ,A=[20,5,14,9] 그리고 B=[16,8,12]라고 해보자 # C[1] = 16이다 - 왜냐하면 B[1] = 16이 A[1]=20에 가장 가깝기 땨문이다. C[2] = 8 , C[3] = 12 - 16과 12 둘다 14에 가깝지만 작은 수 이기때문, ㅊ[4] = 8이다 # 입력 : 첫 줄에 테스트 케이스의 수 t ( 1<= t <= 10) 이 주어진다. 각 테스트 케이스는 세줄에 걸쳐서 주어진다. # 첫 출에는 N과 M이 공백으로 구분되어 주어진다. (1<= N,M <= 10^6) / 두 번째 줄에는 공백으로 구분된 N개의 정수가 주어지며, A[1]부터 A[N]을 나타낸다 (1 ~ 10^9) # 세 번째 줄에는 공백으로 구분된 M 개의 정수가 주어지며, B[1]부터 B[M]을 나타낸다 ( 1 ~ 10^9) # 출력 : 각 테스트 케이스에 대해 배열 C를 구하고 해당 배열의 모든 원소 합을 한줄에 출력하시오. def findindex(one, two, three): index = 2 curr = three if two <= curr: index = 1 curr = two if one <= curr: index = 0 return index def binarysearch(item, B, start, end): diff = end - start if diff <= 1: return start mid = (end + start) // 2 if item > B[mid]: return binarysearch(item, B, mid, end) else: return binarysearch(item, B, start, mid) t = int(input()) rst = [] for _ in range(t): n, m = map(int, input().split()) A = [int(x) for x in input().split()] B = [int(x) for x in input().split()] cnt = 0 B.sort() for i in range(n): item = A[i] idx = binarysearch(item, B, 0, m) one = abs(item - B[idx-1]) two = abs(item - B[idx]) three = abs(item - B[(idx+1) % m]) index = findindex(one, two, three) cnt += B[idx - 1 + index] rst.append(cnt) for i in range(t): print(rst[i])
kkhhkk/Study-Algorithms
backjoon/17124.py
17124.py
py
2,335
python
ko
code
0
github-code
36
646138808
#Lista de Exercício 1 - Questão 5 #Dupla: 2020314273 - Cauã Alexandre Torres de Holanda e 2021327294 - Kallyne Ferro Veiga #Disciplina: Programação Web #Professor: Ítalo Arruda #5.Faça um Programa que converta metros para centímetros. class ConversorMedidas: def __init__(self, valor): self.valor = valor def metros_para_centimetros(self): try: centimetros = self.valor * 100 return centimetros except TypeError: return "Erro: O valor fornecido não é numérico." def main(): try: metros = float(input("Digite o valor em metros: ")) conversor = ConversorMedidas(metros) resultado = conversor.metros_para_centimetros() print("O valor em centímetros é:", resultado) except ValueError: print("Erro: O valor fornecido não é válido.") if __name__ == '__main__': main()
caalexandre/Revisao-Python-IFAL-2023-Caua-e-Kallyne
Lista1/l1q5KC-523.py
l1q5KC-523.py
py
901
python
pt
code
0
github-code
36
16173542417
# pylint: disable=missing-docstring """This is a script to test the RecurrentEncoder module.""" import pickle import pytest import torch import torch.nn as nn from metarl.torch.embeddings import RecurrentEncoder class TestRecurrentEncoder: """Test for RecurrentEncoder.""" # yapf: disable @pytest.mark.parametrize( 'input_dim, output_dim, hidden_sizes, num_tasks, num_seq', [ (1, 1, (1, ), 1, 3), (3, 3, (3, ), 1, 5), (5, 5, (5, 5), 2, 4), (7, 7, (7, 5, 7), 2, 5), (9, 9, (9, 7, 5, 9), 3, 10), ]) # yapf: enable def test_module(self, input_dim, output_dim, hidden_sizes, num_tasks, num_seq): """Test forward method.""" input_val = torch.ones((num_tasks, num_seq, input_dim), dtype=torch.float32) # last hidden size should match output size # output_dim is latent_dim module = RecurrentEncoder(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) module.reset(num_tasks=num_tasks) output = module(input_val) # maps input of shape (task, seq, input_dim) to (task, 1, output_dim) expected_shape = [num_tasks, 1, output_dim] assert all([a == b for a, b in zip(output.shape, expected_shape)]) # yapf: disable @pytest.mark.parametrize( 'input_dim, output_dim, hidden_sizes, num_tasks, num_seq', [ (1, 1, (1, ), 1, 3), (3, 3, (3, ), 1, 5), (5, 5, (5, 5), 2, 4), (7, 7, (7, 5, 7), 2, 5), (9, 9, (9, 7, 5, 9), 3, 10), ]) # yapf: enable def test_is_pickleable(self, input_dim, output_dim, hidden_sizes, num_tasks, num_seq): """Test is_pickeable.""" input_val = torch.ones((num_tasks, num_seq, input_dim), dtype=torch.float32) module = RecurrentEncoder(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) module.reset(num_tasks=num_tasks) output1 = module(input_val) h = pickle.dumps(module) module_pickled = pickle.loads(h) module_pickled.reset(num_tasks=num_tasks) output2 = module_pickled(input_val) assert torch.all(torch.eq(output1, output2))
icml2020submission6857/metarl
tests/metarl/torch/embeddings/test_recurrent_encoder.py
test_recurrent_encoder.py
py
2,849
python
en
code
2
github-code
36
42119246092
import pygame from settings import Settings from pygame.sprite import Sprite class Ship(Sprite): def __init__(self, ai_game): """initialize the ship and set its starting position""" super().__init__() self.screen = ai_game.screen self.settings = ai_game.settings self.screen_rect = ai_game.screen.get_rect() #$load ship img and its rect self.image = pygame.image.load('images/ship.bmp') self.rect = self.image.get_rect() #start ship at bottom self.rect.midbottom = self.screen_rect.midbottom #store a decimal value for the ships position self.x = float(self.rect.x) #flag to control movement self.moving_right = False self.moving_left = False def update(self): """updates position based on movement flag""" if self.moving_right and self.rect.right < self.screen_rect.right: self.x += self.settings.ship_speed if self.moving_left and self.rect.left > 0: self.x -= self.settings.ship_speed #update rect object from self.x self.rect.x = self.x def blitme(self): self.screen.blit(self.image, self.rect) def center_ship(self): """center the ship on the screen""" self.rect.midbottom = self.screen_rect.midbottom self.x = float(self.rect.x)
SylvainAroma/Alien-Invasion
ship.py
ship.py
py
1,202
python
en
code
0
github-code
36
33537990133
from tree import Tree #Tree tests complete t = Tree(3) print(t) a = t.addNode(0,4) b = t.addNode(0,5) c = t.addNode(a,6) d = t.addNode(a,7) e = t.addNode(d,8) print(t) print("Path to e:") p = t.pathToNode(e) for i in p: print("\t",t.getData(i)) print("Path to c") p = t.pathToNode(c) for i in p: print("\t",t.getData(i)) print("Leaves:") l = t.leaves() for i in l: print(t.getData(i))
zanda8893/Student-Robotics
code/tree-test.py
tree-test.py
py
400
python
en
code
0
github-code
36
73335007463
from datetime import datetime from .setup import config, logger def bibcodes(): try: with open(config.get('CLASSIC_CANONICAL_FILE'), "r") as f: bibcodes = [line.strip() for line in f] except: logger.exception("Unable to retreive bibcodes from classic") return [] else: return bibcodes def compare(classic_bibcodes, db_bibcodes, solr_bibcodes): """Compare bibcode lists against classic""" results = {} batch = {} now = datetime.utcnow() prefix = "{:04}{:02}{:02}_{:02}{:02}".format(now.year, now.month, now.day, now.hour, now.minute) if len(classic_bibcodes) > 0: classic_bibcodes = set(classic_bibcodes) if len(db_bibcodes) > 0: db_bibcodes = set(db_bibcodes) extra_in_db = db_bibcodes.difference(classic_bibcodes) extra_in_db = [e for e in extra_in_db if "zndo" not in e] # Filter out non-classic Zenodo records missing_in_db = classic_bibcodes.difference(db_bibcodes) results['extra_in_db'] = len(extra_in_db) results['missing_in_db'] = len(missing_in_db) else: extra_in_db = set() missing_in_db = set() if len(solr_bibcodes) > 0: solr_bibcodes = set(solr_bibcodes) extra_in_solr = solr_bibcodes.difference(classic_bibcodes) extra_in_solr = [e for e in extra_in_solr if "zndo" not in e] # Filter out non-classic Zenodo records missing_in_solr = classic_bibcodes.difference(solr_bibcodes) results['extra_in_solr'] = len(extra_in_solr) results['missing_in_solr'] = len(missing_in_solr) else: extra_in_solr = set() missing_in_solr = set() batch.update({ "{}_extra_in_db".format(prefix): extra_in_db, "{}_missing_in_db".format(prefix): missing_in_db, "{}_extra_in_solr".format(prefix): extra_in_solr, "{}_missing_in_solr".format(prefix): missing_in_solr, }) return results, batch
adsabs/ADSStatsCollector
statscollector/classic.py
classic.py
py
2,065
python
en
code
0
github-code
36
34076302112
from sklearn.base import BaseEstimator, TransformerMixin import numpy as np import sys ''' The key concept in CSP is to find a set of spatial filters (components) that optimally discriminate between the two classes. These filters are represented by the eigenvectors obtained in the 'fit' method. When you apply the CSP transformation to a new data sample in the 'transform' method, it projects the data onto these filters. The result is that the transformed data has enhanced features that maximize the differences in variances between the two classes. This makes it easier to classify the data based on the most discriminative spatial patterns. ''' class CSPTransformer(BaseEstimator, TransformerMixin): def __init__(self, nb_components=4): ''' In practice, it's common to try a range of values for the number of components (e.g., from 2 to 10) and evaluate their impact on classification accuracy. The choice of the optimal number of components may involve a trade-off between enhanced discriminative power and computational efficiency. ''' self.nb_components = nb_components self.filters = np.array([]) self.x = np.array([]) self.y = np.array([]) def fit(self, x, y): if self.x.size == 0: self.x = x self.y = y class_labels = np.unique(y) if len(class_labels) != 2: print("CSPTransformer: Error: CSP is a binary classification method: there should be two class labels.", file=sys.stderr) exit() x_class1 = x[y == class_labels[0]] #take all values related to one class-label x_class2 = x[y == class_labels[1]] ''' Get covariance matrices for each class. A covariance matrix is a square matrix giving the covariance between each element pair in a vector Covariance is the mean value of the product of the deviations of two variates from their respective means. A positive covariance indicates that both variates tend to be high or low at same time (similar direction) while a negative covariance indicates that if one variate is high the other will be low (opposite direction). ''' cov1 = np.cov(x_class1, rowvar=False) cov2 = np.cov(x_class2, rowvar=False) ''' Get the 'eigenvalues' and 'eigenvectors' by solving the 'generalized eigenvalue problem' The 'generalized eigenvalue problem' is a mathematical problem that arises in various fields and is an extendion of the 'standard eigenvalue problem'. In the 'standard eigenvalue problem', you are given a square matrix A, and you want to find scalars λ (eigenvalues) and corresponding vectors x (eigenvectors) that satisfy the equation: A * x = λ * x. The 'eigenvalues' represent the scaling factors, and the 'eigenvectors' represent the directions along which the matrix A scales or rotates. The 'generalized eigenvalue problem' extends the concept to two matrices, A and B. Given two square matrices A and B, you want to find scalars λ (generalized eigenvalues) and corresponding vectors x (generalized eigenvectors) that satisfy the equation: A * x = λ * B * x. Here we will use the 'eigenvectors' as transformation matrices that will maximize the variance of one class and minimize the variance of another class with the goal of maximizing the differences between the two classes. ''' eigenvalues, eigenvectors = np.linalg.eig(np.dot(np.linalg.pinv(np.add(cov1, cov2)), cov1)) ''' Now we will sort eigenvalues and corresponding eigenvectors in descending order. Thus sort the eigenvalues and align the corresponding eigenvectors based on the eigenvalue magnitudes. This will allow us to select the first eigenvectors and use those as CSP filters. Because they come first they will be associated with the highest eigenvalue magnitudes and thus be the most discriminative. ''' ascending_indices = np.argsort(eigenvalues) descending_indices = np.flip(ascending_indices) eigenvalues = eigenvalues[descending_indices] eigenvectors = eigenvectors[:, descending_indices] #reorder the columns (eigenvectors) of the eigenvectors matrix self.filters = eigenvectors[:, :self.nb_components].real.astype(np.float32) return self def partial_fit(self, x, y): self.x = np.concatenate((self.x, x), axis=0) self.y = np.concatenate((self.y, y), axis=0) return self.fit(self.x, self.y) def transform(self, x): if self.filters.size == 0: print("CSPTransformer: Error: use the 'fit' method to find the filters before using 'transform' method", file=sys.stderr) exit() x_csp = np.dot(x, self.filters) return x_csp
artainmo/total_perspective_vortex
processing_EEGs_lib/dimensionality_reduction_algorithm.py
dimensionality_reduction_algorithm.py
py
4,957
python
en
code
0
github-code
36
33571337998
#!/usr/bin/env python3 from subprocess import Popen, PIPE, STDOUT from threading import Thread from time import sleep import logging import os import sys # Very simple tee logic implementation. You can specify shell command, output # logfile and env variables. After TeePopen is created you can only wait until # it finishes. stderr and stdout will be redirected both to specified file and # stdout. class TeePopen: # pylint: disable=W0102 def __init__(self, command, log_file, env=os.environ.copy(), timeout=None): self.command = command self.log_file = log_file self.env = env self.process = None self.timeout = timeout def _check_timeout(self): sleep(self.timeout) while self.process.poll() is None: logging.warning( "Killing process %s, timeout %s exceeded", self.process.pid, self.timeout, ) os.killpg(self.process.pid, 9) sleep(10) def __enter__(self): self.process = Popen( self.command, shell=True, universal_newlines=True, env=self.env, start_new_session=True, # signall will be sent to all children stderr=STDOUT, stdout=PIPE, bufsize=1, ) self.log_file = open(self.log_file, "w", encoding="utf-8") if self.timeout is not None and self.timeout > 0: t = Thread(target=self._check_timeout) t.daemon = True # does not block the program from exit t.start() return self def __exit__(self, t, value, traceback): for line in self.process.stdout: sys.stdout.write(line) self.log_file.write(line) self.process.wait() self.log_file.close() def wait(self): for line in self.process.stdout: sys.stdout.write(line) self.log_file.write(line) return self.process.wait()
ByConity/ByConity
tests/ci/tee_popen.py
tee_popen.py
py
2,012
python
en
code
1,352
github-code
36
28890197379
"""PDB dataset loader.""" import tree import numpy as np import torch import pandas as pd import logging import random import functools as fn from torch.utils import data from data import utils as du from openfold.data import data_transforms from openfold.np import residue_constants from openfold.utils import rigid_utils class PdbDataset(data.Dataset): def __init__( self, *, data_conf, diffuser, is_training, ): self._log = logging.getLogger(__name__) self._is_training = is_training self._data_conf = data_conf self._init_metadata() self._diffuser = diffuser @property def is_training(self): return self._is_training @property def diffuser(self): return self._diffuser @property def data_conf(self): return self._data_conf def _init_metadata(self): """Initialize metadata.""" # Process CSV with different filtering criterions. filter_conf = self.data_conf.filtering pdb_csv = pd.read_csv(self.data_conf.csv_path) self.raw_csv = pdb_csv if filter_conf.allowed_oligomer is not None and len(filter_conf.allowed_oligomer) > 0: pdb_csv = pdb_csv[pdb_csv.oligomeric_detail.isin( filter_conf.allowed_oligomer)] if filter_conf.max_len is not None: pdb_csv = pdb_csv[pdb_csv.modeled_seq_len <= filter_conf.max_len] if filter_conf.min_len is not None: pdb_csv = pdb_csv[pdb_csv.modeled_seq_len >= filter_conf.min_len] if filter_conf.max_helix_percent is not None: pdb_csv = pdb_csv[ pdb_csv.helix_percent < filter_conf.max_helix_percent] if filter_conf.max_loop_percent is not None: pdb_csv = pdb_csv[ pdb_csv.coil_percent < filter_conf.max_loop_percent] if filter_conf.min_beta_percent is not None: pdb_csv = pdb_csv[ pdb_csv.strand_percent > filter_conf.min_beta_percent] if filter_conf.subset is not None: pdb_csv = pdb_csv[:filter_conf.subset] pdb_csv = pdb_csv.sort_values('modeled_seq_len', ascending=False) self._create_split(pdb_csv) def _create_split(self, pdb_csv): # Training or validation specific logic. if self.is_training: self.csv = pdb_csv self._log.info( f'Training: {len(self.csv)} examples') else: all_lengths = np.sort(pdb_csv.modeled_seq_len.unique()) length_indices = (len(all_lengths) - 1) * np.linspace( 0.0, 1.0, self._data_conf.num_eval_lengths) length_indices = length_indices.astype(int) eval_lengths = all_lengths[length_indices] eval_csv = pdb_csv[pdb_csv.modeled_seq_len.isin(eval_lengths)] # Fix a random seed to get the same split each time. eval_csv = eval_csv.groupby('modeled_seq_len').sample( self._data_conf.samples_per_eval_length, replace=True, random_state=123) eval_csv = eval_csv.sort_values('modeled_seq_len', ascending=False) self.csv = eval_csv self._log.info( f'Validation: {len(self.csv)} examples with lengths {eval_lengths}') @fn.lru_cache(maxsize=50000) def _process_csv_row(self, processed_file_path): processed_feats = du.read_pkl(processed_file_path) processed_feats = du.parse_chain_feats(processed_feats) # Only take modeled residues. modeled_idx = processed_feats['modeled_idx'] min_idx = np.min(modeled_idx) max_idx = np.max(modeled_idx) del processed_feats['modeled_idx'] processed_feats = tree.map_structure( lambda x: x[min_idx:(max_idx+1)], processed_feats) # Run through OpenFold data transforms. chain_feats = { 'aatype': torch.tensor(processed_feats['aatype']).long(), 'all_atom_positions': torch.tensor(processed_feats['atom_positions']).double(), 'all_atom_mask': torch.tensor(processed_feats['atom_mask']).double() } chain_feats = data_transforms.atom37_to_frames(chain_feats) chain_feats = data_transforms.make_atom14_masks(chain_feats) chain_feats = data_transforms.make_atom14_positions(chain_feats) chain_feats = data_transforms.atom37_to_torsion_angles()(chain_feats) # Re-number residue indices for each chain such that it starts from 1. # Randomize chain indices. chain_idx = processed_feats["chain_index"] res_idx = processed_feats['residue_index'] new_res_idx = np.zeros_like(res_idx) new_chain_idx = np.zeros_like(res_idx) all_chain_idx = np.unique(chain_idx).tolist() shuffled_chain_idx = np.array( random.sample(all_chain_idx, len(all_chain_idx))) - np.min(all_chain_idx) + 1 for i,chain_id in enumerate(all_chain_idx): chain_mask = (chain_idx == chain_id).astype(np.int) chain_min_idx = np.min(res_idx + (1 - chain_mask) * 1e3).astype(np.int) new_res_idx = new_res_idx + (res_idx - chain_min_idx + 1) * chain_mask # Shuffle chain_index replacement_chain_id = shuffled_chain_idx[i] new_chain_idx = new_chain_idx + replacement_chain_id * chain_mask # To speed up processing, only take necessary features final_feats = { 'aatype': chain_feats['aatype'], 'seq_idx': new_res_idx, 'chain_idx': chain_idx, 'residx_atom14_to_atom37': chain_feats['residx_atom14_to_atom37'], 'residue_index': processed_feats['residue_index'], 'res_mask': processed_feats['bb_mask'], 'atom37_pos': chain_feats['all_atom_positions'], 'atom37_mask': chain_feats['all_atom_mask'], 'atom14_pos': chain_feats['atom14_gt_positions'], 'rigidgroups_0': chain_feats['rigidgroups_gt_frames'], 'torsion_angles_sin_cos': chain_feats['torsion_angles_sin_cos'], } return final_feats def _create_diffused_masks(self, atom37_pos, rng, row): bb_pos = atom37_pos[:, residue_constants.atom_order['CA']] dist2d = np.linalg.norm(bb_pos[:, None, :] - bb_pos[None, :, :], axis=-1) # Randomly select residue then sample a distance cutoff # TODO: Use a more robust diffuse mask sampling method. diff_mask = np.zeros_like(bb_pos) attempts = 0 while np.sum(diff_mask) < 1: crop_seed = rng.integers(dist2d.shape[0]) seed_dists = dist2d[crop_seed] max_scaffold_size = min( self._data_conf.scaffold_size_max, seed_dists.shape[0] - self._data_conf.motif_size_min ) scaffold_size = rng.integers( low=self._data_conf.scaffold_size_min, high=max_scaffold_size ) dist_cutoff = np.sort(seed_dists)[scaffold_size] diff_mask = (seed_dists < dist_cutoff).astype(float) attempts += 1 if attempts > 100: raise ValueError( f'Unable to generate diffusion mask for {row}') return diff_mask def __len__(self): return len(self.csv) def __getitem__(self, idx): # Sample data example. example_idx = idx csv_row = self.csv.iloc[example_idx] if 'pdb_name' in csv_row: pdb_name = csv_row['pdb_name'] elif 'chain_name' in csv_row: pdb_name = csv_row['chain_name'] else: raise ValueError('Need chain identifier.') processed_file_path = csv_row['processed_path'] chain_feats = self._process_csv_row(processed_file_path) # Use a fixed seed for evaluation. if self.is_training: rng = np.random.default_rng(None) else: rng = np.random.default_rng(idx) gt_bb_rigid = rigid_utils.Rigid.from_tensor_4x4( chain_feats['rigidgroups_0'])[:, 0] diffused_mask = np.ones_like(chain_feats['res_mask']) if np.sum(diffused_mask) < 1: raise ValueError('Must be diffused') fixed_mask = 1 - diffused_mask chain_feats['fixed_mask'] = fixed_mask chain_feats['rigids_0'] = gt_bb_rigid.to_tensor_7() chain_feats['sc_ca_t'] = torch.zeros_like(gt_bb_rigid.get_trans()) # Sample t and diffuse. if self.is_training: t = rng.uniform(self._data_conf.min_t, 1.0) diff_feats_t = self._diffuser.forward_marginal( rigids_0=gt_bb_rigid, t=t, diffuse_mask=None ) else: t = 1.0 diff_feats_t = self.diffuser.sample_ref( n_samples=gt_bb_rigid.shape[0], impute=gt_bb_rigid, diffuse_mask=None, as_tensor_7=True, ) chain_feats.update(diff_feats_t) chain_feats['t'] = t # Convert all features to tensors. final_feats = tree.map_structure( lambda x: x if torch.is_tensor(x) else torch.tensor(x), chain_feats) final_feats = du.pad_feats(final_feats, csv_row['modeled_seq_len']) if self.is_training: return final_feats else: return final_feats, pdb_name class LengthSampler(data.Sampler): def __init__( self, *, data_conf, dataset, ): self._data_conf = data_conf self._dataset = dataset self._data_csv = self._dataset.csv def __iter__(self): return iter(range(len(self._data_csv))) def __len__(self): return len(self._data_csv) class TrainSampler(data.Sampler): def __init__( self, *, data_conf, dataset, batch_size, ): self._data_conf = data_conf self._dataset = dataset self._data_csv = self._dataset.csv self._dataset_indices = list(range(len(self._data_csv))) self._data_csv['index'] = self._dataset_indices self._batch_size = batch_size self.epoch = 0 def __iter__(self): random.shuffle(self._dataset_indices) repeated_indices = np.repeat(self._dataset_indices, self._batch_size) return iter(repeated_indices) def set_epoch(self, epoch): self.epoch = epoch def __len__(self): return len(self._dataset_indices) * self._batch_size
blt2114/twisted_diffusion_sampler
protein_exp/data/pdb_data_loader.py
pdb_data_loader.py
py
10,693
python
en
code
11
github-code
36
40435532047
import unittest from src.entities.event import Event from src.repos.in_memory.data_store import DataStore from src.repos.in_memory.in_memory_event_repo import InMemoryEventRepo class TestInMemoryEventRepo(unittest.TestCase): def setUp(self): data_store = DataStore() self.event_repo = InMemoryEventRepo(data_store) self.game_id = 1 self.events = [Event(None, self.game_id, f'event{i}', i, False) for i in range(11)] self.event_repo.create_events(self.game_id, list(map(lambda e: e.desc, self.events))) def assertEventsEqual(self, e1: Event, e2: Event): self.assertEqual(e1.desc, e2.desc) self.assertEqual(e1.index, e2.index) self.assertEqual(e1.is_hit, e2.is_hit) def test_read_all(self): events = self.event_repo.read_all_events(self.game_id) for e_write, e_read in zip(self.events, events): self.assertEventsEqual(e_write, e_read) def test_update_event(self): new_event = self.events[0] new_event.desc += 'new' new_event.is_hit = True self.event_repo.update_event(self.game_id, new_event) events = self.event_repo.read_all_events(self.game_id) self.assertEventsEqual( sorted(events, key=lambda event: event.index)[new_event.index], new_event)
mchlzhao/discord-bingo-bot
tests/unit/repos/in_memory/test_in_memory_event_repo.py
test_in_memory_event_repo.py
py
1,392
python
en
code
0
github-code
36
42998147166
from __future__ import annotations import time from datetime import date, datetime, timedelta from logging import getLogger from time import struct_time from typing import Any, Callable import pytz from .compat import IS_WINDOWS from .constants import is_date_type_name, is_timestamp_type_name from .converter import ( ZERO_EPOCH, SnowflakeConverter, _adjust_fraction_of_nanoseconds, _extract_timestamp, _generate_tzinfo_from_tzoffset, ) from .sfbinaryformat import SnowflakeBinaryFormat, binary_to_python from .sfdatetime import SnowflakeDateFormat, SnowflakeDateTime, SnowflakeDateTimeFormat logger = getLogger(__name__) def format_sftimestamp( ctx: dict[str, Any], value: datetime | struct_time, franction_of_nanoseconds: int ) -> str: sf_datetime = SnowflakeDateTime( datetime=value, nanosecond=franction_of_nanoseconds, scale=ctx.get("scale") ) return ctx["fmt"].format(sf_datetime) if ctx.get("fmt") else str(sf_datetime) class SnowflakeConverterSnowSQL(SnowflakeConverter): """Snowflake Converter for SnowSQL. Format data instead of just converting the values into native Python objects. """ def __init__(self, **kwargs) -> None: super().__init__(**kwargs) self._support_negative_year = kwargs.get("support_negative_year", True) def _get_format(self, type_name: str) -> str: """Gets the format.""" fmt = None if type_name == "DATE": fmt = self._parameters.get("DATE_OUTPUT_FORMAT") if not fmt: fmt = "YYYY-MM-DD" elif type_name == "TIME": fmt = self._parameters.get("TIME_OUTPUT_FORMAT") elif type_name + "_OUTPUT_FORMAT" in self._parameters: fmt = self._parameters[type_name + "_OUTPUT_FORMAT"] if not fmt: fmt = self._parameters["TIMESTAMP_OUTPUT_FORMAT"] elif type_name == "BINARY": fmt = self._parameters.get("BINARY_OUTPUT_FORMAT") return fmt # # FROM Snowflake to Python objects # # Note: Callable doesn't implement operator| def to_python_method( self, type_name: str, column: dict[str, Any] ) -> Callable | None: ctx = column.copy() if ctx.get("scale") is not None: ctx["max_fraction"] = int(10 ** ctx["scale"]) ctx["zero_fill"] = "0" * (9 - ctx["scale"]) fmt = None if is_date_type_name(type_name): datetime_class = time.struct_time if not IS_WINDOWS else date fmt = SnowflakeDateFormat( self._get_format(type_name), support_negative_year=self._support_negative_year, datetime_class=datetime_class, ) elif is_timestamp_type_name(type_name): fmt = SnowflakeDateTimeFormat( self._get_format(type_name), data_type=type_name, support_negative_year=self._support_negative_year, datetime_class=SnowflakeDateTime, ) elif type_name == "BINARY": fmt = SnowflakeBinaryFormat(self._get_format(type_name)) logger.debug("Type: %s, Format: %s", type_name, fmt) ctx["fmt"] = fmt converters = [f"_{type_name}_to_python"] for conv in converters: try: return getattr(self, conv)(ctx) except AttributeError: pass logger.warning("No column converter found for type: %s", type_name) return None # Skip conversion def _BOOLEAN_to_python(self, ctx): """No conversion for SnowSQL.""" return lambda value: "True" if value in ("1", "True") else "False" def _FIXED_to_python(self, ctx): """No conversion for SnowSQL.""" return None def _REAL_to_python(self, ctx): """No conversion for SnowSQL.""" return None def _BINARY_to_python(self, ctx): """BINARY to a string formatted by BINARY_OUTPUT_FORMAT.""" return lambda value: ctx["fmt"].format(binary_to_python(value)) def _DATE_to_python(self, ctx: dict[str, str | None]) -> Callable: """Converts DATE to struct_time/date. No timezone is attached. """ def conv(value: str) -> str: return ctx["fmt"].format(time.gmtime(int(value) * (24 * 60 * 60))) def conv_windows(value): ts = ZERO_EPOCH + timedelta(seconds=int(value) * (24 * 60 * 60)) return ctx["fmt"].format(date(ts.year, ts.month, ts.day)) return conv if not IS_WINDOWS else conv_windows def _TIMESTAMP_TZ_to_python(self, ctx: dict[str, Any]) -> Callable: """Converts TIMESTAMP TZ to datetime. The timezone offset is piggybacked. """ scale = ctx["scale"] max_fraction = ctx.get("max_fraction") def conv0(encoded_value: str) -> str: value, tz = encoded_value.split() microseconds = float(value) tzinfo = _generate_tzinfo_from_tzoffset(int(tz) - 1440) try: t = datetime.fromtimestamp(microseconds, tz=tzinfo) except OSError as e: logger.debug("OSError occurred but falling back to datetime: %s", e) t = ZERO_EPOCH + timedelta(seconds=microseconds) if pytz.utc != tzinfo: t += tzinfo.utcoffset(t) t = t.replace(tzinfo=tzinfo) fraction_of_nanoseconds = _adjust_fraction_of_nanoseconds( value, max_fraction, scale ) return format_sftimestamp(ctx, t, fraction_of_nanoseconds) def conv(encoded_value: str) -> str: value, tz = encoded_value.split() microseconds = float(value[0 : -scale + 6]) tzinfo = _generate_tzinfo_from_tzoffset(int(tz) - 1440) try: t = datetime.fromtimestamp(microseconds, tz=tzinfo) except (OSError, ValueError) as e: logger.debug("OSError occurred but falling back to datetime: %s", e) t = ZERO_EPOCH + timedelta(seconds=microseconds) if pytz.utc != tzinfo: t += tzinfo.utcoffset(t) t = t.replace(tzinfo=tzinfo) fraction_of_nanoseconds = _adjust_fraction_of_nanoseconds( value, max_fraction, scale ) return format_sftimestamp(ctx, t, fraction_of_nanoseconds) return conv if scale > 6 else conv0 def _TIMESTAMP_LTZ_to_python(self, ctx: dict[str, Any]) -> Callable: def conv(value: str) -> str: t, fraction_of_nanoseconds = self._pre_TIMESTAMP_LTZ_to_python(value, ctx) return format_sftimestamp(ctx, t, fraction_of_nanoseconds) return conv def _TIMESTAMP_NTZ_to_python(self, ctx: dict[str, Any]) -> Callable: """Converts TIMESTAMP NTZ to Snowflake Formatted String. No timezone info is attached. """ def conv(value: str) -> str: microseconds, fraction_of_nanoseconds = _extract_timestamp(value, ctx) try: t = time.gmtime(microseconds) except (OSError, ValueError) as e: logger.debug("OSError occurred but falling back to datetime: %s", e) t = ZERO_EPOCH + timedelta(seconds=(microseconds)) return format_sftimestamp(ctx, t, fraction_of_nanoseconds) return conv _TIME_to_python = _TIMESTAMP_NTZ_to_python
snowflakedb/snowflake-connector-python
src/snowflake/connector/converter_snowsql.py
converter_snowsql.py
py
7,534
python
en
code
511
github-code
36
24369028230
import os import time import argparse from datetime import datetime from utils.CpuMonitor import CpuMonitor from utils.GpuMonitor import GpuMonitor from utils.Recoder import Recoder from utils.Printer import print_info, print_err, print_warn def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--cpu-only', help='Only record CPU temperature', action='store_true') parser.add_argument('--sample-interval', help='Specify the data retrieve interval in seconds. Default is 10 seconds', type=int, default=10) parser.add_argument('--log-file', help='Record CSV file. Default is ./statistic.csv', type=str, default='./statistic.csv') return parser.parse_args() def main(): args = get_args() sample_interval = args.sample_interval log_file = args.log_file log = Recoder(log_file) cpu_tester = None gpu_tester = None try: if not args.cpu_only: print_info("[INFO] Start to record GPU/CPU temperature in every {} sec.".format(str(sample_interval))) cpu_tester = CpuMonitor() gpu_tester = GpuMonitor() log.generate_table_header(cpu_tester.num, gpu_tester.num) else: print_warn("[WARN] Only collect CPU Temperature.") print_info("[INFO] Start to record CPU temperature in every {} sec.\033[0m".format(str(sample_interval))) cpu_tester = CpuMonitor() log.generate_table_header(cpu_tester.num) while True: timestamp = str(datetime.now()).split('.')[0] data_row = [timestamp] data_row.extend(cpu_tester.get_statistics()) if gpu_tester is not None: data_row.extend(gpu_tester.get_statistics()) log.write_record_file(','.join(data_row)) time.sleep(sample_interval) except KeyboardInterrupt: print() print_err("[ERROR] Keyboard Interrupted, temperature recording program exit.") exit() if __name__ == '__main__': main()
Huang-Junchen/hardware-tester
no_gui.py
no_gui.py
py
2,093
python
en
code
0
github-code
36
73694828905
# This program is a print queue simulator. # The program reads instructions off of a web page # pertaining to addition, removal, and printing of documents # as well as showing contents of the print queue # Assignment 2 for CISC 121, Summer 2017 # Author: Andy Wang import urllib.request ''' Reads instructions from a web page using the urlopen function in the urllib.request module. Places each line of instructions in list cmdList. Continues reading until an empty bytes variable is read. Returns cmdList ''' def readHtml(): html=b'temp' cmdList=[] response = urllib.request.urlopen("http://research.cs.queensu.ca/home/cords2/printsim.txt") while html!=b'': html = response.readline() data = html.decode('utf-8').split() cmdList.append(data) cmdList.remove([]) #removes the "decoded" empty bytes variable return cmdList ''' Calculates time reequired to process a document based on format, colour/bw, and pages. Parameter docInfo is the info for the relevant document. Returns -1 if file format is invalid (not pdf, docx, pptx, or jpg) Otherwise, returns estimated time ''' def calcTime(docInfo): time=0 if docInfo["type"]=="pdf": if docInfo["colour"]=="bw": time=time+4*int(docInfo["pages"]) else: time=time+30*int(docInfo["pages"]) elif docInfo["type"]=="pptx" or docInfo["type"]=="docx": if docInfo["colour"]=="bw": time=time+6*int(docInfo["pages"]) else: time=time+20*int(docInfo["pages"]) elif docInfo["type"]=="jpg": if docInfo["colour"]=="bw": time=time+10*int(docInfo["pages"]) else: time=time+60*int(docInfo["pages"]) else: return -1 #returns -1 for an invalid file format return time ''' Used to determine where to place a new document in queue. Parameters include the current queue and estimated time for the new document. Returns "head" if the document should be placed at the front. Otherwise, returns n, being the position in queue ''' def findPlace(currentQ,newTime): n=0 ptr=currentQ while ptr["data"]["time"]<=newTime and ptr["next"]!=None: ptr=ptr["next"] n=n+1 if n==0 and ptr["data"]["time"]>newTime: return "head" elif ptr["data"]["time"]>newTime: #in case the current ptr location contains a slower document n=n-1 return n ''' Function used to add a new document to queue. Parameters include info about the new document and the current queue. Ignores document if file format is invalid. Adds document based on position found using findPlace(). Special cases exist for the first document or a cleared/empty queue. Returns updated currentQ. ''' def submit(docInfo,currentQ): docData={"data":{"id":docInfo[1],"type":docInfo[2],"pages":docInfo[3],"colour":docInfo[4]}} docTime=calcTime(docData["data"]) docData["data"]["time"]=docTime if docTime==-1: #ignore document if file format is invalid pass else: print("Adding job",docData["data"]["id"],"to the queue. It will require",docTime,"seconds to process.") ptr=currentQ if currentQ==None: #special case for cleared queue currentQ={} currentQ["data"]=docData["data"] currentQ["next"]=None elif "data" not in currentQ: #special case for first document docData["next"]=None currentQ=docData else: position=findPlace(currentQ,docTime) if position=="head": docData["next"]=ptr currentQ=docData else: counter=0 while counter<position: #counts to n based on findPlace() ptr=ptr["next"] counter=counter+1 docData["next"]=ptr["next"] ptr["next"]=docData return currentQ ''' Shows the current print queue. Parameter is the current queue Prints the number of items in the queue and estimated total time. Optional lines to show all document data or job IDs ''' def queue(currentQ): #prints a linked list if currentQ==None: print("The queue is empty") else: ptr=currentQ printTime=0 inQueue=0 while ptr["next"]!=None: print(ptr["data"]) #optional line - shows all info for queue items #print(ptr["data"]["id"]) #optional line - shows specific queue items inQueue=inQueue+1 printTime=printTime+int(ptr["data"]["time"]) ptr=ptr["next"] print(ptr["data"]) #optional line - shows all info for queue #print(ptr["data"]["id"]) #optional line - shows specific queue item printTime=printTime+int(ptr["data"]["time"]) inQueue=inQueue+1 print("There are",inQueue,"items in queue. Estimated total print time is",printTime,"seconds.") ''' Function used to "print" first document in queue. Parameter is the current queue. Prints and removes the first document. Returns updated queue. ''' def printDoc(currentQ): if currentQ==None: print("There are no documents in queue") else: print("Printing job",currentQ["data"]["id"]) currentQ=currentQ["next"] return currentQ ''' Function used to removea specific document from the queue. Parameters include ID for document to be removed and current queue. Specific case when document to be removed is at the head. Returns updated queue. ''' def removeDoc(cmdInfo,currentQ): ptr=currentQ prevLoc={} print("Removing job",cmdInfo[1]) if cmdInfo[1]==ptr["data"]["id"] and prevLoc=={}: currentQ=currentQ["next"] elif cmdInfo[1]!=ptr["data"]["id"] and prevLoc=={}: prevLoc=currentQ ptr=ptr["next"] while ptr["next"]!=None: if cmdInfo[1]==ptr["data"]["id"]: prevLoc["next"]=ptr["next"] break ptr=ptr["next"] prevLoc=prevLoc["next"] return currentQ ''' Top level function. Determines command and calls appropriate function. Optional line allows pausing between each command ''' def main(): cmdList=readHtml() currentQ={"next":None} for i in range(len(cmdList)): command=cmdList[i][0] if command=="submit": currentQ=submit(cmdList[i],currentQ) elif command=="queue": queue(currentQ) elif command=="print": currentQ=printDoc(currentQ) elif command=="remove": currentQ=removeDoc(cmdList[i],currentQ) #input("Press enter to continue to the next command") print("") main()
AndyHFW/CISC-121-Assignments
W3 - Print Queue Simulator.py
W3 - Print Queue Simulator.py
py
6,842
python
en
code
0
github-code
36
14913237067
from __future__ import annotations import datetime from dataclasses import dataclass, field import random from typing import List, Dict, Any from chess_manager.M import turn_model MAX_STR_LEN = 122 def check_valid_int(user_int_input: Any) -> bool: """ Vérifie si l'input de l'utilisateur est un int valide. """ try: int(user_int_input) return True except ValueError: return False def check_valid_str(user_str_input: Any) -> bool | str: """ Vérifie si l'input de l'utilisateur est un str valide. """ if not 0 < len(user_str_input) < MAX_STR_LEN: return f"You must enter from 1 to {MAX_STR_LEN} char." return True TOURNAMENT_FORM_VALIDATOR: Dict = { 'name': check_valid_str, 'place': check_valid_str, 'description': check_valid_str, 'turn_nbr': check_valid_int, 'player_nbr': check_valid_int } def _shuffle_player_list(player_list: List) -> List: """Retourne une nouvelle liste mélangée""" randomised_list = player_list[:] random.shuffle(randomised_list) return randomised_list def _order_player_by_score(full_player_data: List) -> List: full_player_data.sort(key=lambda individual_player_data: individual_player_data[1], reverse=True) return full_player_data def _reset_player_pairing(ordered_player_data: List) -> List: """Visite chaque joueur de la liste reçue et en reinitialise le compteur d'adversaires""" for player_data in ordered_player_data: player_data[0].clear_player_pairing() return ordered_player_data def _make_player_pair(ordered_player_data: List) -> List: """ Reçoit une liste de (joueurs, score) et retourne une liste de paire (joueurs, score) classé par score décroissant. Si tous les joueurs ont déjà joué les uns contre les autres, les historiques de pairage sont réinitialisés """ player_pairs = list() working_list = ordered_player_data[:] adversary_index = 0 current_pairing_player = working_list.pop(0) while len(working_list) > 0: if current_pairing_player[0].has_played_against(working_list[adversary_index][0]): adversary_index += 1 if adversary_index >= len(working_list): print("Cannot make more player pair without player playing each other again") return _make_player_pair(_reset_player_pairing(ordered_player_data[:])) continue player_pairs.append((current_pairing_player, working_list.pop(adversary_index))) adversary_index = 0 if len(working_list) < 1: break current_pairing_player = working_list.pop(0) return player_pairs @dataclass class TournamentM: """Représentation d'un tournoi d'échec""" name: str place: str turn_nbr: int = 4 description: str = "No description" player_nbr: int = 0 players: List = field(default_factory=list) turn_list: List = field(default_factory=list) start_date: str | None = None end_date: str | None = None tournament_id: int = -1 def __post_init__(self) -> None: if self.start_date is None: self.start_date = datetime.datetime.now().strftime("%d/%m/%y %H:%M") def register_turn(self, turn: turn_model.TurnM) -> None: self.turn_list.append(turn) def get_current_turn_nbr(self) -> int: return len(self.turn_list) @property def is_finished(self) -> bool: if self.end_date is not None: return True if self.get_current_turn_nbr() < self.turn_nbr: return False return self.turn_list[-1].finished def end_tournament(self) -> None: if self.end_date is not None: return self.end_date = datetime.datetime.now().strftime("%d/%m/%y %H:%M") def get_next_turn_player_pair(self) -> List: if self.get_current_turn_nbr() > 0: return _make_player_pair(_order_player_by_score(self.turn_list[-1].get_turn_data())) player_list = _shuffle_player_list(self.players) player_data = [[player, 0] for player in player_list] return _make_player_pair(_order_player_by_score(player_data)) def from_obj_to_dict(self) -> Dict: return {'name': self.name, 'place': self.place, 'turn_nbr': int(self.turn_nbr), 'description': self.description, 'player_nbr': int(self.player_nbr), 'players': [player.player_id for player in self.players], 'turn_list': [turn.turn_id for turn in self.turn_list], 'start_date': self.start_date, 'end_date': self.end_date, 'tournament_id': self.tournament_id, }
AntoineArchy/Chessmanager
chess_manager/M/tournament_model.py
tournament_model.py
py
4,751
python
en
code
null
github-code
36
15491681290
# App Libraries import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State import dash_bootstrap_components as dbc import plotly.graph_objs as go import base64 # Replication strategy library from Asian_Option_CRR import * # Input of rep strat descriptions from inputDescriptions import list_input # Creating the app object from Dash library app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], #theme for modern-looking buttons, sliders, etc external_scripts=['https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/MathJax.js?config=TeX-MML-AM_CHTML', "./assets/mathjax.js"], #usage of LaTeX in the app meta_tags=[{"content": "width=device-width"}] #content gets adapted to user device width ) server = app.server # Author parameters bg_color="#506784", font_color="#F3F6FA" author = "Michel Vanderhulst" emailAuthor = "michelvanderhulst@hotmail.com" supervisor = "Prof. Frédéric Vrins" emailSupervisor = "frederic.vrins@uclouvain.be" logo1path = "./pictures/1200px-Louvain_School_of_Management_logo.svg.png" logo1URL = "https://uclouvain.be/en/faculties/lsm" logo2path = "./pictures/1280px-NovaSBE_Logo.svg.png" logo2URL = "https://www2.novasbe.unl.pt/en/" # Creating the app header def header(): return html.Div( id='app-page-header', children=[ html.Div(children=[html.A(id='lsm-logo', children=[html.Img(style={'height':'6%', 'width':'6%'}, src='data:image/png;base64,{}'.format(base64.b64encode(open(f"{logo1path}", 'rb').read()).decode()))], href=f"{logo1URL}", target="_blank", #open link in new tab style={"margin-left":"10px"} ), html.Div(children=[html.H5("Asian option replication strategy app"), html.H6("Cox-Ross-Rubinstein model") ], style={"display":"inline-block", "font-family":'sans-serif','transform':'translateY(+32%)', "margin-left":"10px"}), html.Div(children=[dbc.Button("About", id="popover-target", outline=True, style={"color":"white", 'border': 'solid 1px white'}), dbc.Popover(children=[dbc.PopoverHeader("About"), dbc.PopoverBody([f"{author}", f"\n {emailAuthor}", html.Hr(), f"This app was built for my Master's Thesis, under the supervision of {supervisor} ({emailSupervisor})."]), ], id="popover", is_open=False, target="popover-target"), ], style={"display":"inline-block","font-family":"sans-serif","marginLeft":"55%", "margin-right":"10px"}), html.A(id="nova-logo", children=[html.Img(style={"height":"9%","width":"9%"}, src="data:image/png;base64,{}".format(base64.b64encode(open(f"{logo2path}","rb").read()).decode()))], href=f"{logo2URL}", target="_blank", style={} ) ] ,style={"display":"inline-block"}), ], style={ 'background': bg_color, 'color': font_color, "padding-bottom": "10px", "padding-top":"-10px" } ) # Creating the app body def body(): return html.Div(children=[ html.Div(id='left-column', children=[ dcc.Tabs( id='tabs', value='About this App', children=[ dcc.Tab( label='About this App', value='About this App', children=html.Div(children=[ html.Br(), html.H4('What is this app?', style={"text-align":"center"}), html.P(f"""This app computes the replication strategy of Asian options on a set of given inputs, in the Cox-Ross-Rubinstein framework"""), html.P(f"""The goal is to showcase that under the Cox-Ross-Rubinstein model assumptions (see "Model" tab), the price \(V_0\) given by the pricing formula is "arbitrage-free". Indeed, we show that in this case, it is possible to build a strategy that"""), html.Ul([html.Li("Can be initiated with \(V_0\) cash at time \(0\)."), html.Li('Is self-financing (i.e., no need to "feed" the strategy with extra cash later'), html.Li("Will deliver exactly the payoff of the option at maturity") ]), html.Hr(), html.P([""" The considered options are Asian options paying \(\psi(T)\) at maturity \(T\) where \(\psi(X)\) is the payoff function. Defining \(S_{ave}(T)\) as the underlying asset average price, we have \ that for a call, the payoff function is \(\psi(T)=max(0,S_{ave}(T)-K)\) and for a put \(\psi(S_T)=max(0,K-S_{ave}(T))\) where K is the strike price."""]), html.Hr(), html.P("""Read more about options: https://en.wikipedia.org/wiki/Option_(finance)"""), ]) ), dcc.Tab( label="Model", value="Model", children=[html.Div(children=[ html.Br(), html.H4("Model assumptions", style={"text-align":"center"}), "Its main assumptions are:", html.Ul([html.Li("Does not consider dividends and transaction costs"), html.Li("The volatility and risk-free rate are assumed constant"), html.Li("Fraction of shares can be traded"), html.Li("The underlying asset can only either go 'up' by a fixed factor \(u<1\) or 'down' by \(0<d<1\)."), html.Li("The log-returns are independent at all periods")]), html.Hr(), html.H4("Underlying asset dynamics", style={"text-align":"center"}), html.P([ """ Under CRR, the underlying asset follows a geometric random walk with drift \(\mu\delta\) and volatility \(\sigma\sqrt{\delta}\). The probability to go \ 'up' and 'down' are respectively \(p\) and \(q=1-p\) (under \(\mathcal{P}\)).The stock price at period \(i\) can be modeled as a function of a binomial \ random variable, and the constant 'up' and 'down' factors computed: $$u=e^{\mu\delta+\sigma\sqrt{\delta}}$$ $$d=e^{\mu\delta-\sigma\sqrt{\delta}}$$ \ The \(\mathcal{Q}\)-probability allowing the discounted stock price to be a martingale amounts to the \(\\tilde{p}\) value (under \(\mathcal{Q}\)) \ that leads to the martingale property: \(\\tilde{p}=\\frac{e^{r}-d}{u-d}\). """]), html.Hr(), html.H4("Option price", style={"text-align":"center"}), html.P([""" With the CRR, the stock tree and the option intrinsic value are easily computed at all nodes. Under the pricing measure \(\mathcal{Q}\), \ the option price of a node is simply the discounted value of the two children nodes. The price tree is therefore filled backwards, starting from the leaves (i.e. the payoff).\ The pricing formula is thus $$V_i=e^{-r\\delta}(V_{i+1}\\tilde{p}+V_{i+1}\\tilde{q})$$ """]), html.Hr(), html.H4("Academic references", style={"text-align":"center"}), html.Ul([html.Li("Vrins, F. (2020). Course notes for LLSM2225: Derivatives Pricing. (Financial Engineering Program, Louvain School of Management, Université catholique de Louvain)"), html.Li("Shreve, S. E. (2004). Stochastic Calculus for Finance I The Binomial Asset Pricing Model (2nd ed.). Springer Finance.") ]), ])]), # # dcc.Tab( label="Appr-oach", value="Methodology", children=[html.Div(children=[ html.Br(), html.H4("Methodology followed", style={"text-align":"center"}), html.P([ """ To prove that the risk-neutral price is arbitrage-free, let us try to perfectly replicate it with a strategy. If the strategy is successfull, then the price is unique and therefore arbitrage-free. For an Asian option, we will also denote with \(s_0\) the stock price at time 0 and \(Y_n=\sum_{k=0}^{n}s_k\) the sum of the stock prices between times zero and n. From there, the payoff at time 3 will be \((\\frac{1}{4}Y_{3}-K)^+\) with strike K. Then, let \(V_n(s,y)\) be the price of the Asian option at node n if \(s_n=s\) and \(Y_n=y\). """]), html.Hr(), html.H4("Replicating portfolio", style={"text-align":"center"}), html.P([ """ Let us start a replication strategy based on the option price: \(\Pi_{0} = V_{0}(s,y)\). The portfolio is composed of a cash account and a equity account. At each period, the number of shares to hold is given by $$\Delta_{n}(s,y) = \\frac{V_{n+1}(us, y + us)-V_{n+1}(ds, y + ds)}{(u-d)s}$$ The initial amount of cash will be \(c_{0} = \Pi_{0} - \Delta_{0}(s,y)s_{0}\). At each node, a portfolio rebalancing is needed to ensure that the portfolio value is equal to the option price. Before the rebalancing, \(\Delta\) is the same from node to node, the cash account grew at the risk-free rate \(c_{n}=c_{n-1}e^{r}\), and the portfolio is the sum of both equity and cash positions $$\Pi_{n} = c_{n}+\Delta_{n}(s,y)s_{n}$$ The rebalancing is done by updating the shares to hold $$\Delta_{n}(s,y) = \\frac{V_{n+1}(us, y + us)-V_{n+1}(ds, y + ds)}{(u-d)s}$$ and ensuring that the value of the strategy before and after the rebalancing is the same $$c_{n}=\pi_{n}-(\Delta_{n-1}-\Delta_{n})s_{n}$$ The tree is computed forward, and will at all times replicate with option price. At the end of it we obtain the option payoff. """]), ])]), # # dcc.Tab( label='Input', value='Input', children=html.Div(children=[ html.Br(), # html.P( """ Hover your mouse over any input to get its definition. """ ), dcc.Dropdown( id='CallOrPut', options=[{'label':'Asian Call option', 'value':"Call"}, {'label':'Asian Put option', 'value':"Put"}], value='Call'), # html.Br(), # html.Div(children=[html.Label('Spot price', title=list_input["Spot price"], style={'font-weight': 'bold', "text-align":"left", "width":"25%",'display': 'inline-block'} ), dcc.Input(id="S", value=100, type='number', style={"width":"16%", 'display': 'inline-block'}), html.P("",id="message_S", style={"font-size":12, "color":"red", "padding":5, 'width': '55%', "text-align":"left", 'display': 'inline-block'}) ] ), html.Div(children=[html.Label("Strike", title=list_input["Strike"], style={'font-weight': 'bold',"text-align":"left", "width":"25%",'display': 'inline-block'} ), dcc.Input(id="K", value=100, type='number', style={"width":"16%", 'display': 'inline-block'}), html.P("",id="message_K", style={"font-size":12, "color":"red", "padding":5, 'width': '55%', "text-align":"left", 'display': 'inline-block'}) ], ), html.Div(children=[html.Label("Drift", title=list_input["Drift"], style={'font-weight': 'bold', 'display': 'inline-block'}), html.Label(id="drift", style={'display': 'inline-block'}), ]), # dcc.Slider(id='mu', min=-0.30, max=0.30, value=0.10, step=0.01, marks={-0.30: '-30%', 0:"0%", 0.30: '30%'}), # html.Div([html.Label('Volatility', title=list_input["Volatility"], style={'font-weight': 'bold', "display":"inline-block"}), html.Label(id="sigma", style={"display":"inline-block"}),]), # dcc.Slider(id='vol', min=0, max=0.5, step=0.01, value=0.15, marks={0:"0%", 0.25:"25%", 0.50:"50%"}), # html.Div([html.Label('Risk-free rate', title=list_input["Risk-free rate"], style={'font-weight': 'bold', "display":"inline-block"}), html.Label(id="riskfree", style={"display":"inline-block"}),]), dcc.Slider(id='Rf', min=0, max=0.1, step=0.01, value=0.05, marks={0:"0%", 0.05:"5%", 0.1:"10%"}), # html.Div([html.Label('Maturity', title=list_input["Maturity"], style={'font-weight':'bold', "display":"inline-block"}), html.Label(id="matu", style={"display":"inline-block"}),]), dcc.Slider(id='T', min=0.25, max=5, marks={0.25:"3 months", 3:"3 years", 5:"5 years"}, step=0.25, value=3), # html.Br(), html.Div(children=[html.Label('Tree periods: ', title=list_input["Tree periods"], style={'font-weight': 'bold', "text-align":"left", "width":"30%",'display': 'inline-block'} ), dcc.Input(id="tree_periods", value=3, type='number', style={"width":"16%", 'display': 'inline-block'}), html.P("",id="message_tree", style={"font-size":12, "color":"red", "padding":5, 'width': '40%', "text-align":"left", 'display': 'inline-block'}) ], ), ])), ],),], style={'float': 'left', 'width': '25%', 'margin':"30px"}), ]) # Creating the app graphs def graphs(): return html.Div(id='right-column', children=[ html.Br(), html.Div([ html.Div(children=[dcc.Markdown(children=''' #### Cumulative sum of stock'''), dcc.Graph(id='option_intrinsic'),], style={"float":"right", "width":"45%", "display":"inline-block"}), html.Div(children=[dcc.Markdown(children=''' #### Stock simulation (GRW) '''), dcc.Graph(id='stock_simul'),], style={"float":"right", "width":"55%", "display":"inline-block"}), ]), html.Div([ html.Div(children=[dcc.Markdown(children=''' #### Option price'''), dcc.Graph(id='option_price'),], style={"float":"right", "width":"45%", "display":"inline-block"}), html.Div(children=[dcc.Markdown(children=''' #### Portfolio after rebalancing'''), dcc.Graph(id='port_details'),], style={"float":"right", "width":"55%", "display":"inline-block"}), ]), html.Div([ html.Div(children=[dcc.Markdown(children=''' #### Cash account after rebalancing'''), dcc.Graph(id='cash_acc'),], style={"float":"right", "width":"45%", "display":"inline-block"}), html.Div(children=[dcc.Markdown(children=''' #### Shares held after rebalancing'''), dcc.Graph(id='nbr_shares'),], style={"float":"right", "width":"55%", "display":"inline-block"}), ]), ], style={'float': 'right', 'width': '70%'}) # Building together the app layout: header, body and graphs app.layout = html.Div( id='main_page', children=[ dcc.Store(id='memory-output'), header(), body(), graphs(), ], ) # App interactivity 1: calling the replication strategy everytime the user changes an input @app.callback( Output('memory-output', 'data'), [Input('CallOrPut', 'value'), Input("S","value"), Input("K", "value"), Input("Rf", "value"), Input("T","value"), Input("mu","value"), Input("vol", "value"), Input("tree_periods", "value"),]) def get_rep_strat_data(CallOrPut, S, K, Rf,T,mu,vol,tree_periods): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = RepStrat_Asian_Option_CRR(CallOrPut, S, K, Rf, T, mu, vol, tree_periods) return nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown # App interactivity 2: plot of stock simulation + CRR u, d, probUp & probDown values @app.callback( Output('stock_simul', 'figure'), [Input('memory-output', 'data'),]) def graph_stock_simul(data): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data return{ 'layout': go.Layout( title={'yref':"paper", 'y':1, "yanchor":"bottom"}, #margin={"t":15}, margin=dict( l=0, #r=50, #b=100, t=15, #pad=4 ), # showlegend=False, xaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} yaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} legend=dict( x=0, y=0.8, traceorder='normal', bgcolor='rgba(0,0,0,0)'), ), 'data': [ go.Scatter( x=edge_x, y=edge_y, mode='lines', line=dict(width=0.5), hoverinfo='none', showlegend=False, ), go.Scatter( x=node_x, y=node_y, mode='markers+text', marker=dict(size=40), text=stocksLabel, showlegend=False, hoverinfo='none', ), go.Scatter( x=[None], y=[None], mode='markers', name=f'Up factor: {u}' ), go.Scatter( x=[None], y=[None], mode='markers', name=f'Down factor: {d}' ), go.Scatter( x=[None], y=[None], mode='markers', name=f'Prob up: {probUp}' ), go.Scatter( x=[None], y=[None], mode='markers', name=f'Prob down: {probDown}' ), ], } # App interactivity 3: plot of portfolio (cash + equity accounts) @app.callback( Output('port_details', 'figure'), [Input('memory-output', 'data'),]) def graph_portf_details(data): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data return{ 'layout': go.Layout( title={'yref':"paper", 'y':1, "yanchor":"bottom"}, showlegend=False, margin=dict( l=0, #r=50, #b=100, t=15, #pad=4 ), xaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} yaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,} # numbers below} ), 'data': [ go.Scatter( x=edge_x, y=edge_y, mode='lines', line=dict(width=0.5), hoverinfo='none', ), go.Scatter( x=node_x, y=node_y, mode='markers+text', marker=dict(size=40), text=portfolioLabel, hoverinfo='none', ), ], } # App interactivity 4: plot of number of shares to hold at all nodes @app.callback( Output('nbr_shares', 'figure'), [Input('memory-output', 'data'),]) def graph_nbr_of_shares(data): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data return{ 'layout': go.Layout( title={'yref':"paper", 'y':1, "yanchor":"bottom"}, showlegend=False, margin=dict( l=0, #r=50, #b=100, t=15, #pad=4 ), xaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} yaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,} # numbers below} ), 'data': [ go.Scatter( x=edge_x, y=edge_y, mode='lines', line=dict(width=0.5), hoverinfo='none', ), go.Scatter( x=node_x, y=node_y, mode='markers+text', marker=dict(size=40), text=nbrofsharesLabel, hoverinfo='none', ), ], } # App interactivity 5: cash account @app.callback( Output('cash_acc', 'figure'), [Input('memory-output', 'data'),]) def graph_cash_account(data): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data return{ 'layout': go.Layout( title={'yref':"paper", 'y':1, "yanchor":"bottom"}, showlegend=False, margin=dict( l=0, #r=50, #b=100, t=15, #pad=4 ), xaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} yaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,} # numbers below} ), 'data': [ go.Scatter( x=edge_x, y=edge_y, mode='lines', line=dict(width=0.5), hoverinfo='none', ), go.Scatter( x=node_x, y=node_y, mode='markers+text', marker=dict(size=40), text=cashLabel, hoverinfo='none', ), ], } # App interactivity 6: option price through risk-neutral valuation @app.callback( Output('option_price', 'figure'), [Input('memory-output', 'data'),]) def graph_option_pricee(data): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data return{ 'layout': go.Layout( title={'yref':"paper", 'y':1, "yanchor":"bottom"}, showlegend=False, margin=dict( l=0, #r=50, #b=100, t=15, #pad=4 ), xaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} yaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,} # numbers below} ), 'data': [ go.Scatter( x=edge_x, y=edge_y, mode='lines', line=dict(width=0.5), hoverinfo='none', ), go.Scatter( x=node_x, y=node_y, mode='markers+text', marker=dict(size=40), text=optionpriceLabel, hoverinfo='none', ), ], } # App interactivity 7: cumulative sum of stock price for the asian option average @app.callback( Output('option_intrinsic', 'figure'), [Input('memory-output', 'data'),]) def graph_option_cumsum(data): nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data return{ 'layout': go.Layout( title={'yref':"paper", 'y':1, "yanchor":"bottom"}, showlegend=False, margin=dict( l=0, #r=50, #b=100, t=15, #pad=4 ), xaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,}, # numbers below} yaxis={'showgrid': False, # thin lines in the background 'zeroline': False, # thick line at x=0 'visible': False,} # numbers below} ), 'data': [ go.Scatter( x=edge_x, y=edge_y, mode='lines', line=dict(width=0.5), hoverinfo='none', ), go.Scatter( x=node_x, y=node_y, mode='markers+text', marker=dict(size=40), text=intrinsicLabel, hoverinfo='none', ), ], } # App input checks @app.callback(Output('message_S', 'children'), [Input('S', 'value')]) def check_input_S(S): if S<0: return f'Cannot be lower than 0.' else: return "" @app.callback(Output('message_K', 'children'), [Input('K', 'value')]) def check_input_K(K): if K<0: return f'Cannot be lower than 0.' else: return "" @app.callback(Output('message_tree', 'children'), [Input('tree_periods', 'value')]) def check_input_K(tree__periods): if tree__periods<1: return f'Cannot be lower than 1.' else: return "" # App input visuals @app.callback(Output('drift', 'children'), [Input('mu', 'value')]) def display_value(value): return f': {int(value*100)}%' @app.callback(Output('sigma', 'children'), [Input('vol', 'value')]) def display_value2(value): return f': {int(value*100)}%' @app.callback(Output('riskfree', 'children'), [Input('Rf', 'value')]) def display_value3(value): return f': {int(value*100)}%' @app.callback(Output('matu', 'children'), [Input('T', 'value')]) def display_value4(value): if value==0.25 or value==0.5 or value==0.75: return f": {int(value*12)} months" elif value == 1: return f': {value} year' else: return f': {value} years' # Opens the "About" button top right @app.callback( Output("popover", "is_open"), [Input("popover-target", "n_clicks")], [State("popover", "is_open")], ) def toggle_popover(n, is_open): if n: return not is_open return is_open # Main function, runs the app if __name__ == '__main__': app.run_server(debug=True)
MichelVanderhulst/web-app-asian-option-crr
app.py
app.py
py
33,229
python
en
code
0
github-code
36
70970695463
# -*- coding: utf-8 -*- """ Created on Fri Dec 4 11:20:09 2020 @author: KANNAN """ from flask import Flask, render_template, request import emoji #import sklearn import pickle model = pickle.load(open("diabetes_logreg.pkl", "rb")) app = Flask(__name__) @app.route('/') def home(): return render_template('Diabetes.html') @app.route('/predict', methods = ["GET", "POST"]) def Diabetes(): if request.method == "POST": Glucose = float(request.form["Glucose"]) if (Glucose > 0 and Glucose < 140): Glucose_Prediabetes = 0 Glucose_Diabetes = 0 elif(Glucose >= 140 and Glucose < 200): Glucose_Prediabetes = 1 Glucose_Diabetes = 0 else: Glucose_Prediabetes = 0 Glucose_Diabetes = 1 BloodPressure = float(request.form["BloodPressure"]) if (BloodPressure > 0 and BloodPressure < 80): BloodPressure_Hyper_St1 = 0 BloodPressure_Hyper_St2 = 0 BloodPressure_Hyper_Emer = 0 elif (BloodPressure >=80 and BloodPressure < 90): BloodPressure_Hyper_St1 = 1 BloodPressure_Hyper_St2 = 0 BloodPressure_Hyper_Emer = 0 elif (BloodPressure >= 90 and BloodPressure < 120): BloodPressure_Hyper_St1 = 0 BloodPressure_Hyper_St2 = 1 BloodPressure_Hyper_Emer = 0 else: BloodPressure_Hyper_St1 = 0 BloodPressure_Hyper_St2 = 0 BloodPressure_Hyper_Emer = 1 BMI = float(request.form["BMI"]) if (BMI > 0 and BMI < 18.5): BMI_Normal = 0 BMI_Overweight = 0 BMI_Obese = 0 elif (BMI >= 18.5 and BMI < 24.9): BMI_Normal = 1 BMI_Overweight = 0 BMI_Obese = 0 elif (BMI >= 24.9 and BMI < 29.9): BMI_Normal = 0 BMI_Overweight = 1 BMI_Obese = 0 else: BMI_Normal = 0 BMI_Overweight = 0 BMI_Obese = 1 Insulin = float(request.form["Insulin"]) if (Insulin >= 100 and Insulin <= 126): Insulin_Normal = 1 else: Insulin_Normal = 0 Pregnancies = float(request.form["Pregnancies"]) Pregnancies = (Pregnancies - 3.874593) / 3.443637 SkinThickness = float(request.form["SkinThickness"]) SkinThickness = (SkinThickness - 29.180782) / 8.94289800 DiabetesPedigreeFunction = float(request.form["DiabetesPedigreeFunction"]) DiabetesPedigreeFunction = (DiabetesPedigreeFunction - 0.466471) / 0.333203 Age = float(request.form["Age"]) Age = (Age - 33.594463) / 12.016168 prediction = model.predict([[ Pregnancies, SkinThickness, DiabetesPedigreeFunction, Age, BMI_Normal, BMI_Overweight, BMI_Obese, BloodPressure_Hyper_St1, BloodPressure_Hyper_St2, BloodPressure_Hyper_Emer, Glucose_Prediabetes, Glucose_Diabetes, Insulin_Normal ]]) output = prediction[0] if output == 0: text = "You are Healthy!!"+"\U0001F603" else: text = "You have Diabetes"+"\U0001F61E" return render_template('Diabetes.html',prediction_text = text) return render_template('Diabetes.html') if __name__ == '__main__': app.run()
GuruYohesh/ML
Diabetes Prediction/Diabetes_app.py
Diabetes_app.py
py
3,799
python
en
code
0
github-code
36
41587212510
import os import numpy as np import warnings from equilib import equi2cube import cv2 import torch from PIL import Image def main(): image_path = "./data/images/000001.jpg" equi_img = Image.open(image_path) img_mode = equi_img.mode equi_img = np.asarray(equi_img) print("equi_img: ", equi_img.shape) equi_img = np.transpose(equi_img, (2, 0, 1)) print(equi_img.shape) rots = { "roll": 0, "pitch": 0, "yaw": 0 } mode = "bilinear" # mode = "bicubic" # mode = "nearest" equi_img_torch = torch.from_numpy(equi_img).to('cuda') #cube = equi2cube(equi = equi_img,cube_format="horizon", rots=rots, w_face=3368, z_down=False, mode=mode) cube = equi2cube(equi = equi_img_torch,cube_format="horizon", rots=rots, w_face=3368, z_down=False, mode=mode) cube = cube.to('cpu').detach().numpy().copy() print("cube.shape", cube.shape) print("type: ", type(cube)) print("size: ", cube.size, "shape: ", cube.shape) cube = cube.transpose(1,2,0) out_image = Image.fromarray(cube, img_mode) out_path = "./data/results/00001.jpg" out_image.save(out_path) if __name__ == "__main__": main()
motoki/nsworks
src/sphere2cube.py
sphere2cube.py
py
1,211
python
en
code
0
github-code
36
29402978628
from users import User from userfile import UserFile from places import Places from get_poi import TripAdvisorApi from poi_data import PoiData # Initialise user object and conduct API_key_check # If no API key, user given instructions to subscribe. # API_key saved as a persistent environment variable in src/.env and # application rebooted, If there is a stored API_key, application # will proceed. user = User() # place selection place = Places() selected_region = place.select_region() # userfile object creation to search past history and display result if exists user_file = UserFile(selected_region, user.path) user_file.searchfile = f"{user_file.region[0]}-{user_file.region[1]}.json" # history check at region level history_check = user_file.search_and_display_data(selected_region) # history check at city level if not history_check: user_file.city = place.select_city(user_file.region) user_file.searchfile = f"{user_file.city[0]}-{user_file.city[1]}.json" history_check = user_file.search_and_display_data(user_file.city) # look up TripAdvisorAPI if region and city does not exist in cached file # Extract and consolidate data # Save and display data if not history_check: search_api = TripAdvisorApi(user_file.region, user_file.city) api_results = search_api.poi_search() results_data = PoiData(api_results[0], api_results[1]) results_data.extract() results_data.consolidate_categories() # Save and display data user_file.read_flag_and_save(results_data.place_info, api_results[2])
ashley190/travelapp
src/main.py
main.py
py
1,541
python
en
code
0
github-code
36
35099642670
from django.contrib.auth.models import Group from mf.crud.models import Dolar, HistoryOperations, Permisology from mf.user.models import User from django.utils import timezone from datetime import date, datetime, timedelta def convertToDecimalFormat(n): return n.replace('.', '').replace(',', '.') def get_dollar(): data = [] try: dolar1 = Dolar.objects.using('default').get(pk=1) dl1 = float(dolar1.dolar) except: new_dolar1 = Dolar() new_dolar1.dolar = '1000000' new_dolar1.save() dolar1 = Dolar.objects.using('default').get(pk=1) dl1 = float(dolar1.dolar) try: dolar2 = Dolar.objects.using('default').get(pk=2) dl2 = float(dolar2.dolar) except: new_dolar2 = Dolar() new_dolar2.dolar = '1200000' new_dolar2.save() dolar1 = Dolar.objects.using('default').get(pk=1) dl1 = float(dolar1.dolar) data = { 'dolar1': dl1, 'dolar2': dl2 } return data def ValidatePermissions(perms, requestGroup): autorized = False try: permsRequired = perms pk = requestGroup.id group = Group.objects.get(pk=pk) permsRequired = perms for p in permsRequired: if not group.permissions.filter(codename=p).exists(): autorized = False break else: autorized = True except: autorized = False return autorized def RegisterOperation(db, user, action): date = timezone.localtime(timezone.now()) result = 0 try: h = HistoryOperations() h.datejoined = date.strftime('%Y-%m-%d | %H:%M:%S %p') h.userSession_id = user h.description = action h.save() except: result = 1 return result def get_q_events_today(): data = 0 try: start = date.today() end = start + timedelta(days=7) start_date = start.strftime('%Y-%m-%d') end_date = end.strftime('%Y-%m-%d') total = 0 search = Permisology.objects.all() if len(start_date) and len(end_date): search = search.filter(day__range=[start_date, end_date]) for s in search: total = int(total) + 1 data = total except: pass return data def get_events_today(): data = [] total = 0 start = date.today() end = start + timedelta(days=7) start_date = start.strftime('%Y-%m-%d') end_date = end.strftime('%Y-%m-%d') search = Permisology.objects.all() if len(start_date) and len(end_date): search = search.filter(day__range=[start_date, end_date]) for s in search: data.append( { 'name': s.name, 'description': s.description, 'day': s.day.strftime('%Y-%m-%d'), } ) return data
isela1998/facebook
app/mf/crud/functions.py
functions.py
py
2,923
python
en
code
0
github-code
36
71335908263
from os import * from sys import * from collections import * from math import * ''' Following is the Binary Tree node structure: class TreeNode: def __init__(self, data=0, left=None, right=None): self.data = data self.left = left self.right = right ''' def getInOrderTraversal(root): # Write your code here. stack = [] result = [] subtree = root while stack or subtree: if subtree: stack.append(subtree) subtree = subtree.left else: subtree = stack.pop() result.append(subtree.data) subtree= subtree.right return result
architjee/solutions
CodingNinjas/inorder traversal without recursion.py
inorder traversal without recursion.py
py
680
python
en
code
0
github-code
36
74160075943
''' l = [i for i in range(len_string - len_sub + 1) if string[i:i + len_sub] == sub_string] 1.If you want to find the first index of the substring in the original string l[0] is your destination 2.If you want to find every index of the snstring in the original l[::] is what you want 3.If you want the number of times that the substring occurs in the given string len(l) is what you want ''' def count_substring(string, sub_string): len_string = len(string) len_sub = len(sub_string) n = sum([1 for i in range(len_string - len_sub + 1) if string[i:i + len_sub] == sub_string]) return n def count_substring(string, sub_string): #use recursion and a built in function if string.find(sub_string) < 0: return 0 else: return 1 + count_substring(string[string.find(sub_string) + 1:], sub_string) def hackerrankInString(s): # This function decide whether the substring in string(sub_string is Not secutive in the original string) # All the characters in substring in string(the same order also), but interrupted by some noises #Example: sub_string:hackerrank string:hereiamstackerrank sub_string = 'hackerrank' length = len(sub_string) index = 0 for c in s: if c == sub_string[index]: index += 1 if index == length: return 'YES' return 'NO'
CodingProgrammer/HackerRank_Python
(All)Find_a_string.py
(All)Find_a_string.py
py
1,362
python
en
code
0
github-code
36
31700687852
from __future__ import unicode_literals import os import shutil import unittest from doctpl.core import TemplateInfo class TemplateInfoTest(unittest.TestCase): @classmethod def setUpClass(cls): # setup TemplateInfo. TemplateInfo.CONFIG_DIR = os.path.join(os.getcwd(), '.doctpl') TemplateInfo.setup() # mkdir testdir. cls.testdir = os.path.join( TemplateInfo.CONFIG_DIR, 'testdir', ) os.mkdir(cls.testdir) @classmethod def tearDownClass(cls): shutil.rmtree(cls.testdir) def test_avaliable_templates(self): self.assertEqual( set(TemplateInfo.template_objects), {'template_a', 'template_b'}, ) def test_copy_for_not_existed_file(self): target_not_exist = os.path.join( self.testdir, 'not_exist', ) if os.path.exists(target_not_exist): os.remove(target_not_exist) template_object = TemplateInfo.template_objects['template_a'] path = template_object.copy_to('./.doctpl/testdir/not_exist') self.assertEqual(path, target_not_exist) with open(path, 'r') as f: self.assertEqual( f.read(), "template_a's content.\n", ) def test_copy_for_existed_file(self): target_exist = os.path.join( self.testdir, 'exist') # create empty file. open(target_exist, 'w').close() template_object = TemplateInfo.template_objects['template_a'] with self.assertRaises(Exception) as context: template_object.copy_to('./.doctpl/testdir/exist') self.assertIn( 'Already Existed', ''.join(context.exception.args), ) if __name__ == '__main__': unittest.main()
huntzhan/DocTemplate
test.py
test.py
py
1,848
python
en
code
0
github-code
36
36278744993
import re from pprint import pprint import csv from decorator import to_log if __name__ == '__main__': # читаем адресную книгу в формате CSV в список contacts_list with open("phonebook_raw.csv", encoding='utf-8') as f: rows = csv.reader(f, delimiter=",") contacts_list = list(rows) # rewriting phone numbers phone_num = r'(\+7|8)\s?\(?(\w{3})\)?\s?-?(\w{3})-?(\w{2})-?(\w{2})(\s?)(\(?((доб\.)\s(\w{4}))\)?)?' num_format = r'+7(\2)\3-\4-\5\6\9\10' i = 1 form_list = contacts_list while i < len(contacts_list): form_list[i][5] = re.sub(phone_num, num_format, contacts_list[i][5]) i += 1 # pprint(form_list) # put names, surnames... as different elements @to_log(path=r"C:\Users\olesy\PycharmProjects\regex_decorated\log.txt") def arrange_names(some_list): new_list = some_list for n in range(0, 2): for i in range(1, 9): if re.search(r'\s', some_list[i][n]): name = some_list[i][n].split(' ') if n == 0: if some_list[i][1] != '': name.extend(some_list[i][3:]) new_list[i] = name else: name.extend(some_list[i][2:]) new_list[i] = name else: new_list[i][n] = name[0] new_list[i][n + 1] = name[1] else: pass return new_list arrange_names(form_list)
OysterLover/regex_decorated
main.py
main.py
py
1,633
python
en
code
0
github-code
36
40451279379
import random def gtn(): sec_num = random.randint(1, 1000) no_of_tries = 0 print("Welcome to Guess The Number Game!") print("I'm thinking of a number between 1 and 1000. Can you guess it?") while True: user_guess = int(input("Enter your guess: ")) no_of_tries += 1 if user_guess < sec_num: print("Too low! Try again.") elif user_guess > sec_num: print("Too high! Try again.") else: print("Congratulations! You guessed the number in", no_of_tries, "tries.") break
Jayasri2021/Guess_the_number_game
function.py
function.py
py
577
python
en
code
1
github-code
36
3761949834
import cv2 import numpy as np img2 = cv2.imread("images.jpg") img1 = cv2.imread("new quantum.PNG") rows, cols, channels = img2.shape # Reading image details roi = img1[0:rows, 0:cols] img2g = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # converting to grayscale # defining mask , makes our logo black and background white ret, mask = cv2.threshold(img2g, 220, 255, cv2.THRESH_BINARY_INV) # cv2.imshow('mask',mask) mask_inv = cv2.bitwise_not(mask) # defining non_masked area # adds our inv_mask to region of image of main image img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv) img_fg = cv2.bitwise_and(img2, img2, mask=mask) # our logo after adding mask dst = img_bg+img_fg # we get our logo with our original image background # swaps the region of image original image to logo with same backgound img1[0:rows, 0:cols] = dst cv2.imshow('res', img1) cv2.imwrite('result.jpg', img1) cv2.waitKey(0) cv2.destroyAllWindows()
AirbotsBetaProject/Day-6
D6-04Dheeraj/Source Code/adding_logo_with_no_background.py
adding_logo_with_no_background.py
py
929
python
en
code
0
github-code
36
40252587840
while(1) : data = list(map(int, input().split())) data.sort() if (data[0]==0) and (data[1]==0) and (data[2]==0) : break else : if data[2]**2 == data[0]**2 + data[1]**2 : print('right') else : print('wrong')
parksjsj9368/TIL
ALGORITHM/BAEKJOON/SOURCE/15. PrimeNumber(소수 판별)/4. 직각삼각형.py
4. 직각삼각형.py
py
283
python
zh
code
0
github-code
36
34108283515
A = [int(i) for i in open('26.txt') if ' ' not in i] A.sort() s = k = 0 while s < 8200 and k < 970: s += A[k] k += 1 s -= A[k-1] + A[k-2] maxi = 0 for i in range(k-2, len(A)): if s + A[i] > 8200: break if maxi < A[i]: maxi = A[i] print(k-1, maxi)
alex3287/ege
2021/demo_26.py
demo_26.py
py
278
python
en
code
0
github-code
36
37634246750
# A transformation sequence from word beginWord to word endWord using a dictionary wordList is a sequence of words such that: # The first word in the sequence is beginWord. # The last word in the sequence is endWord. # Only one letter is different between each adjacent pair of words in the sequence. # Every word in the sequence is in wordList. # Given two words, beginWord and endWord, and a dictionary wordList, return the number of words in the shortest transformation sequence from beginWord to endWord, or 0 if no such sequence exists. # Example 1: # Input: beginWord = "hit", endWord = "cog", wordList = ["hot","dot","dog","lot","log","cog"] # Output: 5 # Explanation: One shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog" with 5 words. # Example 2: # Input: beginWord = "hit", endWord = "cog", wordList = ["hot","dot","dog","lot","log"] # Output: 0 # Explanation: The endWord "cog" is not in wordList, therefore there is no possible transformation. # Constraints: # 1 <= beginWord.length <= 10 # endWord.length == beginWord.length # 1 <= wordList.length <= 5000 # wordList[i].length == beginWord.length # beginWord, endWord, and wordList[i] consist of lowercase English letters. # beginWord != endWord # All the strings in wordList are unique. from collections import deque class Solution: def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int: graph = {} for word in wordList: for i in range(len(word)): graph.setdefault(word[:i]+'*'+word[i+1:],[]).append(word) #print(graph) queue = deque([(beginWord,1)]) visited = set([beginWord]) while queue: curword, length = queue.popleft() #print(curword,length) if curword == endWord: return length for i in range(len(curword)): pattern = curword[:i]+'*'+curword[i+1:] #print("pattern",pattern) for nextWord in graph.get(pattern,[]): #print(nextWord) if nextWord not in visited: queue.append((nextWord,length+1)) visited.add(nextWord) return 0
sunnyyeti/Leetcode-solutions
127 Word Ladder.py
127 Word Ladder.py
py
2,240
python
en
code
0
github-code
36
39803390183
from homepageapp.models import ModelsNewSQL02Model from django.conf import settings from django.http import JsonResponse from django.core.files.storage import FileSystemStorage import os from django.shortcuts import get_object_or_404, render from django.core.paginator import Paginator # repairOrder model was added on 11/5/2022. Deleted on 11/18/2022 from django.utils.translation import gettext_lazy as _ from django.contrib import messages from django.utils import timezone from django.shortcuts import render, redirect from django.core.mail import send_mail from django.template.loader import render_to_string from django.utils.html import strip_tags import json from django.contrib.auth.mixins import LoginRequiredMixin from appointments.forms import AppointmentCreationForm, AppointmentImagesForm from appointments.forms import AppointmentImagesForm, AppointmentImageFormSet from appointments.models import AppointmentRequest, AppointmentImages from django.views.generic import CreateView, FormView, TemplateView from django.views.generic import DetailView from django.views.generic import ListView from formtools.wizard.views import WizardView from django.urls import reverse_lazy import calendar from formtools.wizard.views import SessionWizardView from appointments.models import APPT_STATUS_CANCELLED, APPT_STATUS_NOT_SUBMITTED # 2023-04-10 def appointment_create_view_for_customer(request): # form = AppointmentCreationForm(request.POST or None) if request.method == 'POST': # form = AppointmentCreationForm(request.POST) form = AppointmentCreationForm(request.POST, request.FILES) image_formset = AppointmentImageFormSet( request.POST, request.FILES, user=request.user) image_form = AppointmentImagesForm( request.POST, request.FILES, user=request.user) # form = AppointmentCreationForm(request.POST) if form.is_valid(): # and image_formset.is_valid() # form.save() form.save(commit=False) appointment_data = form.cleaned_data # appointment_data.user = request.user appointment_data = json.dumps(appointment_data, default=str) request.session['appointment_data'] = appointment_data # request.session['images'] = [image_form.cleaned_data for image_form in image_formset] # request.session['submitted_form'] = json.dumps(form, default=dict)[0] # json.dumps(my_dictionary, indent=4, sort_keys=True, default=str) # appointment = form.save(commit=False) # appointment.appointment_requested_datetime = timezone.now() # appointment.save() # kwargs = {'appointment': appointment} # TODO: Send email to customer about service request status return redirect('appointments:appointment-preview-view') else: print(form.errors) # print out the form errors # return redirect('appointment_preview', args=[appointment.appointment_id]) else: form = AppointmentCreationForm image_formset = AppointmentImageFormSet( queryset=AppointmentImages.objects.none()) image_form = AppointmentImagesForm() # context = {'form': form} context = {'form': form, 'image_formset': image_formset, 'image_form': image_form} return render(request, 'appointments/10_appointment_create.html', context) def appointment_preview_view(request): # appointment = kwargs.get('appointment', None) appointment_data = request.session.get('appointment_data') images = request.session.get('images') # submitted_form = request.session.get('submitted_form') if not appointment_data: return redirect('appointments:appointment-create-view') # 2024-04-10 using json.loads to load back the appointment_data. # otherwise appointment_data will be appointment_data = json.loads(appointment_data) images = json.loads(images) # if request.method == 'GET': form = AppointmentCreationForm(appointment_data) appointment = AppointmentRequest(**appointment_data) context = {'form': form, 'appointment': appointment, } if request.method == 'POST': appointment.appointment_status = APPT_STATUS_NOT_SUBMITTED appointment.save() messages.success( request, 'Appointment has been submitted successfuly.') request.session.pop('appointment_data') return redirect('appointments:appointment-success-view') return render(request, 'appointments/20_appointment_preview.html', context) # form = AppointmentCreationForm(request.POST) # if form.is_valid(): # appointment = AppointmentRequest(form.fields) # appointment.save() # messages.success(request, 'Appointment has been submitted successfuly.') # request.session.pop('appointment_data') # # send_appointment_confirmation_email(appointment) # return redirect('appointments:appointment-success') # return redirect('appointment_success') # form = AppointmentCreationForm(initial=kwargs) # return render(request, 'appointments/02-appointment-preview.html', {'form': form}) # elif 'confirm' in request.POST: # form = AppointmentCreationForm(request.POST) # if form.is_valid(): # appointment = form.save(commit=False) # appointment.appointment_status = 'C' # appointment.save() # # Send confirmation email -- pending # # 2023-04-10 # # subject = 'Appointment Confirmed' # # html_message = render_to_string('appointment_confirmation_email.html', {'appointment': appointment}) # # plain_message = strip_tags(html_message) # # from_email = 'Your Company <noreply@yourcompany.com>' # # to_email = appointment.appointment_email # # send_mail(subject, plain_message, from_email, [to_email], html_message=html_message) # # else: # # return redirect('appointment-create-view') # # form = AppointmentCreationForm() # context = {'form': form} # return render(request, 'appointments/02-appointment-preview.html', context) # return redirect('appointment-create-view') def appointment_success(request): return render(request, 'appointments/30_appointment_creation_success.html') # version 2 of appointment creation. class AppointmentCreateView(SessionWizardView): # def get_template_names(self): # return ['appointments/12_appointment_create_v2_step_1.html', 'appointments/13_appointment_create_v2_step_2.html'] template_name = 'appointments/11_appointment_create_v2.html' file_storage = FileSystemStorage(location=os.path.join( settings.DEFAULT_FILE_STORAGE, 'appointment_images')) form_list = [ ('upload images', AppointmentImageFormSet), ('new_appointment', AppointmentCreationForm), ] success_url = reverse_lazy('appointments:appointment-preview-view') def done(self, form_list, **kwargs): image_formset, appointment_form = form_list appointment_form.save(commit=False) appointment_data = appointment_form.cleaned_data images = image_formset.save(commit=False) appointment_data = json.dumps(appointment_data, default=str) images = json.dumps(images, default=str) self.request.session['appointment_data'] = appointment_data self.request.session['images'] = images # talent_data = {} # talent_data.update(form.cleaned_data) # # # Create the talent record # # talent = TalentsModel.objects.create(**talent_data) # talent = TalentsModel(**talent_data) # Get the current user # Add a success message # messages.success(self.request, "Talent created successfully.") # return redirect("talent_management:talent_list", {'talent': talent}) return redirect('appointments:appointment-preview-view') class AppointmentPreviewView(FormView): template_name = 'appointments/20_appointment_preview.html' # form_class = AppointmentCreationForm success_url = reverse_lazy('appointments:appointment-success-view') def form_valid(self, form): self.request.session['appointment_data'] = self.request.POST return super().form_valid(form) def get_form_kwargs(self): kwargs = super().get_form_kwargs() kwargs['data'] = self.request.session.get('appointment_data', {}) return kwargs class AppointmentSuccessView(TemplateView): template_name = 'appointments/30_appointment_creation_success.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) appointment_data = self.request.session.get('appointment_data', {}) if appointment_data: appointment = AppointmentRequest(**appointment_data) appointment.save() self.request.session['appointment_data'] = None context['appointment'] = appointment return context class AppointmentListView(LoginRequiredMixin, ListView): model = AppointmentRequest context_object_name = 'appointments' template_name = 'appointments/50_appointment_list.html' login_url = reverse_lazy('internal_users:internal_user_login') def get_queryset(self): # `__` double undestore..more researched are needed. qs = AppointmentRequest.objects.prefetch_related( 'appointment_repair_order').exclude(appointment_status=APPT_STATUS_CANCELLED).all() # qs=qs.filter(appointment_status=APPT_STATUS_CANCELLED) # select_related('repair_order_customer').prefetch_related('repair_order_customer__addresses') # repair order phase defines the WIP (work-in-progress) caegory. 6 means invoice. return qs class AppointmentDetailView(DetailView): model = AppointmentRequest context_object_name = 'appointment' template_name = 'appointments/60_appointment_detail.html' # login_url = '/users/login' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['form'] = AppointmentImagesForm() return context def post(self, request, *args, **kwargs): appointment = self.get_object() form = AppointmentImagesForm(request.POST, request.FILES) if form.is_valid(): image = form.save(commit=False) image.appointment = appointment image.save() return self.get(request, *args, **kwargs) # def post(self, request, *args, **kwargs): # talent = self.get_object() # form = TalentDocumentsForm(request.POST, request.FILES) # if form.is_valid(): # document = form.save(commit=False) # document.talent = talent # document.save() # return self.get(request, *args, **kwargs) # def get_queryset(self): # queryset = super().get_queryset() # return queryset.filter(user=self.request.user) class AppointmentDetailByConfirmationIdView(AppointmentDetailView): def get_queryset(self): queryset = AppointmentRequest.objects.filter( appointment_confirmation_id=self.args['appointment_confirmation_id']) return queryset.filter(user=self.request.user) def appointment_get_vehicle_models(request, make_id): models = ModelsNewSQL02Model.objects.filter( make_id=make_id).all().order_by('model_name') model_dict_list = list(models.values('model_id', 'model_name')) model_tuple_list = [(model.pk, model.model_name) for model in models] # return JsonResponse(model_tuple_list, safe=False) return JsonResponse(model_dict_list, safe=False) def appointment_image_list(request, pk): appointment = AppointmentRequest.objects.get(pk=pk) images = AppointmentImages.objects.filter( image_is_active=True).filter(appointment=appointment).all() return render(request, 'appointments/70_appointment_image_list.html', {'images': images, 'appointment': appointment}) def appointment_image_soft_delete(request, image_id): image = get_object_or_404(AppointmentImages, image_id=image_id) image.image_is_active = False image.save() messages.add_message(request, messages.INFO, "Image selected has been deleted.") return redirect('appointment:appointment_image_list')
zjgcainiao/new_place_at_76
appointments/views.py
views.py
py
12,477
python
en
code
0
github-code
36
27047741058
""" @author Joe This file contains some pretty functions """ def get_top_k_indexes_of_list(target_list, k, is_max=True, min_value=None): """ get the top k indexes of elements in list Example: Problem: I have a list say a = [5,3,1,4,10], and I need to get a index of top two values of the list viz 5 and 10. Is there a one-liner that python offers for such a case? Usage: get_top_k_indexes_of_list(target_list=a, k=2, is_max=True) link: https://stackoverflow.com/questions/13070461/get-index-of-the-top-n-values-of-a-list-in-python :param target_list: target list :param k: the number of indexes :param is_max: True means max else False means min :param min_value: if min_value is not None filter the indexes whose value less than or equals to min_value :return: a list of indexes """ indexes = sorted(range(len(target_list)), key=lambda i: target_list[i], reverse=is_max)[:k] result = list() if min_value is not None: for index in indexes: if target_list[index] <= min_value: break result.append(index) else: result = indexes return result def get_elements_from_list(target_list, indexes): """ get elements from target_list by indexes :param target_list: target list :param indexes: a list of indexes :return: a list of elements """ elements = [target_list[i] for i in indexes] return elements
JoeZJH/JoePyLibs
general/list_utils.py
list_utils.py
py
1,495
python
en
code
0
github-code
36
2214688634
import argparse from alarm import __version__ from alarm.constants import ALLOWED_EXTENSIONS, ON_WINDOWS def parse_args(args): """Passing in args makes this easier to test: https://stackoverflow.com/a/18161115 """ parser = argparse.ArgumentParser( description="Play an alarm after N minutes", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) duration_group = parser.add_mutually_exclusive_group(required=True) duration_group.add_argument( "-s", "--seconds", type=int, help="Number of seconds before playing alarm" ) duration_group.add_argument( "-m", "--minutes", type=int, help="Number of minutes before playing alarm" ) run_mode_group = parser.add_mutually_exclusive_group() if not ON_WINDOWS: run_mode_group.add_argument( "-b", "--background", action="store_true", default=False, help="Run timer in the background", ) run_mode_group.add_argument( "-d", "--display_timer", action="store_true", default=False, help="Show timer in console", ) alarm_file_group = parser.add_mutually_exclusive_group() alarm_file_group.add_argument( "-l", "--song_library", help=( "Take a random song from a song library directory, " f"supported formats: {', '.join(sorted(ALLOWED_EXTENSIONS))}" ), ) alarm_file_group.add_argument( "-f", "--file", help="File path to song to play as alarm" ) alarm_file_group.add_argument( "-M", "--message", help="Set an audio message to play for alarm" ) parser.add_argument( "-v", "--version", action="version", version=f"%(prog)s {__version__}" ) parser.add_argument("-t", "--timeout", type=int, help="Stop alarm after N seconds") parser.add_argument( "-r", "--repeat", type=int, default=1, help="Repeat alarm cycle N times (only works in foreground)", ) return parser.parse_args(args)
hobojoe1848/pybites-alarm
alarm/cli.py
cli.py
py
2,095
python
en
code
null
github-code
36
31619170189
import pandas as pd import xml.etree.ElementTree as ET # Load the XML data tree = ET.parse('statement_short.xml') root = tree.getroot() # Define a function to extract data from the XML elements def extract_data(elem): data = {} for child in elem: if len(child) == 0: data[child.tag] = child.text else: data[child.tag] = extract_data(child) return data # Extract the data from the XML and create a DataFrame data = [] for elem in root: data.append(extract_data(elem)) df = pd.DataFrame(data) df1 = pd.read_xml('statement_short.xml') # Display the DataFrame print(df1)
dochaauch/Tools_for_buh
xml_pank/xml1.py
xml1.py
py
627
python
en
code
0
github-code
36
70973655465
import torch import torch.nn as nn class ChannelAttention(nn.Module): def __init__(self, in_planes, ratio=16): super(ChannelAttention, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.max_pool = nn.AdaptiveMaxPool2d(1) self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // 16, 1, bias=False), nn.ReLU(), nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)) self.sigmoid = nn.Sigmoid() def forward(self, x): avg_out = self.fc(self.avg_pool(x)) max_out = self.fc(self.max_pool(x)) out = avg_out + max_out return self.sigmoid(out) class SpatialAttention(nn.Module): def __init__(self, kernel_size=7): super(SpatialAttention, self).__init__() self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): avg_out = torch.mean(x, dim=1, keepdim=True) max_out, _ = torch.max(x, dim=1, keepdim=True) x = torch.cat([avg_out, max_out], dim=1) x = self.conv1(x) return self.sigmoid(x) class conv_block(nn.Module): def __init__(self, in_c, out_c): super().__init__() self.conv1 = nn.Conv2d(in_c, out_c, kernel_size=3, padding=1) self.in1 = nn.InstanceNorm2d(out_c) self.conv2 = nn.Conv2d(out_c, out_c, kernel_size=3, padding=1) self.in2 = nn.InstanceNorm2d(out_c) self.relu = nn.ReLU() self.ca = ChannelAttention(out_c) self.sa = SpatialAttention() def forward(self, inputs): x = self.conv1(inputs) x = self.in1(x) x = self.relu(x) x = self.conv2(x) x = self.in2(x) x = self.relu(x) x = self.ca(x) * x x = self.sa(x) * x return x class encoder_block(nn.Module): def __init__(self, in_c, out_c): super().__init__() self.conv = conv_block(in_c, out_c) self.pool = nn.MaxPool2d((2, 2)) def forward(self, inputs): x = self.conv(inputs) p = self.pool(x) return x, p class decoder_block(nn.Module): def __init__(self, in_c, out_c): super().__init__() self.up = nn.ConvTranspose2d(in_c, out_c, kernel_size=2, stride=2, padding=0) self.conv = conv_block(out_c + out_c, out_c) def forward(self, inputs, skip): x = self.up(inputs) x = torch.cat([x, skip], axis=1) x = self.conv(x) return x class build_unet(nn.Module): def __init__(self): super().__init__() """ Encoder """ self.e1 = encoder_block(3, 64) self.e2 = encoder_block(64, 128) self.e3 = encoder_block(128, 256) self.e4 = encoder_block(256, 512) """ Bottleneck """ self.b1 = conv_block(512, 640) self.b2 = conv_block(640, 768) self.b3 = conv_block(768, 896) self.b4 = conv_block(896, 1024) """ Decoder """ self.d1 = decoder_block(1024, 512) self.d2 = decoder_block(512, 256) self.d3 = decoder_block(256, 128) self.d4 = decoder_block(128, 64) """ Classifier """ self.outputs = nn.Conv2d(64, 1, kernel_size=1, padding=0) def forward(self, inputs): """ Encoder """ s1, p1 = self.e1(inputs) s2, p2 = self.e2(p1) s3, p3 = self.e3(p2) s4, p4 = self.e4(p3) """ Bottleneck """ b = self.b1(p4) b = self.b2(b) b = self.b3(b) b = self.b4(b) """ Decoder """ d1 = self.d1(b, s4) d2 = self.d2(d1, s3) d3 = self.d3(d2, s2) d4 = self.d4(d3, s1) outputs = self.outputs(d4) return outputs
AAleka/Cycle-CBAM-and-CBAM-UNet
UNet/model.py
model.py
py
3,806
python
en
code
7
github-code
36
4738737155
import torch import torch.nn as nn import numpy as np from torch.autograd import Variable import math import time import multiprocessing from torch.nn.parameter import Parameter from torch.nn.modules.module import Module import torch.nn.functional as F import copy class conGraphConvolutionlayer(Module): def __init__(self, in_features, out_features, bias=True): super(conGraphConvolutionlayer, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1. / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')' class conGCN(nn.Module): def __init__(self, nfeat, nhid, common_hid_layers_num, fcnn_hid_layers_num, dropout, nout1, ): super(conGCN, self).__init__() self.nfeat = nfeat self.nhid = nhid self.common_hid_layers_num = common_hid_layers_num self.fcnn_hid_layers_num = fcnn_hid_layers_num self.nout1 = nout1 self.dropout = dropout self.training = True ## The beginning layer self.gc_in_exp = conGraphConvolutionlayer(nfeat, nhid) self.bn_node_in_exp = nn.BatchNorm1d(nhid) self.gc_in_sp = conGraphConvolutionlayer(nfeat, nhid) self.bn_node_in_sp = nn.BatchNorm1d(nhid) ## common_hid_layers if self.common_hid_layers_num > 0: for i in range(self.common_hid_layers_num): exec('self.cgc{}_exp = conGraphConvolutionlayer(nhid, nhid)'.format(i+1)) exec('self.bn_node_chid{}_exp = nn.BatchNorm1d(nhid)'.format(i+1)) exec('self.cgc{}_sp = conGraphConvolutionlayer(nhid, nhid)'.format(i+1)) exec('self.bn_node_chid{}_sp = nn.BatchNorm1d(nhid)'.format(i+1)) ## FCNN layers self.gc_out11 = nn.Linear(2*nhid, nhid, bias=True) self.bn_out1 = nn.BatchNorm1d(nhid) if self.fcnn_hid_layers_num > 0: for i in range(self.fcnn_hid_layers_num): exec('self.gc_out11{} = nn.Linear(nhid, nhid, bias=True)'.format(i+1)) exec('self.bn_out11{} = nn.BatchNorm1d(nhid)'.format(i+1)) self.gc_out12 = nn.Linear(nhid, nout1, bias=True) def forward(self, x, adjs): self.x = x ## input layer self.x_exp = self.gc_in_exp(self.x, adjs[0]) self.x_exp = self.bn_node_in_exp(self.x_exp) self.x_exp = F.elu(self.x_exp) self.x_exp = F.dropout(self.x_exp, self.dropout, training=self.training) self.x_sp = self.gc_in_sp(self.x, adjs[1]) self.x_sp = self.bn_node_in_sp(self.x_sp) self.x_sp = F.elu(self.x_sp) self.x_sp = F.dropout(self.x_sp, self.dropout, training=self.training) ## common layers if self.common_hid_layers_num > 0: for i in range(self.common_hid_layers_num): exec("self.x_exp = self.cgc{}_exp(self.x_exp, adjs[0])".format(i+1)) exec("self.x_exp = self.bn_node_chid{}_exp(self.x_exp)".format(i+1)) self.x_exp = F.elu(self.x_exp) self.x_exp = F.dropout(self.x_exp, self.dropout, training=self.training) exec("self.x_sp = self.cgc{}_sp(self.x_sp, adjs[1])".format(i+1)) exec("self.x_sp = self.bn_node_chid{}_sp(self.x_sp)".format(i+1)) self.x_sp = F.elu(self.x_sp) self.x_sp = F.dropout(self.x_sp, self.dropout, training=self.training) ## FCNN layers self.x1 = torch.cat([self.x_exp, self.x_sp], dim=1) self.x1 = self.gc_out11(self.x1) self.x1 = self.bn_out1(self.x1) self.x1 = F.elu(self.x1) self.x1 = F.dropout(self.x1, self.dropout, training=self.training) if self.fcnn_hid_layers_num > 0: for i in range(self.fcnn_hid_layers_num): exec("self.x1 = self.gc_out11{}(self.x1)".format(i+1)) exec("self.x1 = self.bn_out11{}(self.x1)".format(i+1)) self.x1 = F.elu(self.x1) self.x1 = F.dropout(self.x1, self.dropout, training=self.training) self.x1 = self.gc_out12(self.x1) gc_list = {} gc_list['gc_in_exp'] = self.gc_in_exp gc_list['gc_in_sp'] = self.gc_in_sp if self.common_hid_layers_num > 0: for i in range(self.common_hid_layers_num): exec("gc_list['cgc{}_exp'] = self.cgc{}_exp".format(i+1, i+1)) exec("gc_list['cgc{}_sp'] = self.cgc{}_sp".format(i+1, i+1)) gc_list['gc_out11'] = self.gc_out11 if self.fcnn_hid_layers_num > 0: exec("gc_list['gc_out11{}'] = self.gc_out11{}".format(i+1, i+1)) gc_list['gc_out12'] = self.gc_out12 return F.log_softmax(self.x1, dim=1), gc_list def conGCN_train(model, train_valid_len, test_len, feature, adjs, label, epoch_n, loss_fn, optimizer, train_valid_ratio = 0.9, scheduler = None, early_stopping_patience = 5, clip_grad_max_norm = 1, load_test_groundtruth = False, print_epoch_step = 1, cpu_num = -1, GCN_device = 'CPU' ): if GCN_device == 'CPU': device = torch.device("cpu") print('Use CPU as device.') else: if torch.cuda.is_available(): device = torch.device("cuda") print('Use GPU as device.') else: device = torch.device("cpu") print('Use CPU as device.') if cpu_num == -1: cores = multiprocessing.cpu_count() torch.set_num_threads(cores) else: torch.set_num_threads(cpu_num) model = model.to(device) adjs = [adj.to(device) for adj in adjs] feature = feature.to(device) label = label.to(device) time_open = time.time() train_idx = range(int(train_valid_len*train_valid_ratio)) valid_idx = range(len(train_idx), train_valid_len) best_val = np.inf clip = 0 loss = [] para_list = [] for epoch in range(epoch_n): try: torch.cuda.empty_cache() except: pass optimizer.zero_grad() output1, paras = model(feature.float(), adjs) loss_train1 = loss_fn(output1[list(np.array(train_idx)+test_len)], label[list(np.array(train_idx)+test_len)].float()) loss_val1 = loss_fn(output1[list(np.array(valid_idx)+test_len)], label[list(np.array(valid_idx)+test_len)].float()) if load_test_groundtruth == True: loss_test1 = loss_fn(output1[:test_len], label[:test_len].float()) loss.append([loss_train1.item(), loss_val1.item(), loss_test1.item()]) else: loss.append([loss_train1.item(), loss_val1.item(), None]) if epoch % print_epoch_step == 0: print("******************************************") print("Epoch {}/{}".format(epoch+1, epoch_n), 'loss_train: {:.4f}'.format(loss_train1.item()), 'loss_val: {:.4f}'.format(loss_val1.item()), end = '\t' ) if load_test_groundtruth == True: print("Test loss= {:.4f}".format(loss_test1.item()), end = '\t') print('time: {:.4f}s'.format(time.time() - time_open)) para_list.append(paras.copy()) for i in paras.keys(): para_list[-1][i] = copy.deepcopy(para_list[-1][i]) if early_stopping_patience > 0: if torch.round(loss_val1, decimals=4) < best_val: best_val = torch.round(loss_val1, decimals=4) best_paras = paras.copy() best_loss = loss.copy() clip = 1 for i in paras.keys(): best_paras[i] = copy.deepcopy(best_paras[i]) else: clip += 1 if clip == early_stopping_patience: break else: best_loss = loss.copy() best_paras = None loss_train1.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=clip_grad_max_norm) optimizer.step() if scheduler != None: try: scheduler.step() except: scheduler.step(metrics = loss_val1) print("***********************Final Loss***********************") print("Epoch {}/{}".format(epoch+1, epoch_n), 'loss_train: {:.4f}'.format(loss_train1.item()), 'loss_val: {:.4f}'.format(loss_val1.item()), end = '\t' ) if load_test_groundtruth == True: print("Test loss= {:.4f}".format(loss_test1.item()), end = '\t') print('time: {:.4f}s'.format(time.time() - time_open)) torch.cuda.empty_cache() return output1.cpu(), loss, model.cpu()
luoyuanlab/stdgcn
STdGCN/GCN.py
GCN.py
py
9,989
python
en
code
2
github-code
36
10387196560
from setuptools import setup, find_packages from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='pysnippets', version='0.1.0', description='Scattered python snippets', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/gongzhitaao/snippets/pysnippets', author='gongzhitaao', author_email='zhitaao.gong@gmail.com', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', ], keywords='snippets', packages=find_packages(exclude=['contrib', 'docs', 'tests']), install_requires=[], extras_require={ 'test': ['pytest'], }, )
gongzhitaao/snippets
pysnippets/setup.py
setup.py
py
1,010
python
en
code
0
github-code
36
71251001
import torch import torch.nn as nn import torch.utils.data as Data from torch.autograd import Variable import torch.nn.functional as F import time import numpy as np assert F import csv,random from imblearn.over_sampling import SMOTE class Datamanager(): def __init__(self): self.dataset = {} self.threshold = 1e-10 ## The threshold for classifying the label of the sample after simoid function self.normalize = {} ## Record the mean and standard deviation for testing set normalization self.over_sample = False ## Determine whether copy the positive (attribute #4 = 2) samples self.over_sample_rate = 150 ## Number of copies self.down_sample = False ## Determine whether delete the negative (attribute #4 = 1) samples self.down_sample_rate = 2 ## Make number of negative samples = down_sample_rate * Number of positive samples self.smote = True ## Determine whether use SMOTE to generate minor class samples source: https://imbalanced-learn.org/en/stable/generated/imblearn.over_sampling.SMOTE.html self.weighted_loss = True ## Determine whether adjust the weight of BCE loss function self.weighted_loss_rate = 0.1 ## Weight of negative samples in loss function ( 1 - weighted_loss_rate for positive samples) def get_data(self,name,file_name,b_size,args,shuf=True): with open(file_name,newline='') as csvfile: rows = csv.reader(csvfile) ## Read file data = [] ## Store the data from file for row in rows: data.append(row) data = data[2:] data = np.array(data) if name == 'train' : data = np.delete(data,2,0) ## Missing data => remove data = np.delete(data,0,0) ## Positive(attribute #4 = 2) outlier => remove data = np.delete(data,4,0) data = np.delete(data,5,0) data = np.delete(data,6,0) data = np.delete(data,11,0) data = np.delete(data,2542,0) if name == 'test' : data = np.delete(data,1103,0) data = np.delete(data,1102,0) data = np.delete(data,699,0) data = np.delete(data,5,0) data = np.delete(data,176,1) ## These columns have std = 0 => remove data = np.delete(data,167,1) data = np.delete(data,166,1) data = np.delete(data,165,1) data = np.delete(data,5,1) data = np.delete(data,4,1) data = np.delete(data,166,1) data = np.delete(data,165,1) data = np.delete(data,164,1) for i in range(data.shape[1]): if i == 3 : for j in range(data.shape[0]): ## Transform label of attribute #4 '2' to 1(positive), '1' to 0(negative) if data[j][i] == '1': data[j][i] = 0 elif data[j][i] == '2': data[j][i] = 1 else: print('error target') elif data[0][i] == 'TRUE' or data[0][i] == 'FALSE': # ## Transform label 'TRUE' to 1, 'Negative' to 0 for j in range(data.shape[0]): if data[j][i] == 'TRUE': data[j][i] = 1.0 elif data[j][i] == 'FALSE': data[j][i] = 0.0 else: print(j,i,data[j][i]) print('other type') mean = data[:,i].astype(np.double).mean() ## Normalization. Record mean and standard deviation std = data[:,i].astype(np.double).std() if(std == 0): print(i) data[:,i] = (data[:,i].astype(np.double) - mean) / std self.normalize[i] = [mean,std] else: if name == 'train': ## Normalization. Record mean and standard deviation mean = data[:,i].astype(np.double).mean() std = data[:,i].astype(np.double).std() if(std == 0): print(i) data[:,i] = (data[:,i].astype(np.double) - mean) / std self.normalize[i] = [mean,std] else: data[:,i] = (data[:,i].astype(np.double) - self.normalize[i][0]) / self.normalize[i][1] if name == 'train' : np.random.shuffle(data) Y = data[:int(data.shape[0]*0.9),3].reshape(-1,1).astype(np.double) ## Split training and validation set, and extract attribute #4 as targets Y_val = data[int(data.shape[0]*0.9):,3].reshape(-1,1).astype(np.double) X = np.delete(data,3,1).astype(np.double)[:int(data.shape[0]*0.9),:] X_val = np.delete(data,3,1).astype(np.double)[int(data.shape[0]*0.9):,:] if self.over_sample or self.down_sample or self.smote: count_0 = 0 count_1 = 0 count_1_list = [] for i in range(Y.shape[0]): if Y[i][0] == 0: count_0 = count_0 + 1 else: count_1 = count_1 + 1 count_1_list.append(i) print('count_0:',count_0) print('count_1:',count_1) if self.over_sample: ## Copy the positive (attribute #4 = 2) samples ori_one_X , ori_one_Y = X[count_1_list] , Y[count_1_list] for i in range(self.over_sample_rate): noise = np.random.normal(0, 0.3, ori_one_X.shape) add_one_X = ori_one_X + noise X = np.concatenate((X,add_one_X),axis = 0) Y = np.concatenate((Y,ori_one_Y),axis = 0) if self.down_sample: ## Delete the negative (attribute #4 = 1) samples number = int(count_0 - count_1 * (self.over_sample_rate + 1) * self.down_sample_rate) while(number > 0): for i in range(Y.shape[0]): if Y[i][0] == 0: X = np.delete(X,i,0) Y = np.delete(Y,i,0) number = number - 1 break if self.smote: ## Use SMOTE to generate minor class(positive) samples sm = SMOTE(sampling_strategy = 1) X, Y = sm.fit_resample(X, Y) Y = Y.reshape(-1,1) count_0 = 0 count_1 = 0 for i in range(Y.shape[0]): if Y[i][0] == 0: count_0 = count_0 + 1 else: count_1 = count_1 + 1 #print('count_0:',count_0) #print('count_1:',count_1) #print(X.shape) #print(Y.shape) X,Y = torch.from_numpy(X).cuda(),torch.from_numpy(Y).cuda() ## Convert numpy array to tensor for Pytorch train_dataset = Data.TensorDataset(data_tensor=X[:], target_tensor=Y[:]) ## Wrap up the input/target tensor into TensorDataset source: https://pytorch.org/docs/stable/data.html self.dataset['train'] = Data.DataLoader(dataset=train_dataset, batch_size=b_size, shuffle=shuf) ## Put the TensorDataset in Dataloader (stored in a dictionary), shuffling the samples source: https://pytorch.org/docs/stable/data.html X_val,Y_val = torch.from_numpy(X_val).cuda(),torch.from_numpy(Y_val).cuda() val_dataset = Data.TensorDataset(data_tensor=X_val[:], target_tensor=Y_val[:]) self.dataset['val'] = Data.DataLoader(dataset=val_dataset, batch_size=b_size, shuffle=shuf) elif name == 'test': ## Process the testing set Y = data[:,3].reshape(-1,1).astype(np.double) X = np.delete(data,3,1).astype(np.double) X,Y = torch.from_numpy(X).cuda(),torch.from_numpy(Y).cuda() train_dataset = Data.TensorDataset(data_tensor=X[:], target_tensor=Y[:]) self.dataset['test'] = Data.DataLoader(dataset=train_dataset, batch_size=b_size, shuffle=shuf) ## Put the TensorDataset in Dataloader (stored in a dictionary), not shuffling the samples source: https://pytorch.org/docs/stable/data.html def train(self,model,trainloader,epoch): ## Train the model model.train() ## Set to training mode optimizer = torch.optim.Adam(model.parameters()) ## Use Adam optimizer to optimize all DNN parameters source: https://pytorch.org/docs/stable/optim.html loss_func = nn.BCELoss() ## Use binary cross entropoy for model evaluation source: https://pytorch.org/docs/stable/nn.html total_loss = 0 ## Calculate total loss in a epoch t1_p1 = 0 ## Confusion matrix initialization t1_p0 = 0 t0_p1 = 0 t0_p0 = 0 for batch_index, (x, y) in enumerate(trainloader): ## Process a batch of data in each timestep x, y= Variable(x).cuda(), Variable(y).cuda() output = model(x) ## Use present model to forecast the the result if self.weighted_loss: ## Adjust the weight of BCE loss functional source: https://pytorch.org/docs/stable/nn.html weight = np.empty([len(x)]) for i in range(len(x)): weight[i] = self.weighted_loss_rate arr = np.where(y.data == 1) weight[arr[0].tolist()] = 1 - self.weighted_loss_rate weight = torch.from_numpy(weight).cuda().double().view(len(x),1) loss_func = nn.BCELoss(weight = weight) loss = loss_func(output,y) optimizer.zero_grad() ## Set the gradient in the previous time step to zero loss.backward() ## Back propagate source: https://pytorch.org/docs/stable/optim.html optimizer.step() ## Gradient descent source: https://pytorch.org/docs/stable/autograd.html if batch_index % 4 == 0: ## Print model status source: https://pytorch.org/docs/stable/optim.html print('\rTrain Epoch: {} | [{}/{} ({:.0f}%)]\t '.format( epoch, batch_index * len(x), len(trainloader.dataset), 100. * batch_index / len(trainloader)),end='') total_loss+= loss.data[0]*len(x) ## Sum up batch loss pred = np.empty([len(x),1]) ## Calculate confusion matrix output = output.cpu().data.numpy() for i in range(len(x)): if(output[i] > self.threshold): pred[i,0] = 1 else: pred[i,0] = 0 y = y.cpu().data.numpy() for i in range(pred.shape[0]): if y[i] == 1 and pred[i] == 1: t1_p1 = t1_p1 + 1 elif y[i] == 1 and pred[i] == 0: t1_p0 = t1_p0 + 1 elif y[i] == 0 and pred[i] == 1: t0_p1 = t0_p1 + 1 elif y[i] == 0 and pred[i] == 0: t0_p0 = t0_p0 + 1 total_loss/= len(trainloader.dataset) print('Total loss: {:.4f}'.format(total_loss)) ## Print model status print('t1_p1: ',t1_p1 , 't0_p1: ',t0_p1 ) print('t1_p0: ',t1_p0 , 't0_p0: ',t0_p0 ) return total_loss def val(self,model,name,valloader): ## Test the model model.eval() ## Set to evaluation mode val_loss = 0 ## Calculate total loss t1_p1 = 0 ## Confusion matrix initialization t1_p0 = 0 t0_p1 = 0 t0_p0 = 0 for x, y in valloader: x, y = Variable(x, volatile=True).cuda(), Variable(y,volatile=True).cuda() output = model(x) ## Use present model to forecast the the result val_loss += F.binary_cross_entropy(output, y, size_average=False).data[0] ## Sum up batch loss pred = np.empty([len(x),1]) ## Calculate confusion matrix output = output.cpu().data.numpy() for i in range(len(x)): if(output[i] > self.threshold): pred[i,0] = 1 else: pred[i,0] = 0 y = y.cpu().data.numpy() for i in range(pred.shape[0]): if y[i] == 1 and pred[i] == 1: t1_p1 = t1_p1 + 1 elif y[i] == 1 and pred[i] == 0: t1_p0 = t1_p0 + 1 elif y[i] == 0 and pred[i] == 1: t0_p1 = t0_p1 + 1 elif y[i] == 0 and pred[i] == 0: t0_p0 = t0_p0 + 1 val_loss /= len(valloader.dataset) print(name , ' set: Average loss: {:.4f}'.format(val_loss)) ## Print model status print('t1_p1: ',t1_p1 , 't0_p1: ',t0_p1 ) print('t1_p0: ',t1_p0 , 't0_p0: ',t0_p0 ) return val_loss class DNN(nn.Module): ## Set up DNN def __init__(self,args): super(DNN, self).__init__() print(args.unit) self.den=nn.ModuleList() for i in range(1,len(args.unit)-1): ## Set up hidden layers self.den.append( nn.Sequential( nn.Linear(args.unit[i-1], args.unit[i]), ## Source: https://pytorch.org/docs/stable/nn.html nn.ReLU(), nn.Dropout(0.2) )) self.den.append( nn.Sequential( nn.Linear(args.unit[-2], args.unit[-1]), nn.Dropout(0.2), nn.Sigmoid(), )) def forward(self, x): ## Connect layers and activation function for i in self.den: x = i(x) return x
b04901056/dsa2017
qualcomm/nn.py
nn.py
py
17,265
python
en
code
0
github-code
36
3169544582
# ported from uniborg thanks to @s_n_a_p_s , @r4v4n4 , @spechide and @PhycoNinja13b #:::::Credit Time:::::: # 1) Coded By: @s_n_a_p_s # 2) Ported By: @r4v4n4 (Noodz Lober) # 3) End Game Help By: @spechide # 4) Better Colour Profile Pic By @PhycoNinja13b import asyncio import base64 import os import random import shutil import time from datetime import datetime from PIL import Image, ImageDraw, ImageFont from pySmartDL import SmartDL from telethon.errors import FloodWaitError from telethon.tl import functions from . import AUTONAME, BOTLOG, BOTLOG_CHATID, DEFAULT_BIO from .sql_helper.globals import addgvar, delgvar, gvarstatus DEFAULTUSERBIO = DEFAULT_BIO or " рЌ»рЌЕрЈєрјбрЈєрЉјрЈђ рЈърЈєрЈдрЌ┤ рјбрЈєрЌ░рЌ┤ " CHANGE_TIME = Config.CHANGE_TIME DEFAULTUSER = AUTONAME or Config.ALIVE_NAME FONT_FILE_TO_USE = "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf" autopic_path = os.path.join(os.getcwd(), "userbot", "original_pic.png") digitalpic_path = os.path.join(os.getcwd(), "userbot", "digital_pic.png") autophoto_path = os.path.join(os.getcwd(), "userbot", "photo_pfp.png") digitalpfp = Config.DIGITAL_PIC or "https://telegra.ph/file/aeaebe33b1f3988a0b690.jpg" @bot.on(admin_cmd(pattern="autopic ?(.*)")) async def autopic(event): if event.fwd_from: return if Config.DEFAULT_PIC is None: return await edit_delete( event, "**Error**\nFor functing of autopic you need to set DEFAULT_PIC var in Heroku vars", parse_mode=parse_pre, ) downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=False) downloader.start(blocking=False) while not downloader.isFinished(): pass input_str = event.pattern_match.group(1) if input_str: try: input_str = int(input_str) except ValueError: input_str = 60 else: if gvarstatus("autopic_counter") is None: addgvar("autopic_counter", 30) if gvarstatus("autopic") is not None and gvarstatus("autopic") == "true": return await edit_delete(event, f"`Autopic is already enabled`") addgvar("autopic", True) if input_str: addgvar("autopic_counter", input_str) await edit_delete(event, f"`Autopic has been started by my Master`") await autopicloop() @bot.on(admin_cmd(pattern="digitalpfp$")) async def main(event): if event.fwd_from: return downloader = SmartDL(digitalpfp, digitalpic_path, progress_bar=False) downloader.start(blocking=False) while not downloader.isFinished(): pass if gvarstatus("digitalpic") is not None and gvarstatus("digitalpic") == "true": return await edit_delete(event, f"`Digitalpic is already enabled`") addgvar("digitalpic", True) await edit_delete(event, f"`digitalpfp has been started by my Master`") await digitalpicloop() @bot.on(admin_cmd(pattern="bloom$")) async def autopic(event): if event.fwd_from: return if Config.DEFAULT_PIC is None: return await edit_delete( event, "**Error**\nFor functing of bloom you need to set DEFAULT_PIC var in Heroku vars", parse_mode=parse_pre, ) downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=True) downloader.start(blocking=False) while not downloader.isFinished(): pass if gvarstatus("bloom") is not None and gvarstatus("bloom") == "true": return await edit_delete(event, f"`Bloom is already enabled`") addgvar("bloom", True) await edit_delete(event, f"`Bloom has been started by my Master`") await bloom_pfploop() @bot.on(admin_cmd(pattern="autoname$")) async def _(event): if event.fwd_from: return if gvarstatus("autoname") is not None and gvarstatus("autoname") == "true": return await edit_delete(event, f"`Autoname is already enabled`") addgvar("autoname", True) await edit_delete(event, "`AutoName has been started by my Master `") await autoname_loop() @bot.on(admin_cmd(pattern="autobio$")) async def _(event): if event.fwd_from: return if gvarstatus("autobio") is not None and gvarstatus("autobio") == "true": return await edit_delete(event, f"`Autobio is already enabled`") addgvar("autobio", True) await edit_delete(event, "`Autobio has been started by my Master `") await autobio_loop() @bot.on(admin_cmd(pattern="end (.*)")) async def _(event): # sourcery no-metrics if event.fwd_from: return input_str = event.pattern_match.group(1) if input_str == "autopic": if gvarstatus("autopic") is not None and gvarstatus("autopic") == "true": delgvar("autopic") if os.path.exists(autopic_path): file = await event.client.upload_file(autopic_path) try: await event.client(functions.photos.UploadProfilePhotoRequest(file)) os.remove(autopic_path) except BaseException: return return await edit_delete(event, "`Autopic has been stopped now`") return await edit_delete(event, "`Autopic haven't enabled`") if input_str == "digitalpfp": if gvarstatus("digitalpic") is not None and gvarstatus("digitalpic") == "true": delgvar("digitalpic") await event.client( functions.photos.DeletePhotosRequest( await bot.get_profile_photos("me", limit=1) ) ) return await edit_delete(event, "`Digitalpfp has been stopped now`") return await edit_delete(event, "`Digitalpfp haven't enabled`") if input_str == "bloom": if gvarstatus("bloom") is not None and gvarstatus("bloom") == "true": delgvar("bloom") if os.path.exists(autopic_path): file = await event.client.upload_file(autopic_path) try: await event.client(functions.photos.UploadProfilePhotoRequest(file)) os.remove(autopic_path) except BaseException: return return await edit_delete(event, "`Bloom has been stopped now`") return await edit_delete(event, "`Bloom haven't enabled`") if input_str == "autoname": if gvarstatus("autoname") is not None and gvarstatus("autoname") == "true": delgvar("autoname") await event.client( functions.account.UpdateProfileRequest(first_name=DEFAULTUSER) ) return await edit_delete(event, "`Autoname has been stopped now`") return await edit_delete(event, "`Autoname haven't enabled`") if input_str == "autobio": if gvarstatus("autobio") is not None and gvarstatus("autobio") == "true": delgvar("autobio") await event.client( functions.account.UpdateProfileRequest(about=DEFAULTUSERBIO) ) return await edit_delete(event, "`Autobio has been stopped now`") return await edit_delete(event, "`Autobio haven't enabled`") async def autopicloop(): AUTOPICSTART = gvarstatus("autopic") == "true" if AUTOPICSTART and Config.DEFAULT_PIC is None: if BOTLOG: return await bot.send_message( BOTLOG_CHATID, "**Error**\n`For functing of autopic you need to set DEFAULT_PIC var in Heroku vars`", ) return if gvarstatus("autopic") is not None: try: counter = int(gvarstatus("autopic_counter")) except Exception as e: LOGS.warn(str(e)) while AUTOPICSTART: if not os.path.exists(autopic_path): downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=False) downloader.start(blocking=False) while not downloader.isFinished(): pass shutil.copy(autopic_path, autophoto_path) im = Image.open(autophoto_path) file_test = im.rotate(counter, expand=False).save(autophoto_path, "PNG") current_time = datetime.now().strftime(" Time: %H:%M \n Date: %d.%m.%y ") img = Image.open(autophoto_path) drawn_text = ImageDraw.Draw(img) fnt = ImageFont.truetype(FONT_FILE_TO_USE, 30) drawn_text.text((150, 250), current_time, font=fnt, fill=(124, 252, 0)) img.save(autophoto_path) file = await bot.upload_file(autophoto_path) try: await bot(functions.photos.UploadProfilePhotoRequest(file)) os.remove(autophoto_path) counter += counter await asyncio.sleep(CHANGE_TIME) except BaseException: return AUTOPICSTART = gvarstatus("autopic") == "true" async def digitalpicloop(): DIGITALPICSTART = gvarstatus("digitalpic") == "true" i = 0 while DIGITALPICSTART: if not os.path.exists(digitalpic_path): downloader = SmartDL(digitalpfp, digitalpic_path, progress_bar=False) downloader.start(blocking=False) while not downloader.isFinished(): pass shutil.copy(digitalpic_path, autophoto_path) Image.open(autophoto_path) current_time = datetime.now().strftime("%H:%M") img = Image.open(autophoto_path) drawn_text = ImageDraw.Draw(img) cat = str(base64.b64decode("dXNlcmJvdC9oZWxwZXJzL3N0eWxlcy9kaWdpdGFsLnR0Zg=="))[ 2:36 ] fnt = ImageFont.truetype(cat, 200) drawn_text.text((350, 100), current_time, font=fnt, fill=(124, 252, 0)) img.save(autophoto_path) file = await bot.upload_file(autophoto_path) try: if i > 0: await bot( functions.photos.DeletePhotosRequest( await bot.get_profile_photos("me", limit=1) ) ) i += 1 await bot(functions.photos.UploadProfilePhotoRequest(file)) os.remove(autophoto_path) await asyncio.sleep(CHANGE_TIME) except BaseException: return DIGITALPICSTART = gvarstatus("digitalpic") == "true" async def bloom_pfploop(): BLOOMSTART = gvarstatus("bloom") == "true" if BLOOMSTART and Config.DEFAULT_PIC is None: if BOTLOG: return await bot.send_message( BOTLOG_CHATID, "**Error**\n`For functing of bloom you need to set DEFAULT_PIC var in Heroku vars`", ) return while BLOOMSTART: if not os.path.exists(autopic_path): downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=False) downloader.start(blocking=False) while not downloader.isFinished(): pass # RIP Danger zone Here no editing here plox R = random.randint(0, 256) B = random.randint(0, 256) G = random.randint(0, 256) FR = 256 - R FB = 256 - B FG = 256 - G shutil.copy(autopic_path, autophoto_path) image = Image.open(autophoto_path) image.paste((R, G, B), [0, 0, image.size[0], image.size[1]]) image.save(autophoto_path) current_time = datetime.now().strftime("\n Time: %H:%M:%S \n \n Date: %d/%m/%y") img = Image.open(autophoto_path) drawn_text = ImageDraw.Draw(img) fnt = ImageFont.truetype(FONT_FILE_TO_USE, 60) ofnt = ImageFont.truetype(FONT_FILE_TO_USE, 250) drawn_text.text((95, 250), current_time, font=fnt, fill=(FR, FG, FB)) drawn_text.text((95, 250), " ­Ъўѕ", font=ofnt, fill=(FR, FG, FB)) img.save(autophoto_path) file = await bot.upload_file(autophoto_path) try: await bot(functions.photos.UploadProfilePhotoRequest(file)) os.remove(autophoto_path) await asyncio.sleep(CHANGE_TIME) except BaseException: return BLOOMSTART = gvarstatus("bloom") == "true" async def autoname_loop(): AUTONAMESTART = gvarstatus("autoname") == "true" while AUTONAMESTART: DM = time.strftime("%d-%m-%y") HM = time.strftime("%H:%M") name = f"Рїџ№ИЈ {HM}||Рђ║ {DEFAULTUSER} Рђ╣||­ЪЊЁ {DM}" LOGS.info(name) try: await bot(functions.account.UpdateProfileRequest(first_name=name)) except FloodWaitError as ex: LOGS.warning(str(ex)) await asyncio.sleep(ex.seconds) await asyncio.sleep(CHANGE_TIME) AUTONAMESTART = gvarstatus("autoname") == "true" async def autobio_loop(): AUTOBIOSTART = gvarstatus("autobio") == "true" while AUTOBIOSTART: DMY = time.strftime("%d.%m.%Y") HM = time.strftime("%H:%M:%S") bio = f"­ЪЊЁ {DMY} | {DEFAULTUSERBIO} | Рїџ№ИЈ {HM}" LOGS.info(bio) try: await bot(functions.account.UpdateProfileRequest(about=bio)) except FloodWaitError as ex: LOGS.warning(str(ex)) await asyncio.sleep(ex.seconds) await asyncio.sleep(CHANGE_TIME) AUTOBIOSTART = gvarstatus("autobio") == "true" bot.loop.create_task(autopicloop()) bot.loop.create_task(digitalpicloop()) bot.loop.create_task(bloom_pfploop()) bot.loop.create_task(autoname_loop()) bot.loop.create_task(autobio_loop()) CMD_HELP.update( { "autoprofile": """**Plugin : **`autoprofile` Рђб **Syntax : **`.autopic angle` Рђб **Function : **__Rotating image along with the time on it with given angle if no angle is given then doesnt rotate. You need to set __`DEFAULT_PIC`__ in heroku__ Рђб **Syntax : **`.digitalpfp` Рђб **Function : **__Your profile pic changes to digitaltime profile picutre__ Рђб **Syntax : **`.bloom` Рђб **Function : **__Random colour profile pics will be set along with time on it. You need to set__ `DEFAULT_PIC`__ in heroku__ Рђб **Syntax : **`.autoname` Рђб **Function : **__for time along with name, you must set __`AUTONAME`__ in the heroku vars first for this to work__ Рђб **Syntax : **`.autobio` Рђб **Function : **__for time along with your bio, Set __`DEFAULT_BIO`__ in the heroku vars first__ Рђб **Syntax : **`.end function` Рђб **Function : **__To stop the given functions like autopic ,difitalpfp , bloom , autoname and autobio__ **Рџа№ИЈDISCLAIMERРџа№ИЈ** __USING THIS PLUGIN CAN RESULT IN ACCOUNT BAN. WE ARE NOT RESPONSIBLE FOR YOUR BAN.__ """ } )
rockzy77/catusertbot77
userbot/plugins/autoprofile.py
autoprofile.py
py
14,466
python
en
code
2
github-code
36
20271187203
''' This module provides management methods for the pygame screen ''' import sys import pygame class MetaGame(type): ''' the metaclass for the game class - this implements classproperties on Game ''' @property def clock(cls): ''' produce the game clock ''' return cls._clock @property def screen(cls): ''' get the game screen ''' return cls._screen @screen.setter def screen(cls, value): ''' set the game screen ''' cls._screen = value @property def scenes(cls): ''' get the game scene stack ''' return cls._scenes @scenes.setter def scenes(cls, value): ''' set the game scene stack ''' cls._scenes = value class Game(object, metaclass=MetaGame): ''' manage the pygame screen ''' _clock = None _screen = None _scenes = None _fps_unlocked = False _max_fps = 0 @classmethod def init(cls, title='pygame', max_fps=0): ''' initialize pygame and some other important things ''' # initialize pygame pygame.mixer.pre_init(channels=1) pygame.init() # set window caption pygame.display.set_caption(title) # initialize game clock cls._clock = pygame.time.Clock() cls._max_fps = max_fps @classmethod def main(cls): ''' start the main loop of the game ''' while cls._scenes: # get the scene on top of the scene stack scene = cls._scenes.peek() try: # process events for event in pygame.event.get(): if event.type == pygame.QUIT: return elif event.type == pygame.VIDEORESIZE: cls.screen.resize(event.dict['size']) elif event.type == pygame.KEYUP and event.key == pygame.K_HASH: cls.screen.debug.enabled = not cls.screen.debug.enabled elif event.type == pygame.KEYUP and event.key == pygame.K_EXCLAIM: cls._fps_unlocked = not cls._fps_unlocked else: scene.on_event(event) # update the scenegraph objects and redraw scene.update() # flip the buffers at the given maximum refresh rate cls.screen.flip() cls._clock.tick(0 if cls._fps_unlocked else cls._max_fps) except StopIteration: Game.scenes.pop() @classmethod def quit(cls): ''' quit the game ''' sys.exit(0)
oaken-source/pyablo
pyablo/game.py
game.py
py
2,780
python
en
code
2
github-code
36
34005743410
from torch.utils.data import * from imutils import paths import numpy as np import random import cv2 import os CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑', '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤', '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁', '新', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'I', 'O', '-' ] CHARS_DICT = {char:i for i, char in enumerate(CHARS)} class LPRDataLoader(Dataset): def __init__(self, img_dir, imgSize, lpr_max_len, PreprocFun=None): self.img_dir = img_dir self.img_paths = [] for i in range(len(img_dir)): self.img_paths += [el for el in paths.list_images(img_dir[i])] random.shuffle(self.img_paths) self.img_size = imgSize self.lpr_max_len = lpr_max_len if PreprocFun is not None: self.PreprocFun = PreprocFun else: self.PreprocFun = self.transform def __len__(self): return len(self.img_paths) def __getitem__(self, index): filename = self.img_paths[index] Image = cv2.imread(filename) height, width, _ = Image.shape if height != self.img_size[1] or width != self.img_size[0]: Image = cv2.resize(Image, self.img_size) Image = self.PreprocFun(Image) basename = os.path.basename(filename) imgname, suffix = os.path.splitext(basename) imgname = imgname.split("-")[0].split("_")[0] label = list() for c in imgname: # one_hot_base = np.zeros(len(CHARS)) # one_hot_base[CHARS_DICT[c]] = 1 label.append(CHARS_DICT[c]) if len(label) == 8: if self.check(label) == False: print(imgname) assert 0, "Error label ^~^!!!" return Image, label, len(label) def transform(self, img): img = img.astype('float32') img -= 127.5 img *= 0.0078125 img = np.transpose(img, (2, 0, 1)) return img def check(self, label): if label[2] != CHARS_DICT['D'] and label[2] != CHARS_DICT['F'] \ and label[-1] != CHARS_DICT['D'] and label[-1] != CHARS_DICT['F']: print("Error label, Please check!") return False else: return True
sirius-ai/LPRNet_Pytorch
data/load_data.py
load_data.py
py
2,544
python
en
code
759
github-code
36
138761933
import json def json_read(path): ''' Parses file of json type. ''' with open(path, 'r', encoding='utf-8') as f: text = json.load(f) return text def conti_with_count(): names = json_read('names.json') continents = json_read('continent.json') result = {} for country in continents: if continents[country] not in result: result[continents[country]] = [names[country]] else: result[continents[country]].append(names[country]) return result
AndriiTurko/homeworks_programming
json_make_dict.py
json_make_dict.py
py
534
python
en
code
0
github-code
36
8827426863
from __future__ import absolute_import, division, print_function import os from setuptools import setup HERE = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(HERE, 'README.rst')) as f: README = f.read() setup(name='marv', version='3.2.0', description='MARV framework', long_description=README, classifiers=[ 'Development Status :: 5 - Production/Stable', "Framework :: Flask", 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Operating System :: POSIX :: Linux', # for now 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 2 :: Only', # for now 'Programming Language :: Python :: Implementation :: CPython', # for now "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", 'Topic :: Scientific/Engineering', ], author='Ternaris', author_email='team@ternaris.com', url='https://github.com/ternaris/marv', license='AGPLv3+', packages=[ 'marv', 'marv.app', 'marv.tests', 'marv_detail', 'marv_node', 'marv_node.testing', 'marv_node.tests', 'marv_nodes', 'marv_pycapnp', 'marv_pycapnp.tests', 'marv_store', 'marv_webapi', ], include_package_data=True, zip_safe=False, test_suite='nose.collector', tests_require=['nose'], install_requires=['Flask-Cors', 'Flask-SQLAlchemy', 'PyJWT', 'bcrypt', 'configparser', 'cython', 'Jinja2>=2.7.3', 'requests-oauthlib', 'pycapnp-for-marv', 'marv-cli'], extras_require={ 'testing': ['coverage', 'ipdb', 'ipdbplugin', 'ipython', 'mock', 'nose', 'testfixtures'], }, entry_points={'marv_cli': ['marv = marv.cli']})
ternaris/marv
setup.py
setup.py
py
2,262
python
en
code
3
github-code
36
11604655493
import ipaddress import json import os import uuid import encryption_helper import phantom.app as phantom import phantom.rules as ph_rules import phantom.utils as ph_utils import requests import xmltodict from phantom.action_result import ActionResult from phantom.base_connector import BaseConnector from requests.auth import AuthBase, HTTPBasicAuth from requests.structures import CaseInsensitiveDict import ews_soap from ewsonprem_consts import * try: from urllib.parse import quote_plus, urlparse except ImportError: from urllib import quote_plus from urlparse import urlparse import base64 import email import quopri import re import time from datetime import datetime, timedelta from email.header import decode_header from email.parser import HeaderParser import outlookmsgfile import six from bs4 import BeautifulSoup, UnicodeDammit from process_email import ProcessEmail from request_handler import RequestStateHandler # noqa from request_handler import _get_dir_name_from_app_name app_dir = os.path.dirname(os.path.abspath(__file__)) os.sys.path.insert(0, '{}/dependencies/ews_dep'.format(app_dir)) # noqa class RetVal3(tuple): def __new__(cls, val1, val2=None, val3=None): return tuple.__new__(RetVal3, (val1, val2, val3)) class RetVal2(tuple): def __new__(cls, val1, val2=None): return tuple.__new__(RetVal2, (val1, val2)) class OAuth2TokenAuth(AuthBase): def __init__(self, token, token_type="Bearer"): self._token = token self._token_type = token_type def __call__(self, r): # modify and return the request r.headers['Authorization'] = "{0} {1}".format(self._token_type, self._token) return r class EWSOnPremConnector(BaseConnector): # actions supported by this script ACTION_ID_RUN_QUERY = "run_query" ACTION_ID_DELETE_EMAIL = "delete_email" ACTION_ID_UPDATE_EMAIL = "update_email" ACTION_ID_COPY_EMAIL = "copy_email" ACTION_ID_MOVE_EMAIL = "move_email" ACTION_ID_BLOCK_SENDER = "block_sender" ACTION_ID_UNBLOCK_SENDER = "unblock_sender" ACTION_ID_EXPAND_DL = "expand_dl" ACTION_ID_RESOLVE_NAME = "resolve_name" ACTION_ID_ON_POLL = "on_poll" ACTION_ID_GET_EMAIL = "get_email" ACTION_ID_TRACE_EMAIL = "trace_email" REPLACE_CONST = "C53CEA8298BD401BA695F247633D0542" # pragma: allowlist secret def __init__(self): """ """ self.__id_to_name = {} # Call the BaseConnectors init first super(EWSOnPremConnector, self).__init__() self._session = None # Target user in case of impersonation self._target_user = None self._state_file_path = None self._state = {} self._headers = None self._base_url = None self._host = None self._impersonate = False self._less_data = False self._dup_data = 0 self._is_token_test_connectivity = False self._is_client_id_changed = False self._timeout = None self.auth_type = None self.rsh = None self._skipped_emails = 0 def _handle_preprocess_scipts(self): config = self.get_config() script = config.get('preprocess_script') self._preprocess_container = lambda x: x if script: try: # Try to load in script to preprocess artifacts import importlib.util preprocess_methods = importlib.util.spec_from_loader('preprocess_methods', loader=None) self._script_module = importlib.util.module_from_spec(preprocess_methods) exec(script, self._script_module.__dict__) except Exception as e: self.save_progress("Error loading custom script. Error: {}".format(str(e))) return self.set_status(phantom.APP_ERROR, EWSONPREM_CONNECTIVITY_TEST_ERROR) try: self._preprocess_container = self._script_module.preprocess_container except Exception: self.save_progress("Error loading custom script. Does not contain preprocess_container function") return self.set_status(phantom.APP_ERROR, EWSONPREM_CONNECTIVITY_TEST_ERROR) return phantom.APP_SUCCESS def _get_ping_fed_request_xml(self, config): try: dt_now = datetime.utcnow() dt_plus = dt_now + timedelta(minutes=10) dt_now_str = "{0}Z".format(dt_now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]) dt_plus_str = "{0}Z".format(dt_plus.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]) ret_val = EWS_FED_REQUEST_XML.format( ping_url=config[EWS_JSON_FED_PING_URL].split('?')[0], created_date=dt_now_str, expiry_date=dt_plus_str, username=config[phantom.APP_JSON_USERNAME], password=config[phantom.APP_JSON_PASSWORD] ) except Exception as e: return None, "Unable to create request xml data. Error: {0}".format(self._get_error_message_from_exception(e)) return ret_val, "Done" def _set_federated_auth(self, config): ret_val, message = self._check_password(config) if phantom.is_fail(ret_val): self.save_progress(message) return None, message required_params = [EWS_JSON_CLIENT_ID, EWS_JSON_FED_PING_URL, EWS_JSON_AUTH_URL, EWS_JSON_FED_VERIFY_CERT] for required_param in required_params: if required_param not in config: return None, "ERROR: {0} is a required parameter for Azure/Federated Authentication, please specify one.".format(required_param) client_id = config[EWS_JSON_CLIENT_ID] # create the xml request that we need to send to the ping fed fed_request_xml, message = self._get_ping_fed_request_xml(config) if fed_request_xml is None: return None, message # Now create the request to the server headers = {'Content-Type': 'application/soap_xml; charset=utf8'} url = config[EWS_JSON_FED_PING_URL] # POST the request try: r = requests.post( url, data=fed_request_xml, headers=headers, verify=config[EWS_JSON_FED_VERIFY_CERT], timeout=self._timeout ) except Exception as e: return None, "Unable to send POST to ping url: {0}, Error: {1}".format(url, self._get_error_message_from_exception(e)) if r.status_code != 200: return None, "POST to ping url failed. Status Code: {0}".format(r.status_code) # process the xml response xml_response = r.text start_pos = xml_response.find('<saml:Assertion') end_pos = xml_response.find('</saml:Assertion>') + len('</saml:Assertion>') # validate that the saml assertion is present if start_pos == -1 or end_pos == -1: return None, "Could not find Saml Assertion" saml_assertion = xml_response[start_pos:end_pos] # base64 encode the assertion saml_assertion_encoded = base64.encodebytes(saml_assertion.encode('utf8')) # Now work on sending th assertion, to get the token url = '{0}/oauth2/token'.format(config[EWS_JSON_AUTH_URL]) # headers client_req_id = str(uuid.uuid4()) headers = {'Accept': 'application/json', 'client-request-id': client_req_id, 'return-client-request-id': 'True'} # URL parsed_auth_url = urlparse(self._base_url) # Form data data = { 'resource': '{0}://{1}'.format(parsed_auth_url.scheme, parsed_auth_url.netloc), 'client_id': client_id, 'grant_type': 'urn:ietf:params:oauth:grant-type:saml1_1-bearer', 'assertion': saml_assertion_encoded, 'scope': 'openid' } try: r = requests.post(url, data=data, headers=headers, timeout=self._timeout) except Exception as e: return None, "Failed to acquire token. POST request failed for {0}, Error: {1}".format( url, self._get_error_message_from_exception(e)) if r.status_code != 200: return None, "POST to office365 url failed. Status Code: {0}".format(r.status_code) resp_json = None try: resp_json = r.json() except Exception as e: return None, "Unable to parse auth token response as JSON. Error: {0}".format(self._get_error_message_from_exception(e)) if 'token_type' not in resp_json: return None, "token_type not found in response from server" if 'access_token' not in resp_json: return None, "token not found in response from server" self.save_progress("Got Access Token") return OAuth2TokenAuth(resp_json['access_token'], resp_json['token_type']), "" def _make_rest_calls_to_phantom(self, action_result, url): # Ignored the verify semgrep check as the following is a call to the phantom's REST API on the instance itself r = requests.get(url, verify=False) # nosemgrep if not r: message = 'Status Code: {0}'.format(r.status_code) if r.text: message = "{} Error from Server: {}".format(message, r.text.replace('{', '{{').replace('}', '}}')) return action_result.set_status(phantom.APP_ERROR, "Error retrieving system info, {0}".format(message)), None try: resp_json = r.json() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error processing response JSON", e), None return phantom.APP_SUCCESS, resp_json def _get_phantom_base_url_ews(self, action_result): ret_val, resp_json = self._make_rest_calls_to_phantom(action_result, '{}rest/system_info'.format(self.get_phantom_base_url())) if phantom.is_fail(ret_val): return action_result.get_status(), None phantom_base_url = resp_json.get('base_url') if not phantom_base_url: return action_result.set_status( phantom.APP_ERROR, "Phantom Base URL is not configured, please configure it in System Settings"), None phantom_base_url = phantom_base_url.strip("/") return phantom.APP_SUCCESS, phantom_base_url def _get_asset_name(self, action_result): ret_val, resp_json = self._make_rest_calls_to_phantom( action_result, '{}rest/asset/{}'.format(self.get_phantom_base_url(), self.get_asset_id())) if phantom.is_fail(ret_val): return action_result.get_status(), None asset_name = resp_json.get('name') if not asset_name: return action_result.set_status(phantom.APP_ERROR, "Error retrieving asset name"), None return phantom.APP_SUCCESS, asset_name def _get_url_to_app_rest(self, action_result=None): if not action_result: action_result = ActionResult() # get the phantom ip to redirect to ret_val, phantom_base_url = self._get_phantom_base_url_ews(action_result) if phantom.is_fail(ret_val): return action_result.get_status(), action_result.get_message() # get the asset name ret_val, asset_name = self._get_asset_name(action_result) if phantom.is_fail(ret_val): return action_result.get_status(), action_result.get_message() self.save_progress('Using Phantom base URL as: {0}'.format(phantom_base_url)) app_json = self.get_app_json() app_name = app_json['name'] app_dir_name = _get_dir_name_from_app_name(app_name) url_to_app_rest = "{0}/rest/handler/{1}_{2}/{3}".format(phantom_base_url, app_dir_name, app_json['appid'], asset_name) return phantom.APP_SUCCESS, url_to_app_rest def _azure_int_auth_initial(self, client_id, client_secret): state = self.rsh.load_state() asset_id = self.get_asset_id() ret_val, message = self._get_url_to_app_rest() if phantom.is_fail(ret_val): return None, message app_rest_url = message request_url = 'https://login.microsoftonline.com/common/oauth2' proxy = {} if 'HTTP_PROXY' in os.environ: proxy['http'] = os.environ.get('HTTP_PROXY') if 'HTTPS_PROXY' in os.environ: proxy['https'] = os.environ.get('HTTPS_PROXY') state['proxy'] = proxy state['client_id'] = client_id state['redirect_url'] = app_rest_url state['request_url'] = request_url # This handling is for the python version 3, working fine with both the python version 2 and 3 client_secret = client_secret.encode('ascii') client_secret = base64.b64encode(client_secret) state['client_secret'] = client_secret.decode('ascii') self.rsh.save_state(state) self.save_progress("Redirect URI: {}".format(app_rest_url)) params = { 'response_type': 'code', 'response_mode': 'query', 'client_id': client_id, 'state': asset_id, 'redirect_uri': app_rest_url } url = requests.Request('GET', '{}/authorize'.format(request_url), params=params).prepare().url url = '{}&'.format(url) self.save_progress("To continue, open this link in a new tab in your browser") self.save_progress(url) for _ in range(0, 60): time.sleep(5) state = self.rsh.load_state() oauth_token = state.get('oauth_token') if oauth_token: break elif state.get('error'): self._reset_the_state() return None, "Error retrieving OAuth token" else: return None, "Timed out waiting for login" self._state['oauth_token'] = oauth_token # NOTE: This state is in the app directory, it is # different from the app state (i.e. self._state) self.rsh.delete_state() return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), "" def _azure_int_auth_refresh(self, client_id, client_secret): oauth_token = self._state.get('oauth_token') if not (oauth_token and oauth_token.get("refresh_token")): self._reset_the_state() return None, "Unable to get refresh token. Please run Test Connectivity again" if client_id != self._state.get('client_id', ''): self._reset_the_state() return None, "Client ID has been changed. Please run Test Connectivity again" refresh_token = oauth_token['refresh_token'] request_url = 'https://login.microsoftonline.com/common/oauth2/token' body = { 'grant_type': 'refresh_token', 'resource': 'https://outlook.office365.com/', 'client_id': client_id, 'refresh_token': refresh_token, 'client_secret': client_secret } try: r = requests.post(request_url, data=body, timeout=self._timeout) except Exception as e: return None, "Error refreshing token: {}".format(str(e)) try: oauth_token = r.json() if "error" in oauth_token: if oauth_token["error"] in EWS_ASSET_PARAM_CHECK_LIST_ERRORS: self._reset_the_state() return None, oauth_token["error_description"] except Exception: return None, "Error retrieving OAuth Token" self._state['oauth_token'] = oauth_token return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), "" def _set_azure_int_auth(self, config): client_id = config.get(EWS_JSON_CLIENT_ID) client_secret = config.get(EWS_JSON_CLIENT_SECRET) if not client_id: return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_ID) if not client_secret: return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_SECRET) if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY: self.debug_print("Try to generate token from refresh token") ret = self._azure_int_auth_refresh(client_id, client_secret) else: self.debug_print("Try to generate token from authorization code") ret = self._azure_int_auth_initial(client_id, client_secret) if ret[0]: self._state['client_id'] = client_id return ret def _get_domain(self, username, client_req_id): """ This method is used to obtain domain from the username. :param username: Username :param client_req_id: Request ID :return: status, domain/message """ headers = {'Accept': 'application/json', 'client-request-id': client_req_id, 'return-client-request-id': 'True'} url = "{0}/common/UserRealm/{1}".format(EWS_LOGIN_URL, username) params = {'api-version': '1.0'} try: r = self._session.get(url, params=params, headers=headers, timeout=self._timeout) except Exception as e: return phantom.APP_ERROR, str(e) if r.status_code != 200: return phantom.APP_ERROR, r.text resp_json = None try: resp_json = r.json() except Exception as e: return phantom.APP_ERROR, str(e) domain = resp_json.get('domain_name') if not domain: return phantom.APP_ERROR, "Did not find domain in response. Cannot continue" return phantom.APP_SUCCESS, domain def _set_header_for_rest_call(self, config): """This function is used to update the headers with access_token before making REST call.""" if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY: resp_json = None if self._state.get("oauth_token", {}): resp_json = self._state.get("oauth_token", {}) if self._state.get("oauth_client_token", {}): resp_json = self._state.get("oauth_client_token", {}) if resp_json: self._session.auth = OAuth2TokenAuth(resp_json['access_token'], resp_json['token_type']) elif self.get_action_identifier() == phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY and not self._is_token_test_connectivity: self._is_token_test_connectivity = True return self.set_authentication_method(config) return phantom.APP_SUCCESS, "" def _set_azure_auth(self, config): ret_val, message = self._check_password(config) if phantom.is_fail(ret_val): self.save_progress(message) return None, message username = config[phantom.APP_JSON_USERNAME] password = config[phantom.APP_JSON_PASSWORD] client_id = config.get(EWS_JSON_CLIENT_ID) client_secret = config.get(EWS_JSON_CLIENT_SECRET) if not client_id: return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_ID) if not client_secret: return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_SECRET) oauth_token = self._state.get('oauth_token') is_oauth_token = oauth_token and oauth_token.get("access_token") and oauth_token.get("refresh_token") if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY and is_oauth_token and not self._is_client_id_changed: self.debug_print("Try to generate token from refresh token") ret = self._azure_int_auth_refresh(client_id, client_secret) return ret client_req_id = str(uuid.uuid4()) ret_val, domain = self._get_domain(username, client_req_id) if phantom.is_fail(ret_val): return None, domain headers = {'client-request-id': client_req_id, 'return-client-request-id': 'True'} url = "{0}/{1}/oauth2/token".format(EWS_LOGIN_URL, domain) params = None parsed_base_url = urlparse(self._base_url) data = { 'resource': '{0}://{1}'.format(parsed_base_url.scheme, parsed_base_url.netloc), 'client_id': client_id, 'username': username, 'password': password, 'grant_type': 'password', 'scope': 'openid', 'client_secret': client_secret } try: r = self._session.post(url, params=params, headers=headers, data=data, verify=True, timeout=self._timeout) except Exception as e: return None, str(e) if r.status_code != 200: return None, self._extract_error(r) resp_json = None try: resp_json = r.json() except Exception as e: return None, str(e) if 'token_type' not in resp_json: return None, "token_type not found in response from server" if 'access_token' not in resp_json: return None, "token not found in response from server" self._state["oauth_token"] = resp_json self._state['client_id'] = client_id self.save_progress("Got Access Token") return OAuth2TokenAuth(resp_json['access_token'], resp_json['token_type']), "" def _check_password(self, config): if phantom.APP_JSON_PASSWORD not in list(config.keys()): return phantom.APP_ERROR, "Password not present in asset configuration" return phantom.APP_SUCCESS, '' def _validate_integer(self, action_result, parameter, key, allow_zero=False): try: if not float(parameter).is_integer(): return action_result.set_status( phantom.APP_ERROR, "Please provide a valid integer value in the '{0}' parameter".format(key)), None parameter = int(parameter) except Exception: return action_result.set_status(phantom.APP_ERROR, "Please provide a valid integer value in the '{0}' parameter".format(key)), None if not allow_zero and parameter <= 0: return action_result.set_status( phantom.APP_ERROR, "Please provide a non-zero positive integer in the '{0}' parameter".format(key)), None elif allow_zero and parameter < 0: return action_result.set_status( phantom.APP_ERROR, "Please provide a valid non-negative integer value in the '{0}' parameter".format(key)), None return phantom.APP_SUCCESS, parameter def _get_error_message_from_exception(self, e): """ This method is used to get appropriate error message from the exception. :param e: Exception object :return: error message """ error_code = None error_message = "Error message unavailable. Please check the asset configuration and|or action parameters." self.error_print("Error occurred.", e) try: if hasattr(e, "args"): if len(e.args) > 1: error_code = e.args[0] error_message = e.args[1] elif len(e.args) == 1: error_message = e.args[0] except Exception as e: self.error_print("Error occurred while fetching exception information. Details: {}".format( self._get_error_message_from_exception(e))) if not error_code: error_text = "Error Message: {}".format(error_message) else: error_text = "Error Code: {}. Error Message: {}".format(error_code, error_message) return error_text def _get_string(self, input_str, charset): try: if input_str: input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset) except Exception: self.debug_print("Error occurred while converting to string with specific encoding") return input_str def _is_ip(self, input_ip_address): """ Function that checks given address and return True if address is valid IPv4 or IPV6 address. :param input_ip_address: IP address :return: status (success/failure) """ try: ipaddress.ip_address(input_ip_address) except Exception: return False return True def _extract_error(self, r): """ This method generates an error message from the error response. :param r: Response object :return: error message """ try: error_json = r.json() error = error_json["error"] if error in EWS_ASSET_PARAM_CHECK_LIST_ERRORS: self._reset_the_state() error_desc = error_json["error_description"] error_text = "An error occurred. Error: {}, description: {}".format(error, error_desc) return error_text except Exception: return r.text def _set_client_cred_auth(self, config): """ This method generates OAuth token using the client credentials grant. :param config: Dictionary of asset configuration variables :return: An OAuth2TokenAuth object in case of success otherwise, an error message """ oauth_token = self._state.get("oauth_client_token", {}) if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY and oauth_token: if oauth_token.get('access_token') and oauth_token.get('token_type'): return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), "" client_id = config.get("client_id") client_secret = config.get("client_secret") if not (client_id and client_secret): return None, MISSING_CLIENT_CREDS client_req_id = str(uuid.uuid4()) username = config[phantom.APP_JSON_USERNAME] ret_val, domain = self._get_domain(username, client_req_id) if phantom.is_fail(ret_val): return None, domain url = "{0}/{1}/oauth2/token".format(EWS_LOGIN_URL, domain) parsed_base_url = urlparse(self._base_url) headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'} data = { "client_id": client_id, "client_secret": client_secret, "grant_type": "client_credentials", "resource": "{0}://{1}".format(parsed_base_url.scheme, parsed_base_url.netloc), } self.debug_print("Requesting a new token for OAuth client credentials authentication") try: r = self._session.post(url, headers=headers, data=data, verify=True, timeout=self._timeout) except Exception as e: self._state.pop("oauth_client_token", None) return None, str(e) if r.status_code != 200: return None, self._extract_error(r) oauth_token = None try: oauth_token = r.json() except Exception as e: return None, str(e) self.save_progress("Received access token") self._state['oauth_client_token'] = oauth_token self._state['client_id'] = client_id return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), "" def _encrypt_client_token(self, state): """ This method encrypts the oauth client token. :param config: State dictionary :return: Encrypted state """ try: if "oauth_client_token" in state and self.auth_type == AUTH_TYPE_CLIENT_CRED: self.debug_print("Encrypting the oauth client token") token = state["oauth_client_token"] state["oauth_client_token"]["access_token"] = encryption_helper.encrypt(token["access_token"], self.get_asset_id()) except Exception as e: self.debug_print("Error occurred while encrypting the token: {}. Deleting the token".format( self._get_error_message_from_exception(e))) state.pop("oauth_client_token", None) return state def _decrypt_client_token(self, state): """ This method decrypts the oauth client token. :param config: State dictionary :return: Decrypted state """ try: if "oauth_client_token" in state: self.debug_print("Decrypting the oauth client token") token = encryption_helper.decrypt(state["oauth_client_token"]["access_token"], self.get_asset_id()) state["oauth_client_token"]["access_token"] = token except Exception as e: self.debug_print("Error occurred while decrypting the token: {}. Deleting the token".format( self._get_error_message_from_exception(e))) state.pop("oauth_client_token", None) return state def finalize(self): if self.auth_type == AUTH_TYPE_CLIENT_CRED: self._state = self._encrypt_client_token(self._state) else: self._state = self.rsh._encrypt_state(self._state) self.save_state(self._state) return phantom.APP_SUCCESS def _clean_the_state(self): """ This method cleans the state. """ self.debug_print("Cleaning the state") if self.auth_type != AUTH_TYPE_CLIENT_CRED: self._state.pop("oauth_client_token", None) if self.auth_type not in (AUTH_TYPE_AZURE_INTERACTIVE, AUTH_TYPE_AZURE): self._state.pop("oauth_token", None) def _reset_the_state(self): self.debug_print("Resetting the state file") self._state = {"app_version": self.get_app_json().get("app_version")} def initialize(self): """ Called once for every action, all member initializations occur here""" config = self.get_config() self.auth_type = config.get(EWS_JSON_AUTH_TYPE, AUTH_TYPE_AZURE) self.rsh = RequestStateHandler(self.get_asset_id()) self._state = self.load_state() if not isinstance(self._state, dict): self.debug_print("Resetting the state file with the default format") self._state = {"app_version": self.get_app_json().get("app_version")} if self.auth_type == AUTH_TYPE_AZURE_INTERACTIVE: return self.set_status(phantom.APP_ERROR, EWSONPREM_STATE_FILE_CORRUPT_ERROR) if self.auth_type == AUTH_TYPE_CLIENT_CRED: self._state = self._decrypt_client_token(self._state) else: self._state, message = self.rsh._decrypt_state(self._state) if message: return self.set_status(phantom.APP_ERROR, message) # The headers, initialize them here once and use them for all other REST calls self._headers = {'Content-Type': 'text/xml; charset=utf-8', 'Accept': 'text/xml'} self._session = requests.Session() self._base_url = config[EWSONPREM_JSON_DEVICE_URL] message = '' self._clean_the_state() is_oauth_token_exist = self.auth_type in [AUTH_TYPE_AZURE, AUTH_TYPE_AZURE_INTERACTIVE] and \ not self._state.get("oauth_token", {}).get("access_token") is_oauth_client_token_exist = self.auth_type == AUTH_TYPE_CLIENT_CRED and \ not self._state.get("oauth_client_token", {}).get("access_token") self._is_client_id_changed = (self._state.get('client_id') and config.get("client_id")) and \ self._state.get('client_id') != config.get("client_id") if self._is_client_id_changed or is_oauth_token_exist or is_oauth_client_token_exist: self._is_token_test_connectivity = self.get_action_identifier() == phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY ret, message = self.set_authentication_method(config) if phantom.is_fail(ret): return self.set_status(ret, message) if self._base_url.endswith('/'): self._base_url = self._base_url[:-1] # The host member extracts the host from the URL, is used in creating status messages self._host = self._base_url[self._base_url.find('//') + 2:] self._impersonate = config[EWS_JSON_USE_IMPERSONATE] ret = self._handle_preprocess_scipts() if phantom.is_fail(ret): return ret self.set_validator('ipv6', self._is_ip) self._timeout = config.get("timeout", DEFAULT_REQUEST_TIMEOUT) ret_val, self._timeout = self._validate_integer(self, self._timeout, "Request Timeout") if phantom.is_fail(ret_val): return self.get_status() return phantom.APP_SUCCESS def _get_error_details(self, resp_json): """ Function that parses the error json received from the device and placed into a json""" error_details = {"message": "Not Found", "code": "Not supplied"} if not resp_json: return error_details error_details['message'] = resp_json.get('m:MessageText', 'Not Specified') error_details['code'] = resp_json.get('m:ResponseCode', 'Not Specified') return error_details def _create_aqs(self, subject, sender, body): aqs = {'subject': subject, 'from': sender, 'body': body} return ' '.join('{}:"{}"'.format(key, value) for key, value in aqs.items() if value) # TODO: Should change these function to be parameterized, instead of one per type of request def _check_get_attachment_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:GetAttachmentResponse']['m:ResponseMessages']['m:GetAttachmentResponseMessage'] def _check_getitem_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:GetItemResponse']['m:ResponseMessages']['m:GetItemResponseMessage'] def _check_find_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:FindItemResponse']['m:ResponseMessages']['m:FindItemResponseMessage'] def _check_delete_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:DeleteItemResponse']['m:ResponseMessages']['m:DeleteItemResponseMessage'] def _check_update_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:UpdateItemResponse']['m:ResponseMessages']['m:UpdateItemResponseMessage'] def _check_copy_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:CopyItemResponse']['m:ResponseMessages']['m:CopyItemResponseMessage'] def _check_markasjunk_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:MarkAsJunkResponse']['m:ResponseMessages']['m:MarkAsJunkResponseMessage'] def _check_move_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:MoveItemResponse']['m:ResponseMessages']['m:MoveItemResponseMessage'] def _check_expand_dl_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:ExpandDLResponse']['m:ResponseMessages']['m:ExpandDLResponseMessage'] def _check_findfolder_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:FindFolderResponse']['m:ResponseMessages']['m:FindFolderResponseMessage'] def _check_getfolder_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:GetFolderResponse']['m:ResponseMessages']['m:GetFolderResponseMessage'] def _check_resolve_names_response(self, resp_json): resp_body = resp_json['s:Envelope']['s:Body'] return resp_body['m:ResolveNamesResponse']['m:ResponseMessages']['m:ResolveNamesResponseMessage'] def _parse_fault_node(self, result, fault_node): fault_code = fault_node.get('faultcode', {}).get('#text', 'Not specified') fault_string = fault_node.get('faultstring', {}).get('#text', 'Not specified') return result.set_status(phantom.APP_ERROR, 'Error occurred, Code: {0} Detail: {1}'.format(fault_code, fault_string)) def _clean_xml(self, input_xml): # But before we do that clean up the xml, # MS is known to send invalid xml chars, # that its own msxml library deems as invalid # https://support.microsoft.com/en-us/kb/315580 replace_regex = r"&#x([0-8]|[b-cB-C]|[e-fE-F]|1[0-9]|1[a-fA-F]);" clean_xml, number_of_substitutes = re.subn(replace_regex, '', input_xml) self.debug_print("Cleaned xml with {0} substitutions".format(number_of_substitutes)) return clean_xml def _get_http_error_details(self, r): if 'text/xml' in r.headers.get('Content-Type', ''): # Try a xmltodict parse try: resp_json = xmltodict.parse(self._clean_xml(r.text)) # convert from OrderedDict to plain dict resp_json = json.loads(json.dumps(resp_json)) except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred while parsing the HTTP error response. {0}".format(error_text)) return "Unable to parse error details" try: return resp_json['s:Envelope']['s:Body']['s:Fault']['detail']['e:Message']['#text'] except Exception: pass return "" def set_authentication_method(self, config): "Method for setting authentication" if self.auth_type == AUTH_TYPE_AZURE: self.save_progress("Using Azure AD authentication") self._session.auth, message = self._set_azure_auth(config) elif self.auth_type == AUTH_TYPE_AZURE_INTERACTIVE: self.save_progress("Using Azure AD authentication (interactive)") self._session.auth, message = self._set_azure_int_auth(config) elif self.auth_type == AUTH_TYPE_FEDERATED: self.save_progress("Using Federated authentication") self._session.auth, message = self._set_federated_auth(config) elif self.auth_type == AUTH_TYPE_CLIENT_CRED: self._state = self._decrypt_client_token(self._state) self.save_progress("Using Client credentials authentication") self._session.auth, message = self._set_client_cred_auth(config) else: # Make sure username and password are set ret_val, message = self._check_password(config) if phantom.is_fail(ret_val): self.save_progress(message) return ret_val password = config[phantom.APP_JSON_PASSWORD] username = config[phantom.APP_JSON_USERNAME] username = username.replace('/', '\\') self._session.auth = HTTPBasicAuth(username, password) self.save_progress("Using HTTP Basic authentication") if not self._session.auth: return phantom.APP_ERROR, message return phantom.APP_SUCCESS, "" def _make_rest_call(self, result, data, check_response, data_string=False): """ Function that makes the REST call to the device, generic function that can be called from various action handlers Needs to return two values, 1st the phantom.APP_[SUCCESS|ERROR], 2nd the response """ config = self.get_config() resp_json = None ret, message = self._set_header_for_rest_call(config) if phantom.is_fail(ret): return result.set_status(ret, message), resp_json if self._impersonate and (not self._target_user): return result.set_status(phantom.APP_ERROR, "Impersonation is required, but target user not set. Cannot continue execution"), None if self._impersonate: data = ews_soap.add_to_envelope(data, self._target_user) else: data = ews_soap.add_to_envelope(data) data = ews_soap.get_string(data) # Make the call try: r = self._session.post(self._base_url, data=data, headers=self._headers, timeout=self._timeout, verify=True) except Exception as e: error_text = self._get_error_message_from_exception(e) return result.set_status(phantom.APP_ERROR, EWSONPREM_SERVER_CONNECTIVITY_ERROR, error_text), resp_json if hasattr(result, 'add_debug_data'): result.add_debug_data({'r_status_code': r.status_code}) result.add_debug_data({'r_text': r.text if r else 'r is None'}) result.add_debug_data({'r_headers': r.headers}) if r.status_code == 401: if self.auth_type == AUTH_TYPE_CLIENT_CRED: self._state.pop("oauth_client_token", None) ret, message = self.set_authentication_method(config) if phantom.is_fail(ret): return result.set_status(ret, message), resp_json try: r = self._session.post(self._base_url, data=data, headers=self._headers, timeout=self._timeout, verify=True) except Exception as e: error_text = self._get_error_message_from_exception(e) return result.set_status(phantom.APP_ERROR, EWSONPREM_SERVER_CONNECTIVITY_ERROR, error_text), resp_json if not (200 <= r.status_code <= 399): # error detail = self._get_http_error_details(r) if r.status_code == 401: detail = "{0}. {1}".format(detail, EWS_MODIFY_CONFIG) message = "Call failed with HTTP Code: {0}".format(r.status_code) if r.reason: message = "{}. Reason: {}".format(message, r.reason) if detail: message = "{}. Details: {}".format(message, detail) return result.set_status(phantom.APP_ERROR, message), None # Try a xmltodict parse try: resp_json = xmltodict.parse(self._clean_xml(r.text)) # convert from OrderedDict to plain dict resp_json = json.loads(json.dumps(resp_json)) except Exception as e: # r.text is guaranteed to be NON None, it will be empty, but not None msg_string = EWSONPREM_JSON_PARSE_ERROR.format(raw_text=r.text) error_text = self._get_error_message_from_exception(e) return result.set_status(phantom.APP_ERROR, msg_string, error_text), resp_json # Check if there is a fault node present fault_node = resp_json.get('s:Envelope', {}).get('s:Body', {}).get('s:Fault') if fault_node: return self._parse_fault_node(result, fault_node), None # Now try getting the response message try: resp_message = check_response(resp_json) except Exception as e: msg_string = EWSONPREM_JSON_PARSE_ERROR.format(raw_text=r.text) error_text = self._get_error_message_from_exception(e) return result.set_status(phantom.APP_ERROR, msg_string, error_text), resp_json if not isinstance(resp_message, dict): return phantom.APP_SUCCESS, resp_message resp_class = resp_message.get('@ResponseClass', '') if resp_class == 'Error': return result.set_status(phantom.APP_ERROR, EWSONPREM_FROM_SERVER_ERROR.format(**(self._get_error_details(resp_message)))), resp_json return phantom.APP_SUCCESS, resp_message def _test_connectivity(self, param): """ Function that handles the test connectivity action, it is much simpler than other action handlers.""" # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) action_result = self.add_action_result(ActionResult(dict(param))) ret_val, email_infos = self._get_email_infos_to_process(0, 1, action_result) # Process errors if phantom.is_fail(ret_val): # Dump error messages in the log self.debug_print(action_result.get_message()) # action_result.append_to_message(EWS_MODIFY_CONFIG) # Set the status of the complete connector result action_result.set_status(phantom.APP_ERROR, action_result.get_message()) # Append the message to display self.save_progress(EWSONPREM_CONNECTIVITY_TEST_ERROR) # return error return phantom.APP_ERROR # Set the status of the connector result self.save_progress(EWSONPREM_CONNECTIVITY_TEST_SUCCESS) return action_result.set_status(phantom.APP_SUCCESS) def _get_child_folder_infos(self, user, action_result, parent_folder_info): step_size = 500 folder_infos = list() for curr_step_value in range(0, 10000, step_size): curr_range = "{0}-{1}".format(curr_step_value, curr_step_value + step_size - 1) input_xml = ews_soap.xml_get_children_info(user, parent_folder_id=parent_folder_info['id'], query_range=curr_range) ret_val, resp_json = self._make_rest_call(action_result, input_xml, self._check_findfolder_response) if phantom.is_fail(ret_val): return action_result.get_status(), None total_items = resp_json.get('m:RootFolder', {}).get('@TotalItemsInView', '0') if total_items == '0': # total_items gives the total items in the view, not just items returned in the current call return action_result.set_status(phantom.APP_ERROR, "Children not found, possibly not present."), None folders = [] if resp_json.get('m:RootFolder', {}).get('t:Folders', {}): folders_list = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:Folder', []) if not isinstance(folders_list, list): folders_list = [folders_list] folders.extend(folders_list) search_folders_list = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:SearchFolder', []) if not isinstance(search_folders_list, list): search_folders_list = [search_folders_list] folders.extend(search_folders_list) if not folders: return action_result.set_status(phantom.APP_ERROR, "Folder information not found in response, possibly not present"), None folder_infos.extend([{ 'id': x['t:FolderId']['@Id'], 'display_name': x['t:DisplayName'], 'children_count': x['t:ChildFolderCount'], 'folder_path': self._extract_folder_path(x.get('t:ExtendedProperty'))} for x in folders]) curr_folder_len = len(folders) if curr_folder_len < step_size: # got less than what we asked for, so looks like we got all that we wanted break ''' for folder_info in folder_infos: if (int(folder_info['children_count']) <= 0): continue curr_ar = ActionResult() ret_val, child_folder_infos = self._get_child_folder_infos(user, curr_ar, folder_info) if (ret_val): folder_infos.extend(child_folder_infos) ''' return phantom.APP_SUCCESS, folder_infos def _cleanse_key_names(self, input_dict): if not input_dict: return input_dict if not isinstance(input_dict, dict): return input_dict for k, v in list(input_dict.items()): if k.find(':') != -1: new_key = k.replace(':', '_') input_dict[new_key] = v del input_dict[k] if isinstance(v, dict): input_dict[new_key] = self._cleanse_key_names(v) if isinstance(v, list): new_v = [] for curr_v in v: new_v.append(self._cleanse_key_names(curr_v)) input_dict[new_key] = new_v return input_dict def _validate_range(self, email_range, action_result): try: mini, maxi = (int(x) for x in email_range.split('-')) except Exception: return action_result.set_status(phantom.APP_ERROR, "Unable to parse the range. Please specify the range as min_offset-max_offset") if mini < 0 or maxi < 0: return action_result.set_status(phantom.APP_ERROR, "Invalid min or max offset value specified in range") if mini > maxi: return action_result.set_status(phantom.APP_ERROR, "Invalid range value, min_offset greater than max_offset") if maxi > EWSONPREM_MAX_END_OFFSET_VAL: return action_result.set_status( phantom.APP_ERROR, "Invalid range value. The max_offset value cannot be greater than {0}".format(EWSONPREM_MAX_END_OFFSET_VAL)) return phantom.APP_SUCCESS def _process_query(self, action_result, params, flag=False): subject = params.get("subject") sender = params.get("sender") body = params.get("body") int_msg_id = params.get("int_msg_id") aqs = params.get("aqs") is_public_folder = params.get("is_public_folder", False) user = params.get("user") folder_path = params.get("folder_path") email_range = params.get("email_range", "0-10") ignore_subfolders = params.get("ignore_subfolders") folder_infos = [] if folder_path: # get the id of the folder specified ret_val, folder_info = self._get_folder_info(user, folder_path, action_result, is_public_folder) else: ret_val, folder_info = self._get_root_folder_id(action_result, is_public_folder) if phantom.is_fail(ret_val): return action_result.get_status(), None parent_folder_info = folder_info folder_infos.append(folder_info) if not ignore_subfolders: if int(parent_folder_info['children_count']) != 0: ret_val, child_folder_infos = self._get_child_folder_infos(user, action_result, parent_folder_info=parent_folder_info) if phantom.is_fail(ret_val): return action_result.get_status(), None folder_infos.extend(child_folder_infos) items_matched = 0 msg_items = list() num_folder_ids = len(folder_infos) self.save_progress('Will be searching in {0} folder{1}', num_folder_ids, 's' if num_folder_ids > 1 else '') for i, folder_info in enumerate(folder_infos): folder_id = folder_info['id'] ar_folder = ActionResult() if aqs: data = ews_soap.get_search_request_aqs([folder_id], aqs, email_range) else: data = ews_soap.get_search_request_filter([folder_id], subject=subject, sender=sender, body=body, int_msg_id=int_msg_id, email_range=email_range) ret_val, resp_json = self._make_rest_call(ar_folder, data, self._check_find_response) # Process errors if phantom.is_fail(ret_val): self.debug_print("Rest call failed: {0}".format(ar_folder.get_message())) continue resp_json = resp_json.get('m:RootFolder') if not resp_json: self.debug_print('Result does not contain RootFolder key') continue items = resp_json.get('t:Items') if items is None: self.debug_print("There are no items in the response") continue items = resp_json.get('t:Items', {}).get('t:Message', []) if not isinstance(items, list): items = [items] items_matched += len(items) for curr_item in items: self._cleanse_key_names(curr_item) curr_item['folder'] = folder_info['display_name'] curr_item['folder_path'] = folder_info.get('folder_path') if flag: msg_items.append(curr_item) else: action_result.add_data(curr_item) if flag: return phantom.APP_SUCCESS, msg_items return phantom.APP_SUCCESS, items_matched def _run_query(self, param): """ Action handler for the 'run query' action""" action_result = self.add_action_result(ActionResult(dict(param))) subject = param.get(EWSONPREM_JSON_SUBJECT, "") sender = param.get(EWSONPREM_JSON_FROM, "") body = param.get(EWSONPREM_JSON_BODY, "") int_msg_id = param.get(EWSONPREM_JSON_INT_MSG_ID, "") aqs = param.get(EWSONPREM_JSON_QUERY, "") is_public_folder = param.get(EWS_JSON_IS_PUBLIC_FOLDER, False) try: if aqs: UnicodeDammit(aqs).unicode_markup except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Parameter validation failed for the AQS query. {0}".format(error_text)) return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the query. Unicode value found.") if not subject and not sender and not aqs and not body and not int_msg_id: return action_result.set_status(phantom.APP_ERROR, "Please specify at-least one search criteria") # Use parameters to create an aqs string ''' if (not aqs): aqs = self._create_aqs(subject, sender, body) ''' self.debug_print("AQS_STR: {}".format(aqs)) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) user = param[EWSONPREM_JSON_EMAIL] folder_path = param.get(EWSONPREM_JSON_FOLDER) self._target_user = user ignore_subfolders = param.get('ignore_subfolders', False) # self.save_progress("Searching in {0}\\{1}{2}".format( # self._clean_str(user), # folder_path if folder_path else 'All Folders', # ' (and the children)' if (not ignore_subfolders) else '')) email_range = param.get(EWSONPREM_JSON_RANGE, "0-10") ret_val = self._validate_range(email_range, action_result) if phantom.is_fail(ret_val): return action_result.get_status() params = { "subject": subject, "sender": sender, "body": body, "int_msg_id": int_msg_id, "aqs": aqs, "is_public_folder": is_public_folder, "user": user, "folder_path": folder_path, "email_range": email_range, "ignore_subfolders": ignore_subfolders } ret_val, items_matched = self._process_query(action_result, params) if phantom.is_fail(ret_val): return action_result.get_status() action_result.update_summary({'emails_matched': items_matched}) # Set the Status return action_result.set_status(phantom.APP_SUCCESS) def _get_container_id(self, email_id): email_id = quote_plus(email_id) url = '{}rest/container?_filter_source_data_identifier="{}"&_filter_asset={}'.format( self.get_phantom_base_url(), email_id, self.get_asset_id()) try: # Ignored the verify semgrep check as the following is a call to the phantom's REST API on the instance itself r = requests.get(url, verify=False) # nosemgrep resp_json = r.json() except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Unable to query Email container", error_text) return None if resp_json.get('count', 0) <= 0: self.debug_print("No container matched") return None try: container_id = resp_json.get('data', [])[0]['id'] except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Container results, not proper", error_text) return None return container_id def _get_email_data_from_container(self, container_id, action_result): email_data = None email_id = None resp_data = {} try: ret_val, resp_data, status_code = self.get_container_info(container_id) except ValueError as e: error_text = self._get_error_message_from_exception(e) return RetVal3(action_result.set_status( phantom.APP_ERROR, 'Validation failed for the container_id. {0}'.format(error_text)), email_data, email_id) if phantom.is_fail(ret_val): return RetVal3(action_result.set_status(phantom.APP_ERROR, str(resp_data)), email_data, email_id) # Keep pylint happy resp_data = dict(resp_data) email_data = resp_data.get('data', {}).get('raw_email') email_id = resp_data['source_data_identifier'] if not email_data: return RetVal3(action_result.set_status( phantom.APP_ERROR, "Container does not seem to be created by the same app, raw_email data not found."), None, None) return RetVal3(phantom.APP_SUCCESS, email_data, email_id) def _get_email_data_from_vault(self, vault_id, action_result): file_path = None try: success, message, file_info = ph_rules.vault_info(vault_id=vault_id) file_info = list(file_info)[0] file_path = file_info.get('path') except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print(error_text) return RetVal2(action_result.set_status(phantom.APP_ERROR, "Could not get file path for vault item"), None) if not file_path: return RetVal2(action_result.set_status(phantom.APP_ERROR, "Could not get file path for vault item"), None) try: mail = outlookmsgfile.load(file_path) except UnicodeDecodeError as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Failed to parse message. {0}".format(error_text)), None except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Failed to parse message. {0}".format(error_text)) return action_result.set_status( phantom.APP_ERROR, "Failed to parse message. Please check if the provided file is valid msg file."), None return phantom.APP_SUCCESS, mail def _decode_subject(self, subject, charset): # Decode subject unicode decoded_subject = '' subject = subject.split('?=\r\n\t=') for sub in subject: if '?UTF-8?B?' in sub: sub = sub.replace('?UTF-8?B?', '').replace('?=', '') sub = base64.b64decode(sub) elif '?UTF-8?Q?' in sub: sub = sub.replace('?UTF-8?Q?', '').replace('?=', '') sub = quopri.decodestring(sub) sub = sub.decode(charset) decoded_subject = "{}{}".format(decoded_subject, sub) return decoded_subject def _decode_uni_string(self, input_str, def_name): # try to find all the decoded strings, we could have multiple decoded strings # or a single decoded string between two normal strings separated by \r\n # YEAH...it could get that messy input_str = input_str.replace('\r\n', '') encoded_strings = re.findall(r'=\?.*\?=', input_str, re.I) # return input_str as is, no need to do any conversion if not encoded_strings: return input_str # get the decoded strings try: decoded_strings = [decode_header(x)[0] for x in encoded_strings] decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings] except Exception as e: error_code, error_message = self._base_connector._get_error_message_from_exception(e) err = "Error Code: {0}. Error Message: {1}".format(error_code, error_message) self._debug_print("Decoding: {0}. {1}".format(encoded_strings, err)) return def_name # convert to dict for safe access, if it's an empty list, the dict will be empty decoded_strings = dict(enumerate(decoded_strings)) for i, encoded_string in enumerate(encoded_strings): decoded_string = decoded_strings.get(i) if not decoded_string: # nothing to replace with continue value = decoded_string.get('value') encoding = decoded_string.get('encoding') if not encoding or not value: # nothing to replace with continue try: # Some non-ascii characters were causing decoding issue with # the UnicodeDammit and working correctly with the decode function. # keeping previous logic in the except block incase of failure. value = value.decode(encoding) input_str = input_str.replace(encoded_string, value) except Exception: try: if encoding != 'utf-8': value = str(value, encoding) except Exception: pass try: if value: value = UnicodeDammit(value).unicode_markup input_str = input_str.replace(encoded_string, value) except Exception: pass return input_str def _get_email_headers_from_mail(self, mail, charset=None, email_headers=None): if mail: email_headers = list(mail.items()) # it's gives message headers # TODO: the next 2 ifs can be condensed to use 'or' if charset is None: charset = mail.get_content_charset() if not charset: charset = 'utf-8' if not email_headers: return {} # Convert the header tuple into a dictionary headers = CaseInsensitiveDict() try: [headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers] except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred while converting the header tuple into a dictionary. {}".format(error_text)) # Decode unicode subject # if '?UTF-8?' in headers['Subject']: # chars = 'utf-8' # headers['Subject'] = self._decode_subject(headers['Subject'], chars) # Handle received separately received_headers = list() try: received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received'] except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred while handling the received header tuple separately. {}".format(error_text)) if received_headers: headers['Received'] = received_headers # handle the subject string, if required add a new key subject = headers.get('Subject') if subject: if isinstance(subject, str): headers['decodedSubject'] = self._decode_uni_string(subject, subject) return headers def _get_mail_header_dict(self, email_data, action_result): try: mail = email.message_from_string(email_data) except Exception: return RetVal2(action_result.set_status( phantom.APP_ERROR, "Unable to create email object from data. Does not seem to be valid email"), None) headers = mail.__dict__.get('_headers') if not headers: return RetVal2(action_result.set_status( phantom.APP_ERROR, "Could not extract header info from email object data. Does not seem to be valid email"), None) ret_val = {} for header in headers: ret_val[header[0]] = header[1] return RetVal2(phantom.APP_SUCCESS, ret_val) def _handle_email_with_container_id(self, action_result, container_id): ret_val, email_data, email_id = self._get_email_data_from_container(container_id, action_result) if phantom.is_fail(ret_val): return action_result.get_status(), None action_result.update_summary({"email_id": email_id}) ret_val, header_dict = self._get_mail_header_dict(email_data, action_result) if phantom.is_fail(ret_val): return action_result.get_status(), None action_result.add_data(header_dict) return phantom.APP_SUCCESS, email_id def _handle_email_with_vault_id(self, action_result, vault_id, ingest_email, target_container_id=None, charset=None, user=None): ret_val, mail = self._get_email_data_from_vault(vault_id, action_result) if phantom.is_fail(ret_val): return action_result.get_status(), None try: if mail: headers = self._get_email_headers_from_mail(mail, charset) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Unable to get email header string from message. {0}".format(error_text)), None if not headers: return action_result.set_status(phantom.APP_ERROR, "Unable to fetch the headers information from the provided MSG file"), None action_result.add_data(dict(headers)) if not ingest_email: return phantom.APP_SUCCESS, None int_msg_id = headers.get("Message-ID") if not int_msg_id: return action_result.set_status(phantom.APP_ERROR, "Unable to fetch the message_id information from the provided MSG file"), None params = { "int_msg_id": str(int_msg_id) } ret_val, item_matched = self._process_query(action_result, params, flag=True) if phantom.is_fail(ret_val): return action_result.get_status(), None if not item_matched: err_msg = "Unable to ingest the message from the provided MSG file, " \ "the MSG file should be associated with the logged in SMTP user to ingest message from vault item." return action_result.set_status(phantom.APP_ERROR, err_msg), None item = item_matched[0] message_id = item.get("t_ItemId", {}).get("@Id") return phantom.APP_SUCCESS, message_id def _handle_email_with_message_id(self, action_result, email_id): try: data = ews_soap.xml_get_emails_data([email_id]) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. {0}".format(error_text)), None action_result.update_summary({"email_id": email_id}) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response) # Process errors if phantom.is_fail(ret_val): message = "Error while getting email data for id {0}. Error: {1}".format(email_id, action_result.get_message()) self.debug_print(message) self.send_progress(message) return action_result.set_status(phantom.APP_ERROR, message), None self._cleanse_key_names(resp_json) """ ret_val, rfc822_format = self._get_rfc822_format(resp_json, action_result) if (phantom.is_fail(ret_val)): return phantom.APP_ERROR if (not rfc822_format): return action_result.set_status(phantom.APP_ERROR, 'Result does not contain rfc822 data') """ message = resp_json.get('m_Items', {}).get('t_Message', {}) # Remove mime content because it can be very large if 't_MimeContent' in message: message.pop('t_MimeContent') action_result.add_data(message) recipients_mailbox = message.get('t_ToRecipients', {}).get('t_Mailbox') if recipients_mailbox and (not isinstance(recipients_mailbox, list)): message['t_ToRecipients']['t_Mailbox'] = [recipients_mailbox] summary = { 'subject': message.get('t_Subject'), 'create_time': message.get('t_DateTimeCreated'), 'sent_time': message.get('t_DateTimeSent') } action_result.update_summary(summary) return phantom.APP_SUCCESS, email_id def _get_email(self, param): action_result = self.add_action_result(ActionResult(dict(param))) self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) message_id = param.get(EWSONPREM_JSON_ID) container_id = param.get(EWS_JSON_CONTAINER_ID) vault_id = param.get(EWS_JSON_VAULT_ID) self._target_user = param.get(EWSONPREM_JSON_EMAIL) use_current_container = param.get('use_current_container') target_container_id = None flag = False email_id = None if container_id is not None: ret_val, container_id = self._validate_integer(action_result, container_id, "container_id") if phantom.is_fail(ret_val): return action_result.get_status() if use_current_container: target_container_id = self.get_container_id() ingest_email = param.get(EWSONPREM_JSON_INGEST_EMAIL, False) if not message_id and not container_id and not vault_id: return action_result.set_status(phantom.APP_ERROR, "Please specify id, container_id or vault_id to get the email") if container_id or vault_id: if container_id: ret_val, email_id = self._handle_email_with_container_id(action_result, container_id) if phantom.is_fail(ret_val): return action_result.set_status(phantom.APP_ERROR, action_result.get_message()) if not ingest_email: return action_result.set_status(phantom.APP_SUCCESS, "Successfully retrieved an email for container ID") elif vault_id: ret_val, email_id = self._handle_email_with_vault_id(action_result, vault_id, ingest_email, target_container_id) if phantom.is_fail(ret_val): return action_result.set_status(phantom.APP_ERROR, action_result.get_message()) if not ingest_email: return action_result.set_status(phantom.APP_SUCCESS, "Successfully retrieved an email for vault item") elif message_id: ret_val, email_id = self._handle_email_with_message_id(action_result, message_id) if phantom.is_fail(ret_val): return action_result.set_status(phantom.APP_ERROR, action_result.get_message()) if not ingest_email: return action_result.set_status(phantom.APP_SUCCESS, "Successfully retrieved an email for message ID") else: return action_result.set_status(phantom.APP_ERROR, "Please specify id, container_id or vault_id to get the email") # if the container_id or vault_id is given to fetch email and ingest_email is True, then, # while ingesting email to create artifacts of attachments, domains, hashes, ips and urls, flag has been set to True. # if message_id is given then artifacts has been created on the basis of asset configuration parameter while ingesting. if container_id or vault_id: flag = True if not email_id: return action_result.set_status(phantom.APP_ERROR, "Unable to get message ID from the given parameters") try: self._process_email_id(email_id, target_container_id, flag=flag) except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred in _process_email_id with Message ID: {0}. {1}".format(email_id, error_text)) action_result.update_summary({"container_id": None}) return action_result.set_status(phantom.APP_ERROR, "Error processing email. {0}".format(error_text)) if target_container_id is None: # get the container id that of the email that was ingested container_id = self._get_container_id(email_id) action_result.update_summary({"container_id": container_id}) else: action_result.update_summary({"container_id": target_container_id}) return action_result.set_status(phantom.APP_SUCCESS) def _valid_xml_char_ordinal(self, c): codepoint = ord(c) # conditions ordered by presumed frequency return 0x20 <= codepoint <= 0xD7FF or codepoint in (0x9, 0xA, 0xD) or 0xE000 <= codepoint <= 0xFFFD or 0x10000 <= codepoint <= 0x10FFFF def _update_email(self, param): action_result = self.add_action_result(ActionResult(dict(param))) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) email_id = param[EWSONPREM_JSON_ID] self._target_user = param.get(EWSONPREM_JSON_EMAIL) category = param.get('category') subject = param.get('subject') if subject is None and category is None: return action_result.set_status(phantom.APP_ERROR, "Please specify one of the email properties to update") # do a get on the message to get the change id try: data = ews_soap.xml_get_emails_data([email_id]) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. {0}".format(error_text)) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response) # Process errors if phantom.is_fail(ret_val): message = "Error while getting email data for id {0}. Error: {1}".format(email_id, action_result.get_message()) self.debug_print(message) self.send_progress(message) return phantom.APP_ERROR try: change_key = resp_json['m:Items']['t:Message']['t:ItemId']['@ChangeKey'] except Exception: return action_result.set_status(phantom.APP_ERROR, "Unable to get the change key of the email to update") if category is not None: category = [x.strip() for x in category.split(',')] try: data = ews_soap.get_update_email(email_id, change_key, category, subject) except ValueError as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Validation failed for the given input parameter. {0}".format(error_text)) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_update_response) # Process errors if phantom.is_fail(ret_val): return action_result.get_status() if not resp_json: return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key') try: data = ews_soap.xml_get_emails_data([email_id]) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. Error: {}".format(error_text)) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response) # Process errors if phantom.is_fail(ret_val): return action_result.get_status() self._cleanse_key_names(resp_json) message = resp_json.get('m_Items', {}).get('t_Message', {}) categories = message.get('t_Categories', {}).get('t_String') if categories: if not isinstance(categories, list): categories = [categories] message['t_Categories'] = categories action_result.add_data(message) recipients_mailbox = message.get('t_ToRecipients', {}).get('t_Mailbox') if recipients_mailbox and (not isinstance(recipients_mailbox, list)): message['t_ToRecipients']['t_Mailbox'] = [recipients_mailbox] summary = { 'subject': message.get('t_Subject'), 'create_time': message.get('t_DateTimeCreated'), 'sent_time': message.get('t_DateTimeSent') } action_result.update_summary(summary) # Set the Status return action_result.set_status(phantom.APP_SUCCESS) def _delete_email(self, param): action_result = ActionResult(dict(param)) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) message_id = param[EWSONPREM_JSON_ID] self._target_user = param.get(EWSONPREM_JSON_EMAIL) message_ids = ph_utils.get_list_from_string(message_id) try: data = ews_soap.get_delete_email(message_ids) except Exception as e: self.add_action_result(action_result) error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, 'Parameter validation failed for the ID. {0}'.format(error_text)) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_delete_response) # Process errors if phantom.is_fail(ret_val): self.add_action_result(action_result) return action_result.get_status() if not resp_json: self.add_action_result(action_result) return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key') if not isinstance(resp_json, list): resp_json = [resp_json] for msg_id, resp_message in zip(message_ids, resp_json): curr_param = dict(param) curr_param.update({"id": msg_id}) curr_ar = self.add_action_result(ActionResult(curr_param)) resp_class = resp_message.get('@ResponseClass', '') if resp_class == 'Error': curr_ar.set_status(phantom.APP_ERROR, EWSONPREM_FROM_SERVER_ERROR.format(**(self._get_error_details(resp_message)))) continue curr_ar.set_status(phantom.APP_SUCCESS, "Email deleted successfully") # Set the Status return phantom.APP_SUCCESS def _clean_str(self, string): if not string: return '' return string.replace('{', '-').replace('}', '-') def _extract_folder_path(self, extended_property): if not extended_property: return '' # As of right now, the folder path is the only extended property # that the app extracts, so parse the value directly, once the app starts # parsing other extended properties, the 't:ExtendedFieldURI dictionary will # require to be parsed and validated value = extended_property.get('t:Value') if not value: return '' value = value.lstrip('\\') # I don't know why exchange gives back the path with # '\\' separators since '\' is a valid char allowed in a folder name # makes things confusing and extra parsing code to be written. # Therefore, the app treats folder paths with '/' as the separator, keeps # things less confusing for users. # value = value.replace('\\', '/') if not value: return '' try: str(value) except UnicodeEncodeError: return UnicodeDammit(value).unicode_markup return value def _get_root_folder_id(self, action_result, is_public_folder=False): if is_public_folder: root_folder_id = 'publicfoldersroot' else: root_folder_id = 'root' folder_info = {'id': root_folder_id, 'display_name': root_folder_id, 'children_count': -1, 'folder_path': ''} return phantom.APP_SUCCESS, folder_info def _get_matching_folder_path(self, folder_list, folder_name, folder_path, action_result): """ The input folder is a list, meaning the folder name matched multiple folder Given the folder path, this function will return the one that matches, or fail """ if not folder_list: return action_result(phantom.APP_ERROR, "Unable to find info about folder '{0}'. Returned info list empty".format(folder_name)), None for curr_folder in folder_list: curr_folder_path = self._extract_folder_path(curr_folder.get('t:ExtendedProperty')) if UnicodeDammit(curr_folder_path).unicode_markup == UnicodeDammit(folder_path).unicode_markup: return phantom.APP_SUCCESS, curr_folder return action_result.set_status( phantom.APP_ERROR, "Folder paths did not match while searching for folder: '{0}'".format(folder_path)), None def _get_folder_info(self, user, folder_path, action_result, is_public_folder=False): # hindsight is always 20-20, set the folder path separator to be '/', thinking folder names allow '\' as a char. # turns out even '/' is supported by office365, so let the action escape the '/' char if it's part of the folder name folder_path = folder_path.replace('\\/', self.REPLACE_CONST) folder_names = folder_path.split('/') folder_names = list(filter(None, folder_names)) if not folder_names: return action_result.set_status(phantom.APP_ERROR, "Please provide a valid value for folder path"), None for i, folder_name in enumerate(folder_names): folder_names[i] = folder_name.replace(self.REPLACE_CONST, '/') if is_public_folder: parent_folder_id = 'publicfoldersroot' else: parent_folder_id = 'root' for i, folder_name in enumerate(folder_names): curr_valid_folder_path = '\\'.join(folder_names[:i + 1]) self.save_progress('Getting info about {0}\\{1}'.format(self._clean_str(user), curr_valid_folder_path)) input_xml = ews_soap.xml_get_children_info(user, child_folder_name=folder_name, parent_folder_id=parent_folder_id) ret_val, resp_json = self._make_rest_call(action_result, input_xml, self._check_findfolder_response) if phantom.is_fail(ret_val): return ret_val, None total_items = resp_json.get('m:RootFolder', {}).get('@TotalItemsInView', '0') if total_items == '0': return action_result.set_status( phantom.APP_ERROR, "Folder '{0}' not found, possibly not present".format(curr_valid_folder_path)), None folder = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:Folder') if not folder: return action_result.set_status( phantom.APP_ERROR, "Information about '{0}' not found in response, possibly not present".format(curr_valid_folder_path) ), None if not isinstance(folder, list): folder = [folder] ret_val, folder = self._get_matching_folder_path(folder, folder_name, curr_valid_folder_path, action_result) if phantom.is_fail(ret_val): return ret_val, None if not folder: return action_result.set_status( phantom.APP_ERROR, "Information for folder '{0}' not found in response, possibly not present".format(curr_valid_folder_path) ), None folder_id = folder.get('t:FolderId', {}).get('@Id') if not folder_id: return action_result.set_status( phantom.APP_ERROR, "Folder ID information not found in response for '{0}', possibly not present".format(curr_valid_folder_path) ), None parent_folder_id = folder_id folder_info = { 'id': folder_id, 'display_name': folder.get('t:DisplayName'), 'children_count': folder.get('t:ChildFolderCount'), 'folder_path': self._extract_folder_path(folder.get('t:ExtendedProperty')) } return phantom.APP_SUCCESS, folder_info def _mark_as_junk(self, param, action): action_result = self.add_action_result(ActionResult(dict(param))) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) message_id = param[EWSONPREM_JSON_ID] move_email = param.get('move_to_junk_folder', param.get('move_from_junk_folder', False)) is_junk = True if action == 'block' else False if EWSONPREM_JSON_EMAIL in param: self._target_user = param[EWSONPREM_JSON_EMAIL] message = "Sender blocked" if action == "block" else "Sender unblocked" try: data = ews_soap.xml_get_mark_as_junk(message_id, is_junk=is_junk, move_item=move_email) except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. Error: {}".format( self._get_error_message_from_exception(e))) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_markasjunk_response) # Process errors if phantom.is_fail(ret_val): return action_result.get_status() if move_email: try: new_email_id = resp_json['m:MovedItemId']['@Id'] except Exception: return action_result.set_status(phantom.APP_SUCCESS, "Unable to get moved Email ID") action_result.add_data({'new_email_id': new_email_id}) action_result.update_summary({'new_email_id': new_email_id}) if new_email_id != message_id: # Looks like the email was actually moved message = "{}{}".format(message, ". Message moved to Junk Folder" if action == "block" else ". Message moved out of Junk Folder") # Set the Status return action_result.set_status(phantom.APP_SUCCESS, message) def _copy_move_email(self, param, action="copy"): action_result = self.add_action_result(ActionResult(dict(param))) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) message_id = param[EWSONPREM_JSON_ID] folder_path = param[EWSONPREM_JSON_FOLDER] user = param[EWSONPREM_JSON_EMAIL] is_public_folder = param.get(EWS_JSON_IS_PUBLIC_FOLDER, False) # Set the user to impersonate (i.e. target_user), by default it is the destination user self._target_user = user # finally, see if impersonation has been enabled/disabled for this action # as of right now copy or move email is the only action that allows over-ride impersonate = not(param.get(EWS_JSON_DONT_IMPERSONATE, False)) # Use a different email if specified impersonate_email = param.get(EWS_JSON_IMPERSONATE_EMAIL) if impersonate_email: self._target_user = impersonate_email self._impersonate = impersonate ret_val, folder_info = self._get_folder_info(user, folder_path, action_result, is_public_folder) if phantom.is_fail(ret_val): return action_result.get_status() try: data = ews_soap.get_copy_email(message_id, folder_info['id']) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, 'Parameter validation failed for the ID. {0}'.format(error_text)) response_checker = self._check_copy_response if action == "move": try: data = ews_soap.get_move_email(message_id, folder_info['id']) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, 'Parameter validation failed for the ID. {0}'.format(error_text)) response_checker = self._check_move_response ret_val, resp_json = self._make_rest_call(action_result, data, response_checker) # Process errors if phantom.is_fail(ret_val): return action_result.get_status() if not resp_json: return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key') new_email_id = None action_verb = 'copied' if action == "copy" else 'moved' try: new_email_id = resp_json['m:Items']['t:Message']['t:ItemId']['@Id'] except Exception: return action_result.set_status( phantom.APP_SUCCESS, "Email {0} successfully, but its message ID could not be retrieved".format(action_verb)) action_result.add_data({'new_email_id': new_email_id}) # Set the Status return action_result.set_status(phantom.APP_SUCCESS, "Email {0} successfully".format(action_verb)) def _resolve_name(self, param): action_result = self.add_action_result(ActionResult(dict(param))) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) email = param[EWSONPREM_JSON_EMAIL] self._target_user = param.get(EWS_JSON_IMPERSONATE_EMAIL) data = ews_soap.xml_get_resolve_names(email) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_resolve_names_response) # Process errors if phantom.is_fail(ret_val): message = action_result.get_message() if 'ErrorNameResolutionNoResults' in message: message = 'No email found. The input parameter might not be a valid alias or email.' return action_result.set_status(phantom.APP_ERROR, message) if not resp_json: return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key') resolution_set = resp_json.get('m:ResolutionSet', {}).get('t:Resolution') if not resolution_set: return action_result.set_summary({'total_entries': 0}) if not isinstance(resolution_set, list): resolution_set = [resolution_set] action_result.update_summary({'total_entries': len(resolution_set)}) for curr_resolution in resolution_set: self._cleanse_key_names(curr_resolution) contact = curr_resolution.get('t_Contact') if contact: email_addresses = contact.get('t_EmailAddresses', {}).get('t_Entry', []) if email_addresses: if not isinstance(email_addresses, list): email_addresses = [email_addresses] contact['t_EmailAddresses'] = email_addresses action_result.add_data(curr_resolution) # Set the Status return action_result.set_status(phantom.APP_SUCCESS) def _expand_dl(self, param): action_result = self.add_action_result(ActionResult(dict(param))) # Connectivity self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host) group = param[EWSONPREM_JSON_GROUP] self._target_user = param.get(EWS_JSON_IMPERSONATE_EMAIL) data = ews_soap.get_expand_dl(group) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_expand_dl_response) # Process errors if phantom.is_fail(ret_val): message = action_result.get_message() if 'ErrorNameResolutionNoResults' in message: message = '{} The input parameter might not be a distribution list.'.format(message) action_result.add_data({"t_EmailAddress": group}) return action_result.set_status(phantom.APP_ERROR, message) if not resp_json: return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key') mailboxes = resp_json.get('m:DLExpansion', {}).get('t:Mailbox') if not mailboxes: action_result.set_summary({'total_entries': 0}) return action_result.set_status(phantom.APP_SUCCESS) if not isinstance(mailboxes, list): mailboxes = [mailboxes] action_result.update_summary({'total_entries': len(mailboxes)}) for mailbox in mailboxes: if param.get('recursive', False) and "DL" in mailbox['t:MailboxType']: param[EWSONPREM_JSON_GROUP] = mailbox['t:EmailAddress'] self._expand_dl(param) self._cleanse_key_names(mailbox) action_result.add_data(mailbox) # Set the Status return action_result.set_status(phantom.APP_SUCCESS) def _get_email_epoch(self, resp_json): return None def _get_rfc822_format(self, resp_json, action_result): try: mime_content = resp_json['m:Items']['t:Message']['t:MimeContent']['#text'] except Exception: return action_result.set_status(phantom.APP_ERROR, "Email MimeContent missing in response.") try: rfc822_email = base64.b64decode(mime_content) except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Unable to decode Email Mime Content. {0}".format(error_text)) return action_result.set_status(phantom.APP_ERROR, "Unable to decode Email Mime Content") return phantom.APP_SUCCESS, rfc822_email def _get_attachment_meta_info(self, attachment, curr_key, parent_internet_message_id, parent_guid): attach_meta_info = dict() try: attach_meta_info['attachmentId'] = attachment['t:AttachmentId']['@Id'] except Exception: pass try: attach_meta_info['attachmentType'] = curr_key[2:].replace('Attachment', '').lower() except Exception: pass attach_meta_info['parentInternetMessageId'] = parent_internet_message_id attach_meta_info['parentGuid'] = parent_guid # attachmentID, attachmentType for k, v in six.iteritems(attachment): if not isinstance(v, str): continue # convert the key to the convention used by cef cef_key_name = k[2:] cef_key_name = "{}{}".format(cef_key_name[0].lower(), cef_key_name[1:]) attach_meta_info[cef_key_name] = v return attach_meta_info def _extract_ext_properties_from_attachments(self, resp_json): email_headers_ret = list() attach_meta_info_ret = list() if 'm:Items' not in resp_json: k = list(resp_json.keys())[0] resp_json['m:Items'] = resp_json.pop(k) # Get the attachments try: attachments = resp_json['m:Items']['t:Message']['t:Attachments'] except Exception: return RetVal3(phantom.APP_SUCCESS) attachment_ids = list() internet_message_id = None try: internet_message_id = resp_json['m:Items']['t:Message']['t:InternetMessageId'] except Exception: internet_message_id = None email_guid = resp_json['emailGuid'] for curr_key in list(attachments.keys()): attachment_data = attachments[curr_key] if not isinstance(attachment_data, list): attachment_data = [attachment_data] for curr_attachment in attachment_data: attachment_ids.append(curr_attachment['t:AttachmentId']['@Id']) # Add the info that we have right now curr_attach_meta_info = self._get_attachment_meta_info(curr_attachment, curr_key, internet_message_id, email_guid) if curr_attach_meta_info: attach_meta_info_ret.append(curr_attach_meta_info) if not attachment_ids: return RetVal3(phantom.APP_SUCCESS) data = ews_soap.xml_get_attachments_data(attachment_ids) action_result = ActionResult() ret_val, resp_json = self._make_rest_call(action_result, data, self._check_get_attachment_response) # Process errors if phantom.is_fail(ret_val): return RetVal3(action_result.get_status()) if not isinstance(resp_json, list): resp_json = [resp_json] for curr_attachment_data in resp_json: try: curr_attachment_data = curr_attachment_data['m:Attachments'] except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Could not parse the attachments response", error_text) continue if curr_attachment_data is None: self.debug_print("Could not parse the attachments response") continue curr_attachment_data['emailGuid'] = str(uuid.uuid4()) ret_val, data = self._extract_ext_properties(curr_attachment_data, internet_message_id, email_guid) if data: email_headers_ret.append(data) ret_val, email_headers_info, attach_meta_info = self._extract_ext_properties_from_attachments(curr_attachment_data) if email_headers_info: email_headers_ret.extend(email_headers_info) if attach_meta_info: attach_meta_info_ret.extend(attach_meta_info) else: # This is a file attachment, we most probably already have the info from the resp_json # But update it with the call to the xml_get_attachments_data(..) There might be more info # that has to be updated curr_attach_meta_info = self._get_attachment_meta_info( curr_attachment_data['m:Items'], 't:FileAttachment', internet_message_id, email_guid) if curr_attach_meta_info: # find the attachment in the list and update it matched_meta_info = list( [x for x in attach_meta_info_ret if x.get('attachmentId', 'foo1') == curr_attach_meta_info.get('attachmentId', 'foo2')] ) if matched_meta_info: matched_meta_info[0].update(curr_attach_meta_info) return phantom.APP_SUCCESS, email_headers_ret, attach_meta_info_ret def _extract_email_headers(self, email_headers): header_parser = HeaderParser() try: email_part = header_parser.parsestr(email_headers) except UnicodeEncodeError: email_part = header_parser.parsestr(UnicodeDammit(email_headers).unicode_markup) email_headers = list(email_part.items()) headers = {} charset = 'utf-8' try: [headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers] except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred while converting the header tuple into a dictionary. {}".format(error_text)) # Handle received separately try: received_headers = list() received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received'] except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred while handling the received header tuple separately. {}".format(error_text)) if received_headers: headers['Received'] = received_headers return headers def _extract_ext_properties(self, resp_json, parent_internet_message_id=None, parent_guid=None): if 'm:Items' not in resp_json: k = list(resp_json.keys())[0] resp_json['m:Items'] = resp_json.pop(k) headers = dict() extended_properties = list() # Get the Extended Properties try: extended_properties = resp_json['m:Items']['t:Message']['t:ExtendedProperty'] except Exception: pass if extended_properties: if not isinstance(extended_properties, list): extended_properties = [extended_properties] for curr_ext_property in extended_properties: property_tag = curr_ext_property.get('t:ExtendedFieldURI', {}).get('@PropertyTag') value = curr_ext_property.get('t:Value') if not property_tag: continue if property_tag.lower() == ews_soap.EXTENDED_PROPERTY_HEADERS.lower() or \ property_tag.lower() == ews_soap.EXTENDED_PROPERTY_HEADERS_RESPONSE.lower(): email_headers = self._extract_email_headers(value) if email_headers is not None: headers.update(email_headers) continue if property_tag == ews_soap.EXTENDED_PROPERTY_BODY_TEXT: headers.update({'bodyText': value}) # now parse the body in the main resp_json try: body_text = resp_json['m:Items']['t:Message']['t:Body']['#text'] except Exception: body_text = None try: body_type = resp_json['m:Items']['t:Message']['t:Body']['@BodyType'] except Exception: body_type = None if body_text is not None: if body_type is not None: body_key = "body{0}".format(body_type.title().replace(' ', '')) headers.update({body_key: body_text}) # In some cases the message id is not part of the headers, in this case # copy the message id from the envelope to the header headers_ci = CaseInsensitiveDict(headers) message_id = headers_ci.get('message-id') if message_id is None: try: message_id = resp_json['m:Items']['t:Message']['t:InternetMessageId'] headers['Message-ID'] = message_id except Exception: pass if parent_internet_message_id is not None: headers['parentInternetMessageId'] = parent_internet_message_id if parent_guid is not None: headers['parentGuid'] = parent_guid headers['emailGuid'] = resp_json['emailGuid'] return phantom.APP_SUCCESS, headers def _parse_email(self, resp_json, email_id, target_container_id, flag=False): try: mime_content = resp_json['m:Items']['t:Message']['t:MimeContent']['#text'] except Exception: return phantom.APP_ERROR, "Email MimeContent missing in response." try: rfc822_email = base64.b64decode(mime_content) rfc822_email = UnicodeDammit(rfc822_email).unicode_markup except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Unable to decode Email Mime Content. {0}".format(error_text)) return phantom.APP_ERROR, "Unable to decode Email Mime Content" epoch = self._get_email_epoch(resp_json) email_header_list = list() attach_meta_info_list = list() resp_json['emailGuid'] = str(uuid.uuid4()) ret_val, data = self._extract_ext_properties(resp_json) if data: email_header_list.append(data) ret_val, attach_email_headers, attach_meta_info = self._extract_ext_properties_from_attachments(resp_json) if attach_email_headers: email_header_list.extend(attach_email_headers) if attach_meta_info: attach_meta_info_list.extend(attach_meta_info) config = self.get_config() if flag: config.update({ "extract_attachments": True, "extract_domains": True, "extract_hashes": True, "extract_ips": True, "extract_urls": True, "extract_eml": True }) process_email = ProcessEmail() return process_email.process_email(self, rfc822_email, email_id, config, epoch, target_container_id, email_headers=email_header_list, attachments_data=attach_meta_info_list) def _process_email_id(self, email_id, target_container_id=None, flag=False): action_result = ActionResult() try: data = ews_soap.xml_get_emails_data([email_id]) except Exception as e: error_text = self._get_error_message_from_exception(e) return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. {0}".format(error_text)) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response) # Process errors if phantom.is_fail(ret_val): message = "Error while getting email data for id {0}. Error: {1}".format(email_id, action_result.get_message()) self.debug_print(message) self.send_progress(message) self._skipped_emails += 1 return phantom.APP_ERROR ret_val, message = self._parse_email(resp_json, email_id, target_container_id, flag) if phantom.is_fail(ret_val): return phantom.APP_ERROR return phantom.APP_SUCCESS def _get_email_infos_to_process(self, offset, max_emails, action_result, restriction=None): config = self.get_config() # get the user poll_user = config.get(EWS_JSON_POLL_USER, config[phantom.APP_JSON_USERNAME]) if not poll_user: return action_result.set_status(phantom.APP_ERROR, "Polling User Email not specified, cannot continue"), None self._target_user = poll_user folder_path = config.get(EWS_JSON_POLL_FOLDER, 'Inbox') is_public_folder = config.get(EWS_JSON_IS_PUBLIC_FOLDER, False) ret_val, folder_info = self._get_folder_info(poll_user, folder_path, action_result, is_public_folder) if phantom.is_fail(ret_val): return ret_val, None manner = config[EWS_JSON_INGEST_MANNER] folder_id = folder_info['id'] order = "Ascending" if manner == EWS_INGEST_LATEST_EMAILS: order = "Descending" data = ews_soap.xml_get_email_ids( poll_user, order=order, offset=offset, max_emails=max_emails, folder_id=folder_id, restriction=restriction) ret_val, resp_json = self._make_rest_call(action_result, data, self._check_find_response) # Process errors if phantom.is_fail(ret_val): # Dump error messages in the log self.debug_print(action_result.get_message()) # return error return ret_val, None resp_json = resp_json.get('m:RootFolder') if not resp_json: return action_result.set_status(phantom.APP_ERROR, 'Result does not contain required RootFolder key'), None items = resp_json.get('t:Items') if items is None: self.debug_print("Items is None") return action_result.set_status(phantom.APP_SUCCESS, 'Result does not contain items key. Possibly no emails in folder'), None items = resp_json.get('t:Items', {}).get('t:Message', []) if not isinstance(items, list): items = [items] email_infos = [{'id': x['t:ItemId']['@Id'], 'last_modified_time': x['t:LastModifiedTime']} for x in items] return phantom.APP_SUCCESS, email_infos def _pprint_email_id(self, email_id): return "{0}.........{1}".format(email_id[:20], email_id[-20:]) def _process_email_ids(self, email_ids, action_result): if email_ids is None: return action_result.set_status(phantom.APP_ERROR, "Did not get access to email IDs") self.save_progress("Got {0} email{1}".format(len(email_ids), '' if len(email_ids) == 1 else 's')) failed_emails_parsing_list = [] for i, email_id in enumerate(email_ids): self.send_progress("Querying email # {0} with id: {1}".format(i + 1, self._pprint_email_id(email_id))) try: ret_val = self._process_email_id(email_id) if phantom.is_fail(ret_val): failed_emails_parsing_list.append(email_id) except Exception as e: error_text = self._get_error_message_from_exception(e) self.debug_print("Error occurred in _process_email_id # {0} with Message ID: {1}. {2}".format(i, email_id, error_text)) failed_emails_parsing_list.append(email_id) if len(failed_emails_parsing_list) == len(email_ids): return action_result.set_status( phantom.APP_ERROR, "ErrorExp in _process_email_id for all the email IDs: {}".format(str(failed_emails_parsing_list))) if self._skipped_emails > 0: self.save_progress("Skipped emails: {}. (For more details, check the logs)".format(self._skipped_emails)) return action_result.set_status(phantom.APP_SUCCESS) def _get_fips_enabled(self): try: from phantom_common.install_info import is_fips_enabled except ImportError: return False fips_enabled = is_fips_enabled() if fips_enabled: self.debug_print('FIPS is enabled') else: self.debug_print('FIPS is not enabled') return fips_enabled def _poll_now(self, param): action_result = self.add_action_result(ActionResult(dict(param))) # Get the maximum number of emails that we can pull config = self.get_config() # Get the maximum number of emails that we can pull, same as container count try: max_emails = int(param[phantom.APP_JSON_CONTAINER_COUNT]) if max_emails == 0 or (max_emails and (not str(max_emails).isdigit() or max_emails <= 0)): return action_result.set_status( phantom.APP_ERROR, "Please provide a valid non-zero positive integer value in 'container_count' parameter") except Exception: return self.set_status(phantom.APP_ERROR, "Invalid container count") self.save_progress("Will be ingesting all possible artifacts (ignoring max artifacts value) for POLL NOW") email_id = param.get(phantom.APP_JSON_CONTAINER_ID) email_ids = [email_id] # get the user poll_user = UnicodeDammit(config.get(EWS_JSON_POLL_USER, config[phantom.APP_JSON_USERNAME])).unicode_markup if not poll_user: return action_result.set_status(phantom.APP_ERROR, "Polling User Email not specified, cannot continue"), None self._target_user = poll_user if not email_id: self.save_progress("POLL NOW Getting {0} '{1}' email ids".format(max_emails, config[EWS_JSON_INGEST_MANNER])) ret_val, email_infos = self._get_email_infos_to_process(0, max_emails, action_result) if phantom.is_fail(ret_val) or email_infos is None: return action_result.get_status() if not email_infos: return action_result.set_status(phantom.APP_SUCCESS, "No emails found for the ingestion process") email_ids = [x['id'] for x in email_infos] else: self.save_progress("POLL NOW Getting the single email id") ret_val = self._process_email_ids(email_ids, action_result) if phantom.is_fail(ret_val): return action_result.get_status() return action_result.set_status(phantom.APP_SUCCESS) def _get_restriction(self): config = self.get_config() emails_after_key = 'last_ingested_format' if config[EWS_JSON_INGEST_MANNER] == EWS_INGEST_LATEST_EMAILS else 'last_email_format' date_time_string = self._state.get(emails_after_key) if not date_time_string: return None return ews_soap.xml_get_restriction(date_time_string) def _get_next_start_time(self, last_time): # get the time string passed into a datetime object last_time = datetime.strptime(last_time, DATETIME_FORMAT) # add a second to it last_time = last_time + timedelta(seconds=1) # format it return last_time.strftime(DATETIME_FORMAT) def _manage_data_duplication(self, email_infos, email_index, max_emails, total_ingested, limit): # Define current time to store as starting reference for the next run of scheduled | interval polling utc_now = datetime.utcnow() self._state['last_ingested_format'] = utc_now.strftime('%Y-%m-%dT%H:%M:%SZ') self._state['last_email_format'] = email_infos[email_index]['last_modified_time'] self.save_state(self._encrypt_client_token(self._state.copy())) if max_emails: if email_index == 0 or self._less_data: return None, None total_ingested += max_emails - (self._dup_data + self._skipped_emails) self._remaining = limit - total_ingested if total_ingested >= limit: return None, None next_cycle_repeat_data = 0 last_modified_time = email_infos[email_index]['last_modified_time'] for x in reversed(email_infos): if x["last_modified_time"] == last_modified_time: next_cycle_repeat_data += 1 else: break max_emails = next_cycle_repeat_data + self._remaining return max_emails, total_ingested else: return None, None def _on_poll(self, param): # on poll action that is supposed to be scheduled if self.is_poll_now(): self.debug_print("DEBUGGER: Starting polling now") return self._poll_now(param) config = self.get_config() action_result = self.add_action_result(ActionResult(dict(param))) # Fetch first_run_max_emails for asset configuration first_run_max_emails = config[EWS_JSON_FIRST_RUN_MAX_EMAILS] ret_val, first_run_max_emails = self._validate_integer(action_result, first_run_max_emails, "Maximum Emails to Poll First Time") if phantom.is_fail(ret_val): return action_result.get_status() # Fetch max_containers for asset configuration max_containers = config[EWS_JSON_POLL_MAX_CONTAINERS] ret_val, max_containers = self._validate_integer(action_result, max_containers, "Maximum Containers for Scheduled Polling") if phantom.is_fail(ret_val): return action_result.get_status() # handle poll_now i.e. scheduled poll # Get the email ids that we will be querying for, different set for first run if self._state.get('first_run', True): # set the config to _not_ first run max_emails = first_run_max_emails self.save_progress("First time Ingestion detected.") else: max_emails = max_containers total_ingested = 0 limit = max_emails while True: self._dup_data = 0 self._skipped_emails = 0 restriction = self._get_restriction() ret_val, email_infos = self._get_email_infos_to_process(0, max_emails, action_result, restriction) if phantom.is_fail(ret_val) or email_infos is None: return action_result.get_status() if not email_infos: return action_result.set_status(phantom.APP_SUCCESS, "No emails found for the restriction: {}".format(str(restriction))) if len(email_infos) < max_emails: self._less_data = True # if the config is for latest emails, then the 0th is the latest in the list returned, else # The last email is the latest in the list returned email_index = 0 if config[EWS_JSON_INGEST_MANNER] == EWS_INGEST_LATEST_EMAILS else -1 email_ids = [x['id'] for x in email_infos] ret_val = self._process_email_ids(email_ids, action_result) if phantom.is_fail(ret_val): return action_result.get_status() max_emails, total_ingested = self._manage_data_duplication(email_infos, email_index, max_emails, total_ingested, limit) if not max_emails: break # Save the state file data only if the ingestion gets successfully completed if self._state.get('first_run', True): self._state['first_run'] = False return action_result.set_status(phantom.APP_SUCCESS) def _get_trace_error_details(self, response): self.debug_print("Response text: {}".format(response.text)) if 'json' in response.headers.get('Content-Type', ''): try: r_json = json.loads(response.text) error_code = r_json["error"]["code"] or response.status_code error_msg = r_json["error"]["message"]["value"] error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg) except Exception: error_text = "API returned an error. Status: {}, Response: {}. Please check your input parameters/configuration." \ .format(response.status_code, response.text) self.save_progress(error_text) return error_text try: soup = BeautifulSoup(response.text, "html.parser") # Remove the script, style, footer and navigation part from the HTML message for element in soup(["script", "style", "footer", "nav"]): element.extract() error_text = soup.text split_lines = error_text.split('\n') split_lines = [x.strip() for x in split_lines if x.strip()] error_text = '\n'.join(split_lines) error_text = error_text.replace('{', '{{').replace('}', '}}') except Exception: error_text = "Can't process response from server. Status Code: {0} Data from server: {1}".format( response.status_code, response.text.replace('{', '{{').replace('}', '}}')) if len(error_text) > 500: error_text = "Error while connecting to the server" return error_text def _create_filter_string(self, action_result, param): """ This method is used to generate create filter string from the given parameters. :param param: Parameter dictionary :return: filter_str: Filter string """ sender_address = param.get('sender_address', '') recipient_address = param.get('recipient_address', '') status = param.get('status', '') start_date = param.get('start_date', '') end_date = param.get('end_date', '') from_ip = param.get('from_ip', '') to_ip = param.get('to_ip', '') internet_message_id = param.get('internet_message_id', '') message_trace_id = param.get('message_trace_id', '') if (start_date and not end_date) or (end_date and not start_date): return action_result.set_status(phantom.APP_ERROR, "Please specify both the 'start date' and 'end date' parameters"), {} params = {} if sender_address: sender_list = list(filter(None, [x.strip() for x in sender_address.split(",")])) params['SenderAddress'] = "'{}'".format(",".join(set(sender_list))) if recipient_address: recipient_list = list(filter(None, [x.strip() for x in recipient_address.split(",")])) params['RecipientAddress'] = "'{}'".format(",".join(set(recipient_list))) if status: status_list = list(filter(None, [x.strip() for x in status.split(",")])) status_list = ["" if x.lower() == "none" else x for x in status_list] params['Status'] = "'{}'".format(",".join(set(status_list))) if start_date: params['StartDate'] = "datetime'{}'".format(start_date) if end_date: params['EndDate'] = "datetime'{}'".format(end_date) if from_ip: params['FromIP'] = "'{}'".format(from_ip) if to_ip: params['ToIP'] = "'{}'".format(to_ip) if internet_message_id: params['MessageId'] = "'{}'".format(internet_message_id) if message_trace_id: params['MessageTraceId'] = "guid'{}'".format(message_trace_id) filter_str = "" for key, value in params.items(): if not filter_str: filter_str = "{} eq {}".format(key, value) else: filter_str = "{} and {} eq {}".format(filter_str, key, value) return phantom.APP_SUCCESS, filter_str def _trace_email(self, param): action_result = self.add_action_result(ActionResult(dict(param))) ret_val, filter_str = self._create_filter_string(action_result, param) if phantom.is_fail(ret_val): return action_result.get_status() parameter = { "$format": "Json", "$filter": filter_str } self.save_progress("Query parameter: {}".format(repr(parameter))) email_range = param.get("range") mini, maxi = 0, EWSONPREM_MAX_END_OFFSET_VAL if email_range: ret_val = self._validate_range(email_range, action_result) if phantom.is_fail(ret_val): return action_result.get_status() mini, maxi = (int(x) for x in email_range.split('-')) config = self.get_config() if phantom.APP_JSON_PASSWORD not in config: return action_result.set_status(phantom.APP_ERROR, "Password is required for the 'trace email' action") password = config[phantom.APP_JSON_PASSWORD] username = config[phantom.APP_JSON_USERNAME].replace('/', '\\') auth = HTTPBasicAuth(username, password) trace_url = EWS_TRACE_URL results = [] while True: response = requests.get(trace_url, auth=auth, params=parameter, timeout=self._timeout) if response.status_code != 200: error_text = self._get_trace_error_details(response) return action_result.set_status(phantom.APP_ERROR, error_text) # format as json data try: r_json = json.loads(response.text) results.extend(r_json["d"]["results"]) trace_url = r_json["d"].get("__next") # Break if we got all the data we want or if there is no more data to fetch if not trace_url or len(results) > maxi: break except Exception as e: # Log the exception details error_text = self._get_error_message_from_exception(e) self.debug_print("Error while parsing response: {}".format(error_text)) # Fetch the error message from the API response error_text = self._get_trace_error_details(response) return action_result.set_status(phantom.APP_ERROR, error_text) parameter = {"$format": "Json"} if param.get('widget_filter', False): for email_dict in results: email_dict['MessageId'] = email_dict['MessageId'].replace('>', '').replace('<', '') results = results[mini:maxi + 1] action_result.add_data(results) summary = action_result.update_summary({}) summary['emails_found'] = len(results) return action_result.set_status(phantom.APP_SUCCESS) def handle_action(self, param): """Function that handles all the actions""" # Get the action that we are supposed to carry out, set it in the connection result object action = self.get_action_identifier() # Initialize it to success ret_val = phantom.APP_SUCCESS # Bunch if if..elif to process actions if action == self.ACTION_ID_RUN_QUERY: ret_val = self._run_query(param) elif action == self.ACTION_ID_DELETE_EMAIL: ret_val = self._delete_email(param) elif action == self.ACTION_ID_UPDATE_EMAIL: ret_val = self._update_email(param) elif action == self.ACTION_ID_GET_EMAIL: ret_val = self._get_email(param) elif action == self.ACTION_ID_COPY_EMAIL: ret_val = self._copy_move_email(param) elif action == self.ACTION_ID_MOVE_EMAIL: ret_val = self._copy_move_email(param, action='move') elif action == self.ACTION_ID_BLOCK_SENDER: ret_val = self._mark_as_junk(param, action='block') elif action == self.ACTION_ID_UNBLOCK_SENDER: ret_val = self._mark_as_junk(param, action='unblock') elif action == self.ACTION_ID_EXPAND_DL: ret_val = self._expand_dl(param) elif action == self.ACTION_ID_RESOLVE_NAME: ret_val = self._resolve_name(param) elif action == self.ACTION_ID_ON_POLL: ret_val = self._on_poll(param) elif action == phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY: ret_val = self._test_connectivity(param) elif action == self.ACTION_ID_TRACE_EMAIL: ret_val = self._trace_email(param) return ret_val if __name__ == '__main__': import argparse import sys import pudb pudb.set_trace() in_json = None in_email = None argparser = argparse.ArgumentParser() argparser.add_argument('input_test_json', help='Input Test JSON file') argparser.add_argument('-u', '--username', help='username', required=False) argparser.add_argument('-p', '--password', help='password', required=False) argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False) args = argparser.parse_args() session_id = None username = args.username password = args.password verify = args.verify if username is not None and password is None: # User specified a username but not a password, so ask import getpass password = getpass.getpass("Password: ") if username and password: try: print("Accessing the Login page") phantom_url = "{}login".format(BaseConnector._get_phantom_base_url()) r = requests.get(phantom_url, verify=verify, timeout=DEFAULT_REQUEST_TIMEOUT) csrftoken = r.cookies['csrftoken'] data = dict() data['username'] = username data['password'] = password data['csrfmiddlewaretoken'] = csrftoken headers = dict() headers['Cookie'] = 'csrftoken={}'.format(csrftoken) headers['Referer'] = phantom_url print("Logging into Platform to get the session id") r2 = requests.post(phantom_url, verify=verify, data=data, headers=headers, timeout=DEFAULT_REQUEST_TIMEOUT) session_id = r2.cookies['sessionid'] except Exception as e: print("Unable to get session id from the platform. Error: {}".format(e)) sys.exit(1) with open(args.input_test_json) as f: in_json = f.read() in_json = json.loads(in_json) connector = EWSOnPremConnector() connector.print_progress_message = True data = in_json.get('data') raw_email = in_json.get('raw_email') # if neither present then treat it as a normal action test json if not data and not raw_email: print(json.dumps(in_json, indent=4)) if session_id is not None: in_json['user_session_token'] = session_id result = connector._handle_action(json.dumps(in_json), None) print(result) sys.exit(0) if data: raw_email = data.get('raw_email') if raw_email: config = { "extract_attachments": True, "extract_domains": True, "extract_hashes": True, "extract_ips": True, "extract_urls": True, "extract_eml": True, "add_body_to_header_artifacts": True } process_email = ProcessEmail() ret_val, message = process_email.process_email(connector, raw_email, "manual_parsing", config, None) sys.exit(0)
splunk-soar-connectors/office365
ewsonprem_connector.py
ewsonprem_connector.py
py
129,772
python
en
code
3
github-code
36
4440001992
import re import six import time import inspect import importlib from .dataType import * import spannerorm.base_model from datetime import date from .relation import Relation from google.api_core.datetime_helpers import DatetimeWithNanoseconds class Helper(object): @classmethod def is_property(cls, v): """ Check is property :type: object :param v: :return: """ return isinstance(v, property) @classmethod def is_attr(cls, v): """ Check is model attr :type: object :param v: """ return isinstance(v, StringField) | isinstance(v, IntegerField) | isinstance(v, BoolField) \ | isinstance(v, IntegerField) | isinstance(v, FloatField) | isinstance(v, BytesField) \ | isinstance(v, DateField) | isinstance(v, TimeStampField) | isinstance(v, EnumField) @classmethod def is_relational_attr(cls, v): """ Check is model relational attr :type: object :param v: """ return isinstance(v, Relation) @classmethod def get_model_prop_by_name(cls, model_cls, prop_name): """ Return model prop by name :type model_cls: spannerorm.base_model.BaseModel :param model_cls: :type prop_name: str :param prop_name: :rtype: property | None :return: """ for key, prop in inspect.getmembers(model_cls, Helper.is_property): if key == prop_name: return prop return None @classmethod def get_model_props_value_by_key(cls, model_obj, prop_name): """ Return model props key-value :type model_obj: spannerorm.base_model.BaseModel :param model_obj: model :type prop_name: str :param prop_name: property name :rtype: dict :return: """ for key, value in inspect.getmembers(model_obj.__class__, Helper.is_property): if key == prop_name: return model_obj.__getattribute__(key) return None @classmethod def get_model_props(cls, model_cls): """ Return model props :type model_cls: spannerorm.base_model.BaseModel :param model_cls: model :rtype: dict :return: """ model_props = {} for key, value in inspect.getmembers(model_cls, Helper.is_property): model_props[key] = value return model_props @classmethod def get_model_attrs(cls, model_cls): """ Return model attr :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :rtype: dict :return: model attributes in key value pairs """ attrs = {} for key, value in inspect.getmembers(model_cls, Helper.is_attr): attrs[key] = value return attrs @classmethod def get_model_relations_attrs(cls, model_cls): """ Return model relation attrs :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :rtype: dict :return: model relational attributes in key value pairs """ attrs = {} for key, value in inspect.getmembers(model_cls, Helper.is_relational_attr): attrs[key] = value return attrs @classmethod def get_model_props_key_value(cls, model_obj): """ Return model props key-value :type model_obj: spannerorm.base_model.BaseModel :param model_obj: model :rtype: dict :return: """ model_props = {} for key, value in inspect.getmembers(model_obj.__class__, Helper.is_property): model_props[key] = model_obj.__getattribute__(key) return model_props @classmethod def model_attr_by_prop(cls, model_cls, prop): """ Return model attribute by property :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :type prop: property :param prop: Model class property :rtype: DataType :return: Model attribute """ if isinstance(prop, property) is False: raise TypeError('Invalid object property') return Helper.model_attr_by_prop_name(model_cls, prop.fget.__name__) @classmethod def model_attr_by_prop_name(cls, model_cls, prop_name): """ Return model attribute by property name :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :type prop_name: str :param prop_name: Model class property name :rtype: DataType :return: Model attribute """ model_attr_name = '_' + prop_name model_attrs = Helper.get_model_attrs(model_cls) if model_attr_name not in model_attrs: raise TypeError('Criteria model property {} not exist'.format(model_attr_name)) return model_attrs.get(model_attr_name) @classmethod def model_relational_attr_by_prop(cls, model_cls, prop): """ Return model relational attribute by property :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :type prop: property :param prop: Model class property :rtype: Relation :return: Model relational attribute """ if isinstance(prop, property) is False: raise TypeError('Invalid object property') return Helper.model_relational_attr_by_prop_name(model_cls, prop.fget.__name__) @classmethod def model_relational_attr_by_prop_name(cls, model_cls, prop_name): """ Return model relational attribute by property name :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :type prop_name: str :param prop_name: Model class property name :rtype: Relation :return: Model relational attribute """ model_attr_name = '_' + prop_name model_attrs = Helper.get_model_relations_attrs(model_cls) if model_attr_name not in model_attrs: raise TypeError('Criteria model property {} not exist'.format(model_attr_name)) return model_attrs.get(model_attr_name) @classmethod def get_db_columns(cls, model_cls): model_attrs = Helper.get_model_attrs(model_cls) columns = [] for attr_name in model_attrs: attr = model_attrs.get(attr_name) columns.append(attr.db_column) return columns @classmethod def validate_model_prop(cls, model_cls, prop): """ Validate model attr :type model_cls: spannerorm.base_model.BaseModel :param model_cls: Model class :type prop: property :param prop: :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ if isinstance(prop, property) is False: raise TypeError('Invalid object property') model_attr_name = '_' + prop.fget.__name__ model_attrs = Helper.get_model_attrs(model_cls) if model_attr_name in model_attrs: attr = model_attrs.get(model_attr_name) if isinstance(attr, IntegerField) or isinstance(attr, FloatField): return Helper.validate_number_field(attr.value, max_value=attr.max_value, min_value=attr.min_value, null=attr.null) elif isinstance(attr, StringField): return Helper.validate_string_field(attr.value, max_length=attr.max_length, reg_exr=attr.reg_exr, null=attr.null) elif isinstance(attr, BoolField): return Helper.validate_bool_field(attr.value, null=attr.null) elif isinstance(attr, TimeStampField): return Helper.validate_timestamp_field(attr.value, null=attr.null) elif isinstance(attr, DateField): return Helper.validate_date_field(attr.value, null=attr.null) elif isinstance(attr, EnumField): return Helper.validate_enum_field(attr.value, enum_list=attr.enum_list, null=attr.null) return { 'is_valid': True, 'error_msg': None } @classmethod def validate_number_field(cls, value, max_value=None, min_value=None, null=True): """ Validate number field value :type value: int | float :param value: integer value :type max_value: int :param max_value: max allow number value :type min_value: int :param min_value: min allow integer value :type null: bool :param null: is allow None value :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ is_valid = True error_msg = None if null is False and value is None: is_valid = False error_msg = 'Property value should not be None' if value is not None: if isinstance(value, int) is False and isinstance(value, float) is False: is_valid = False error_msg = 'Data type should be <int>' if max_value is not None and value > max_value: is_valid = False error_msg = 'Max allow value: {}'.format(max_value) if min_value is not None and value < min_value: is_valid = False error_msg = 'Min allow value: {}'.format(min_value) return { 'is_valid': is_valid, 'error_msg': error_msg } @classmethod def validate_string_field(cls, value, max_length=None, reg_exr=None, null=True): """ Validate string field value :type max_length: int :param max_length: max allow string lenght :type reg_exr: str :param reg_exr: regex pattern :type null: bool :param null: is allow None value :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ is_valid = True error_msg = None if null is False and (value is None or (value is not None and str(value).strip() == '')): is_valid = False error_msg = 'Data should not be None or empty' if value is not None: if isinstance(value, six.string_types) is False: is_valid = False error_msg = 'Data type should be <str>' if max_length is not None and len(value) > max_length: is_valid = False error_msg = 'Max allow string length: {}'.format(max_length) if reg_exr is not None: pattern = re.compile(reg_exr) if pattern.match(value) is None: is_valid = False error_msg = 'String should match regex pattern: {}'.format(reg_exr) return { 'is_valid': is_valid, 'error_msg': error_msg } @classmethod def validate_bool_field(cls, value, null=True): """ Validate bool field value :type value: bool :param value: :type null: bool :param null: is allow None value :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ is_valid = True error_msg = None if null is False and value is None: is_valid = False error_msg = 'Data should not be None' if value is not None and isinstance(value, bool) is False: is_valid = False error_msg = 'Data type should be <bool>' return { 'is_valid': is_valid, 'error_msg': error_msg } @classmethod def validate_timestamp_field(cls, value, null=True): """ Validate timestamp field value :type value: int | float :param value: :type null: bool :param null: is allow None value :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ is_valid = True error_msg = None if null is False and value is None: is_valid = False error_msg = 'Data should not be None' if value is not None and isinstance(value, int) is False and isinstance(value, float) is False: is_valid = False error_msg = 'Data type should be <float> or <int> timestamp' return { 'is_valid': is_valid, 'error_msg': error_msg } @classmethod def validate_date_field(cls, value, null=True): """ Validate enum field value :type value: date :param value: :type null: bool :param null: is allow None value :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ is_valid = True error_msg = None if null is False and value is None: is_valid = False error_msg = 'Data should not be None' if value is not None and isinstance(value, date) is False: is_valid = False error_msg = 'Data type should be <datetime.date>' return { 'is_valid': is_valid, 'error_msg': error_msg } @classmethod def validate_enum_field(cls, value, enum_list, null=True): """ Validate enum field value :type value: object :param value: :type enum_list: list :param enum_list: list of allow value :type null: bool :param null: is allow None value :rtype: dict :return: {'is_valid':bool, 'error_msg':str} """ is_valid = True error_msg = None if null is False and value is None: is_valid = False error_msg = 'Data should not be None' if value is not None: if value in enum_list is False: is_valid = False error_msg = 'Data value should be from list: {}'.format(enum_list) return { 'is_valid': is_valid, 'error_msg': error_msg } @classmethod def get_model_props_details(cls, model_cls): """ Return model props details :type model_cls: spannerorm.base_model.BaseModel :param model_cls: :rtype: dict :return: """ model_props = Helper.get_model_props(model_cls) model_attrs = Helper.get_model_attrs(model_cls) props_details = {} for prop_name in model_props: model_attr_name = '_' + prop_name if model_attr_name in model_attrs: attr = model_attrs.get(model_attr_name) props_details.update({ prop_name: Helper.get_prop_details(attr) }) return props_details @classmethod def get_relation_props_details(cls, model_cls): """ Return model relation props details :type model_cls: spannerorm.base_model.BaseModel :param model_cls: :rtype: dict :return: """ model_props = Helper.get_model_props(model_cls) model_relation_attrs = Helper.get_model_relations_attrs(model_cls) props_details = {} for prop_name in model_props: model_attr_name = '_' + prop_name if model_attr_name in model_relation_attrs: attr = model_relation_attrs.get(model_attr_name) props_details.update({ prop_name: Helper.get_relation_pop_detail(attr) }) return props_details @classmethod def get_prop_details(cls, attr): """ Return model attr field details :type attr: DataType :param attr: :rtype: dict :return: """ details = { 'db_column': attr.db_column, 'null': attr.null, 'default_value': attr.default } if isinstance(attr, IntegerField): details.update({ 'data_type': 'IntegerField', 'max_value': attr.max_value, 'min_value': attr.min_value }) if isinstance(attr, FloatField): details.update({ 'data_type': 'FloatField', 'max_value': attr.max_value, 'min_value': attr.min_value, 'decimal_places': attr.decimal_places }) if isinstance(attr, StringField): details.update({ 'data_type': 'StringField', 'max_length': attr.max_length, 'reg_exr': attr.reg_exr }) if isinstance(attr, EnumField): details.update({ 'data_type': 'EnumField', 'enum_list': attr.enum_list }) if isinstance(attr, DateField): details.update({ 'data_type': 'DateField' }) if isinstance(attr, TimeStampField): details.update({ 'data_type': 'TimeStampField' }) if isinstance(attr, BoolField): details.update({ 'data_type': 'BoolField' }) if isinstance(attr, BytesField): details.update({ 'data_type': 'BytesField' }) return details @classmethod def get_relation_pop_detail(cls, attr): """ Return model relation attr field details :type attr: Relation :param attr: :rtype: dict :return: """ return { 'relation_type': attr.relation_type, 'relation_name': attr.relation_name, 'join_on': attr.join_on, 'refer_to': attr.refer_to } @classmethod def init_model_with_default(cls, model_class): """ Init model object with default values :type model_class: spannerorm.base_model.BaseModel :param model_class: :rtype: spannerorm.base_model.BaseModel :return: model object """ model_object = model_class() model_attrs = Helper.get_model_attrs(model_object) for attr_name in model_attrs: attr = model_attrs.get(attr_name) if attr.default is not None: attr.value = attr.default return model_object @classmethod def model_cls_by_module_name(cls, prop_module_name): """ import module by name & return model :type prop_module_name: str :param prop_module_name: :rtype: spannerorm.base_model.BaseModel :return: """ prop_module = importlib.import_module(prop_module_name) for name, model in inspect.getmembers(prop_module): if inspect.isclass(model) and prop_module_name == model.__module__: return model return None @classmethod def process_result_set(cls, results): data = [] for row in results: row_data = {} for col in row: index = 0 for field in results.fields: if isinstance(row[index], six.string_types): value = str(row[index]) elif isinstance(row[index], DatetimeWithNanoseconds): value = time.mktime(row[index].timetuple()) else: value = row[index] field_name = str(field.name) row_data[field_name] = value index += 1 data.append(row_data) return data
sijanonly/spanner-orm
spannerorm/helper.py
helper.py
py
19,932
python
en
code
0
github-code
36
2939710815
#!/usr/bin/env python # """ Utilitities used by our app. We want to separate them from main.app so we can use them in other modules without running the code in app. """ # system imports # from hashlib import sha256 #################################################################### # def short_hash_email(email: dict) -> str: """ Generate a short hash of the email message. We do not know necessarily what parts of the message exist so try `RawEmail`, `HTMLBody`, `TextBody`, and `MessageID` in that order. """ # Generate a sha256 for the email message if "RawEmail" in email: text_to_hash = email["RawEmail"] elif "HtmlBody" in email: text_to_hash = email["HtmlBody"] elif "TextBody" in email: text_to_hash = email["TextBody"] else: text_to_hash = email["MessageID"] short_hash = sha256(text_to_hash.encode("utf-8")).hexdigest()[:8] return short_hash
scanner/postmark_webhooks
app/utils.py
utils.py
py
937
python
en
code
0
github-code
36
14991911664
from os import environ import uuid import logging import json # file_name = environ('log_file_name') file_name = 'app.log' class ModelLog: def __init__(self): ModelLog.load_create() @staticmethod def request_uid(use_case): """ Generate a unique unicode id for the object. The default implementation concatenates the class name, "_", and 12 random hex chars. """ return str(use_case + "_" + uuid.uuid4().hex[12:]) @staticmethod def load_create(): logging.basicConfig(filename=('%s' % file_name), level=logging.INFO, format='%(asctime)s ->%(message)s') logging.info('Started') @staticmethod def read_logs(uid): history = [] with open(file_name) as reader: lines = reader.readlines() for line in lines: try: val = json.loads(line.split('->')[-1]) if uid == val.get('name'): history.append(val) except Exception as e: print(e) return history @staticmethod def write_log(name, status, **kwargs): val = dict(name=name, status=status) val.update(kwargs) stringify = json.dumps(val) logging.info(stringify) custom_log = ModelLog()
ahmadaneeque/my-code
kubectl_docker/model-update-framework/re-train/mlog.py
mlog.py
py
1,357
python
en
code
0
github-code
36
12626651053
# 2. Реализовать функцию, принимающую несколько параметров, описывающих данные пользователя: имя, фамилия, год рождения, # город проживания, email, телефон. Функция должна принимать параметры как именованные аргументы. # Реализовать вывод данных о пользователе одной строкой. def user_data_input(): user_data_dict = {} print("Вводите данные пользователя через ENTER:\nФамилия, Имя, ГодРождения, ГородПроживания, email, " "Телефон") for userKey in ['Фамилия', 'Имя', 'ГодРождения', 'ГородПроживания', 'email', 'Телефон']: print(f"Введите значение поля {userKey}:") user_data_dict.update({userKey: str(input())}) return user_data_dict def user_data_print(first_name_p, lastname_p, birth_date_p, residence_city_p, email_p, phone_p): print(f"{first_name_p} {lastname_p}, {birth_date_p} года рождения. Проживает в городе {residence_city_p}. " f"Контакты: email: {email_p}, телефон: {phone_p}") user_data = user_data_input() user_data_print(first_name_p=user_data.get('Имя'), lastname_p=user_data.get('Фамилия'), birth_date_p=user_data.get('ГодРождения'), residence_city_p=user_data.get('ГородПроживания'), phone_p=user_data.get('Телефон'), email_p=user_data.get('email'))
AlexProsku/HW_Python
Lesson_3/task_2.py
task_2.py
py
1,702
python
ru
code
0
github-code
36
20025415289
#Develop a menu-based python program #menu items: 1. Addition 2. Subtraction 3. Multiplication 4. Division 5. Average 6. Find maximum 7. Find minimum import sys def PrintMenu(): print("Menu") print("1. Addition") print("2. Subtraction") print("3. Multiplication") print("4. Division") print("5. Average") print("6. Find maximum") print("7. Find minimum") print("8.Exit") def add(list1): return sum(list1) def GetChoice(): ch = int(input("Enter Choice : ")); return ch def sub(list1): subtraction=0; for i in list1: subtraction = int(i) - subtraction return subtraction def mul(list1): mult=1 for i in list1: mult = mult * int(i) return mult def Div(list1): div=1 div = list1[0]/list1[1] return div def Avg(list1): s = sum(list1) l = len(list1) avg = s/l return avg def Max(list1): return max(list1) def Min(list1): return min(list1) def Choice(ch): if ch == 1: print("Addition for Numeric List",add(list1)) elif ch == 2: print("Subtraction for Numeric List",sub(list1)) elif ch == 3: print("Multiplication for Numeric List",mul(list1)) elif ch == 4: print("Division for Numeric List",Div(list1)) elif ch == 5: print("Average for Numeric List",Avg(list1)) elif ch == 6: print("Maximum for Numeric List",Max(list1)) elif ch == 7: print("Minimum for Numeric List",Min(list1)) def repeate(): while True: PrintMenu() ch=GetChoice() if ch == 8: return else: Choice(ch) #Program Driver list1 = [10,5] #PrintMenu() repeate()
sohelbaba/Python
Practical 3/p1.py
p1.py
py
1,808
python
en
code
0
github-code
36
43288963064
from ctypes import * import sys import pytest @pytest.fixture def dll(sofile): return CDLL(str(sofile), use_errno=True) def test_char_result(dll): f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_char result = f(0, 0, 0, 0, 0, 0) assert result == b'\x00' def test_boolresult(dll): f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_bool false_result = f(0, 0, 0, 0, 0, 0) assert false_result is False true_result = f(1, 0, 0, 0, 0, 0) assert true_result is True def test_unicode_function_name(dll): f = dll[u'_testfunc_i_bhilfd'] f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_int result = f(1, 2, 3, 4, 5.0, 6.0) assert result == 21 def test_truncate_python_longs(dll): f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_int x = sys.maxint * 2 result = f(x, x, x, x, 0, 0) assert result == -8 def test_convert_pointers(dll): f = dll.deref_LP_c_char_p f.restype = c_char f.argtypes = [POINTER(c_char_p)] # s = c_char_p(b'hello world') ps = pointer(s) assert f(ps) == b'h' assert f(s) == b'h' # automatic conversion from char** to char* ################################################################ def test_call_some_args(dll): f = dll.my_strchr f.argtypes = [c_char_p] f.restype = c_char_p result = f(b"abcd", ord("b")) assert result == b"bcd" def test_variadic_sum(dll): f = dll.variadic_sum f.argtypes = [c_long] f.restype = c_long result = f(3, 13, 38, 100) assert result == 13 + 38 + 100 result = f(2, 13, 38) assert result == 13 + 38 @pytest.mark.pypy_only def test_keepalive_buffers(monkeypatch, dll): import gc f = dll.my_strchr f.argtypes = [c_char_p] f.restype = c_char_p # orig__call_funcptr = f._call_funcptr def _call_funcptr(funcptr, *newargs): gc.collect() gc.collect() gc.collect() return orig__call_funcptr(funcptr, *newargs) monkeypatch.setattr(f, '_call_funcptr', _call_funcptr) # result = f(b"abcd", ord("b")) assert result == b"bcd" def test_caching_bug_1(dll): # the same test as test_call_some_args, with two extra lines # in the middle that trigger caching in f._ptr, which then # makes the last two lines fail f = dll.my_strchr f.argtypes = [c_char_p, c_int] f.restype = c_char_p result = f(b"abcd", ord("b")) assert result == b"bcd" result = f(b"abcd", ord("b"), 42) assert result == b"bcd" def test_argument_conversion_and_checks(dll): #This test is designed to check for segfaults if the wrong type of argument is passed as parameter strlen = dll.my_strchr strlen.argtypes = [c_char_p, c_int] strlen.restype = c_char_p assert strlen(b"eggs", ord("g")) == b"ggs" # Should raise ArgumentError, not segfault with pytest.raises(ArgumentError): strlen(0, 0) with pytest.raises(ArgumentError): strlen(False, 0) def test_union_as_passed_value(dll): class UN(Union): _fields_ = [("x", c_short), ("y", c_long)] dll.ret_un_func.restype = UN dll.ret_un_func.argtypes = [UN] A = UN * 2 a = A() a[1].x = 33 u = dll.ret_un_func(a[1]) assert u.y == 33 * 10000 @pytest.mark.pypy_only def test_cache_funcptr(dll): tf_b = dll.tf_b tf_b.restype = c_byte tf_b.argtypes = (c_byte,) assert tf_b(-126) == -42 ptr = tf_b._ptr assert ptr is not None assert tf_b(-126) == -42 assert tf_b._ptr is ptr def test_custom_from_param(dll): class A(c_byte): @classmethod def from_param(cls, obj): seen.append(obj) return -126 tf_b = dll.tf_b tf_b.restype = c_byte tf_b.argtypes = (c_byte,) tf_b.argtypes = [A] seen = [] assert tf_b("yadda") == -42 assert seen == ["yadda"] @pytest.mark.xfail(reason="warnings are disabled") def test_warnings(dll): import warnings warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: dll.get_an_integer() assert len(w) == 1 assert issubclass(w[0].category, RuntimeWarning) assert "C function without declared arguments called" in str(w[0].message) @pytest.mark.xfail def test_errcheck(dll): import warnings def errcheck(result, func, args): assert result == -42 assert type(result) is int arg, = args assert arg == -126 assert type(arg) is int return result # tf_b = dll.tf_b tf_b.restype = c_byte tf_b.argtypes = (c_byte,) tf_b.errcheck = errcheck assert tf_b(-126) == -42 del tf_b.errcheck with warnings.catch_warnings(record=True) as w: dll.get_an_integer.argtypes = [] dll.get_an_integer() assert len(w) == 1 assert issubclass(w[0].category, RuntimeWarning) assert "C function without declared return type called" in str(w[0].message) with warnings.catch_warnings(record=True) as w: dll.get_an_integer.restype = None dll.get_an_integer() assert len(w) == 0 warnings.resetwarnings() def test_errno(dll): test_errno = dll.test_errno test_errno.restype = c_int set_errno(42) res = test_errno() n = get_errno() assert (res, n) == (42, 43) set_errno(0) assert get_errno() == 0 def test_issue1655(dll): def ret_list_p(icount): def sz_array_p(obj, func, args): assert ('.LP_c_int object' in repr(obj) or '.LP_c_long object' in repr(obj)) assert repr(args) in ("('testing!', c_int(4))", "('testing!', c_long(4))") assert args[icount].value == 4 return [obj[i] for i in range(args[icount].value)] return sz_array_p get_data_prototype = CFUNCTYPE(POINTER(c_int), c_char_p, POINTER(c_int)) get_data_paramflag = ((1,), (2,)) get_data_signature = ('test_issue1655', dll) get_data = get_data_prototype(get_data_signature, get_data_paramflag) assert get_data(b'testing!') == 4 get_data.errcheck = ret_list_p(1) assert get_data(b'testing!') == [-1, -2, -3, -4] def test_issue2533(tmpdir): import cffi ffi = cffi.FFI() ffi.cdef("int **fetchme(void);") ffi.set_source("_x_cffi", """ int **fetchme(void) { static int a = 42; static int *pa = &a; return &pa; } """) ffi.compile(verbose=True, tmpdir=str(tmpdir)) import sys sys.path.insert(0, str(tmpdir)) try: from _x_cffi import ffi, lib finally: sys.path.pop(0) fetchme = ffi.addressof(lib, 'fetchme') fetchme = int(ffi.cast("intptr_t", fetchme)) FN = CFUNCTYPE(POINTER(POINTER(c_int))) ff = cast(fetchme, FN) g = ff() assert g.contents.contents.value == 42 h = c_int(43) g[0] = pointer(h) # used to crash here assert g.contents.contents.value == 43
mozillazg/pypy
extra_tests/ctypes_tests/test_functions.py
test_functions.py
py
7,258
python
en
code
430
github-code
36
21025390512
from db import db from db.db import AuthorEntity def save(author: AuthorEntity): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) author.save() def add_authors(authors: list[str], category: int): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) authors_copy = authors.copy() new_authors = AuthorEntity.select().where( (AuthorEntity.author_id.in_(authors_copy)) & (AuthorEntity.category == category)) for i in new_authors: if i.author_id in authors_copy: authors_copy.remove(i.author_id) for a in authors_copy: AuthorEntity(author_id=a, last_publication_id='', category=category).save() def remove_authors(authors: list[str], category: int): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) AuthorEntity.delete().where( (AuthorEntity.author_id.not_in(authors)) & (AuthorEntity.category == category)) def update_last_publication_id(author_id: str, last_publication_id: str, category: int): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) author = AuthorEntity.get((AuthorEntity.author_id == author_id) & (AuthorEntity.category == category)) author.last_publication_id = last_publication_id author.save() def get_last_publication_id(author_id: str, category: int): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) author = AuthorEntity.get((AuthorEntity.author_id == author_id) & (AuthorEntity.category == category)) return author.last_publication_id def get_all_authors(): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) resp = AuthorEntity.select() authors = list() for a in resp: authors.append(a) return authors def get_all_authors_without_deleted(): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) resp = AuthorEntity.select().where(AuthorEntity.is_deleted == False) authors = list() for a in resp: authors.append(a) return authors def set_is_working(author: str, is_working: bool): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) author = AuthorEntity.get(AuthorEntity.author_id == author) author.is_working = is_working author.save() def set_is_deleted(author: str, is_deleted: bool): with db.tiktok_db: db.tiktok_db.connect(reuse_if_open=True) author = AuthorEntity.get(AuthorEntity.author_id == author) author.is_deleted = is_deleted author.save()
MAG135/robot
repositories/author_repository.py
author_repository.py
py
2,671
python
en
code
0
github-code
36
6753606270
from flask import Flask # create Flask app object and init all modules def create_app(config_object): from .main import create_module as main_create_module from app.api.v1 import create_module as api_v1_create_module # Init APP app = Flask(__name__) app.config.from_object(config_object) # Init modules main_create_module(app) app.logger.info("Init Main module") app.logger.info('Initializing API v1 module') api_v1_create_module(app) app.logger.info('API v1 module initialized') return app
artem-shestakov/PIN_and_Hash
app/__init__.py
__init__.py
py
545
python
en
code
0
github-code
36
27770460762
import os, sys sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fastapi_basic'))) from fastapi.testclient import TestClient from app import app from controllers.models.test import Calculate_Data from services.auth import service_auth client = TestClient(app) access_token = service_auth.gen_token('johndoe', 'secret') def test_not_authorization(): body = Calculate_Data() response = client.post( "/calculate_profit", headers={ 'Content-type': 'application/json', }, json=body.dict() ) print('Result response :', response.json()) assert response.status_code == 401 def test_calculate_profit(): body = Calculate_Data() response = client.post( "/calculate_profit", headers={ 'Content-type':'application/json', 'Authorization': f'Bearer {access_token}', }, json=body.dict() ) print('Result response :', response.json()) assert response.status_code == 200
jinybear/fastapi_basic
tests/test_controllers.py
test_controllers.py
py
1,030
python
en
code
0
github-code
36
43156875927
# 성적 계산 # minso.jeong@daum.net ''' 문제링크 : https://www.codeup.kr/problem.php?id=1127 ''' score = 0 for _ in range(3): p, n = input().split() score += float(p) * int(n) print('%.1f' %score)
minssoj/Learning_Algorithm_Up
code/1127.py
1127.py
py
211
python
en
code
0
github-code
36
18567314308
# scrape British Columbia acupuncturists number_of_rows = 2090 # set this before running delim = "\t" from urllib.request import urlopen import time def SetHomeDirectory(): import os os.chdir("C:\\Users\\Matt Scandale\\OneDrive - Council of Better Business Bureaus, Inc\\Desktop") SetHomeDirectory() iURL = "https://portal.ctcma.bc.ca/public" from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.by import By driver = webdriver.Firefox() driver.implicitly_wait(10) driver.get(iURL) print("Page title:", driver.title) id_province_field = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Sheet0_Input5_TextBox1" id_submit_button = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Sheet0_SubmitButton" id_page_size = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Grid1_ctl00_ctl02_ctl00_ChangePageSizeTextBox" id_change_page_size = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Grid1_ctl00_ctl02_ctl00_ChangePageSizeLinkButton" id_results_grid = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Grid1_ctl00" out_filename = "out.txt" print("writing to " + out_filename) fh = open(out_filename, "w", encoding="utf8") time.sleep(1) driver.execute_script("document.getElementById('" + id_province_field + "').value = 'BC'") province = driver.find_element(by=By.ID, value=id_province_field) print("Province: " + province.get_attribute('value')) time.sleep(1) driver.execute_script("document.getElementById('" + id_submit_button + "').click()") time.sleep(1) driver.execute_script("document.getElementById('" + id_page_size + "').value = '" + str(number_of_rows) + "'") time.sleep(1) driver.execute_script("document.getElementById('" + id_change_page_size + "').click()") # THIS IS ABSOLUTELY CRITICAL TO GET ALL ROWS! print("waiting for all rows to load") time.sleep(15) results_grid = driver.find_element(by=By.ID, value=id_results_grid) rows = results_grid.find_elements(by=By.CLASS_NAME, value="rgRow") print("Rows:", len(rows)) links = [] for row in rows: cells = row.find_elements(by=By.XPATH, value="td") cell = cells[0] anchors = cell.find_elements(by=By.XPATH, value="a") anchor = anchors[0] links.append(anchor) print("Links:", len(links)) row_number = 0 for link in links: row_number += 1 time.sleep(3) driver.switch_to.default_content() link.click() #iframe = driver.find_element_by_xpath("/html/body/form/div[1]/table/tbody/tr[" + str(xcount) + "]/td[2]/iframe") iframes = driver.find_elements(by=By.TAG_NAME, value="iframe") print("Frames:", len(iframes)) framecount = 0 for iframe in iframes: framecount += 1 if framecount == 1: print("switching context to frame " + str(framecount)) driver.switch_to.frame(iframe) time.sleep(2) #panel = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_ContentPanel"]') #html = panel.get_attribute('innerHTML') person = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciNewContactMiniProfileCommon_ciNewContactMiniProfileCommon_contactName_fullName"]') person_value = person.get_attribute('innerHTML') address = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciAddress_ciAddress__address"]') address_value = address.get_attribute('innerHTML') phone = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciAddress_ciAddress__phoneNumber"]') phone_value = phone.get_attribute('innerHTML') email = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciAddress_ciAddress__email"]') email_value = email.get_attribute('innerHTML') company_value = "-" try: company = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciNewContactMiniProfileCommon_ciNewContactMiniProfileCommon_contactName_institute"]') company_value = company.get_attribute('innerHTML') except: pass print(person_value, address_value, phone_value, email_value, company_value) fh.write( person_value + "\t" + address_value + "\t" + phone_value + "\t" + email_value + "\t" + company_value + "\t" + "\n" ) #close_buttons = driver.find_elements_by_xpath("/html/body/form/div[1]/table/tbody/tr[1]/td[2]/table/tbody/tr/td[3]/ul/li[3]/a") #close_buttons = driver.find_elements_by_class_name("rwCloseButton") #for close_button in close_buttons: # close_button.click() fh.close() driver.close() print("end")
mscandale-iabbb/research_public
sample_iabbb_bots/scrape_bc_acupuncturists_shared.py
scrape_bc_acupuncturists_shared.py
py
4,974
python
en
code
0
github-code
36
1985519466
#imports libary needed to access photo from PIL import Image #Opens and loads the image that it processes you have to change this to what your picture is called. money = Image.open("money.png") money = money.convert("RGBA") pixels = money.load() #Sets two vairables for the loops that go through each pixel width, height = money.size #Nested loop for each pixel for a in range(0,width): for b in range(0,height): #This checks if the pixel is transparent or not if pixels[a,b][3] == 0: #If the pixel is transparent then it sets it to pink and not transparent pixels[a,b] = (255,192,203,255) #saves the image money.save("pinkbackgroundedmoney.png")
koanarec/recolorimagepython
recolor.py
recolor.py
py
730
python
en
code
0
github-code
36
23618566810
""" Makes it easier to create pretty representations """ def uirepr(object, name=None, defaults=None, exclude=None): """ Returns string of representation. """ if not hasattr(object, '__ui_repr__'): return repr(object) imports = {} collected = object.__ui_repr__(imports, name, defaults, exclude) result = '' for key in sorted(imports.iterkeys()): values = list(imports[key]) result += 'from {0} import {1}\n'.format(key, ', '.join(values)) # print headers result += '\n{0}'.format(collected[None]) del collected[None] if len(collected) == 0: return result notnone = [key for key, v in collected.iteritems() if v is not None] if len(notnone): length = max(len(key) for key in notnone) for key in sorted(collected.keys()): value = collected[key] if value is not None: value = value.rstrip() if value.count('\n') != 0: value = value.replace('\n', '\n' + ''.join([' ']*(length+4))) result += '\n{0:<{length}} = {1}'.format(key, value, length=length) for key in sorted(collected.keys()): value = collected[key] if value is None: result += '\n{0}'.format(key) return result def add_to_imports(object, imports): from inspect import isclass, isfunction if object is None: return if isclass(object) or isfunction(object): key = object.__module__ value = object.__name__ else: key = object.__class__.__module__ value = object.__class__.__name__ if key == '__builtin__': return if key in imports: imports[key].add(value) else: imports[key] = set([value]) def template_ui_repr(self, imports, name=None, defaults=None, exclude=None): """ Creates user friendly representation. """ from inspect import isdatadescriptor results = {} if name is None: name = getattr(self, '__ui_name__', self.__class__.__name__.lower()) results[None] = '{1} = {0.__class__.__name__}()'.format(self, name) add_to_imports(self, imports) # loop through dictionary attributes first. for key, value in self.__dict__.iteritems(): if key[0] == '_': continue if exclude is not None and key in exclude: continue if hasattr(value, '__ui_repr__'): default = None if defaults is None \ else defaults.__dict__.get(key, None) newname = name + '.' + key partial = value.__ui_repr__(imports, newname, default) results.update(partial) else: string = repr(value) if defaults is not None and key in defaults.__dict__ \ and type(value) is type(defaults.__dict__[key]) \ and string == repr(defaults.__dict__[key]): continue key = '{0}.{1}'.format(name, key) results[key] = string add_to_imports(string, imports) # then loops through class properties. for key in dir(self): if key[0] == '_': continue if key in self.__dict__: continue if exclude is not None and key in exclude: continue if not hasattr(self.__class__, key): continue value = getattr(self.__class__, key) if not isdatadescriptor(value): continue string = repr(getattr(self, key)) if defaults is None or not hasattr(defaults.__class__, key): pass elif not isdatadescriptor(getattr(defaults.__class__, key)): pass else: default = getattr(defaults, key) if type(getattr(self, key)) is type(default) and repr(default) == string: continue key = '{0}.{1}'.format(name, key) results[key] = string add_to_imports(string, imports) return results
mdavezac/LaDa
tools/uirepr.py
uirepr.py
py
3,584
python
en
code
5
github-code
36
37466207832
from concurrent import futures import sys import grpc import summary_api_pb2 import summary_api_pb2_grpc import pandas as pd from summary_statistics import calculate_frequency class DocumentSummarizer(summary_api_pb2_grpc.DocumentSummarizerServicer): def SummarizeDocument(self, request, context): if request.document.content != b'': data = pd.read_json(request.document.content.decode()) else: try: data = pd.read_csv(request.document.source.http_uri, index_col=0) request.document.content = data.to_json().encode('utf-8') except Exception as ex: raise ValueError("Cannot load html ({})".format(ex)) if request.params_for_aggregation not in data.columns: raise ValueError("Column ({}) is not in list ({})".format(request.params_for_aggregation,','.join(data.columns))) if len(data[request.params_for_aggregation].unique()) < int(request.exclude): temp_df = calculate_frequency(data, request.params_for_aggregation) else: raise ValueError("number of categories ({}) more than threshold ({})!".format((len(data[request.params_for_aggregation].unique())), int(request.exclude)) ) return summary_api_pb2.SummarizeDocumentReply(content=temp_df.reset_index(drop=True).to_json().encode('utf-8')) def serve(port): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) summary_api_pb2_grpc.add_DocumentSummarizerServicer_to_server(DocumentSummarizer(), server) server.add_insecure_port("localhost:{}".format(port)) print("start server listening on {}".format(port)) server.start() server.wait_for_termination() if __name__ == '__main__': port = sys.argv[1] if len(sys.argv) > 1 else 50052 serve(port)
doralaura24/visma
summary-statistics-service/summary/server.py
server.py
py
1,856
python
en
code
0
github-code
36
72640523303
#! /usr/bin/env python3 """ --- besspin.py is the main BESSPIN-Tool-Suite program. All documentation and features are based on solely executing this file. Please do not execute any other file. --- usage: besspin.py [-h] [-c CONFIGFILE | -cjson CONFIGFILESERIALIZED] [-w WORKINGDIRECTORY] [-l LOGFILE] [-d] [-ep {devHost,ciOnPrem,ciAWS,awsProd,awsDev}] [-job JOBID] BESSPIN (Balancing Evaluation of System Security Properties with Industrial Needs) optional arguments: -h, --help show this help message and exit -c CONFIGFILE, --configFile CONFIGFILE Overwrites the default config file: ./config.ini -cjson CONFIGFILESERIALIZED, --configFileSerialized CONFIGFILESERIALIZED Overwrites and augments the default production settings -w WORKINGDIRECTORY, --workingDirectory WORKINGDIRECTORY Overwrites the default working directory: ./workDir/ -l LOGFILE, --logFile LOGFILE Overwrites the default logFile: ./${workDir}/besspin.log -d, --debug Enable debugging mode. -ep {devHost,ciOnPrem,ciAWS,awsProd,awsDev}, --entrypoint {devHost,ciOnPrem,ciAWS,awsProd,awsDev} The entrypoint -job JOBID, --jobId JOBID The job ID in production mode. --- Defaults: workingDirectory = ./workDir configFile = ./config.ini logFile = ./${workDir}/besspin.log logging level = INFO entrypoint = devHost """ try: from besspin.base.utils.misc import * from besspin.base.config import loadConfiguration, genProdConfig from besspin.target.launch import startBesspin, endBesspin, resetTarget from besspin.cyberPhys.launch import startCyberPhys, endCyberPhys from besspin.base.utils import aws from besspin.base.threadControl import createBesspinLocks import logging, argparse, os, shutil, atexit, signal except Exception as exc: try: #check we're in nix import sys if (sys.executable.split('/')[1] != 'nix'): print (f"(Error)~ Please run within a nix shell. [Run <nix-shell> in besspin directory].") else: raise except: print(f"(Error)~ <{exc.__class__.__name__}>: {exc}") print(f"(Info)~ End of BESSPIN! [Exit code -1:Fatal]") exit(-1) def main (xArgs): # Create working Directory repoDir = os.path.abspath(os.path.dirname(__file__)) if (xArgs.workingDirectory): workDir = os.path.abspath(xArgs.workingDirectory) else: workDir = os.path.join(repoDir,'workDir') if (os.path.isdir(workDir)): # already exists, delete try: shutil.rmtree(workDir) except Exception as exc: print(f"(Error)~ Failed to delete <{workDir}>.\n{formatExc(exc)}.") exitBesspin(EXIT.Configuration, preSetup=True) try: os.mkdir(workDir) except Exception as exc: print(f"(Error)~ Failed to create the working directory <{workDir}>.\n{formatExc(exc)}.") exitBesspin(EXIT.Files_and_paths, preSetup=True) # Check log file if (xArgs.logFile): logFile = os.path.abspath(xArgs.logFile) else: logFile = os.path.join(workDir,'besspin.log') try: fLog = open(logFile,'w') fLog.close() except Exception as exc: print(f"(Error)~ Failed to create the log file <{logFile}>.\n{formatExc(exc)}.") exitBesspin(EXIT.Files_and_paths, preSetup=True) # Entrypoint if(xArgs.entrypoint is None): xArgs.entrypoint = 'devHost' # Check jobId is valid, if provided if(xArgs.jobId): if(not(re.match("^[A-Za-z0-9-_+.]+$", xArgs.jobId))): print("(Error)~ Provided jobId contained invalid character(s). It must match regex '[A-Za-z0-9-_+.]'") exitBesspin(EXIT.Files_and_paths, preSetup=True) # setup the logging logLevel = logging.DEBUG if (xArgs.debug) else logging.INFO logging.basicConfig(filename=logFile,filemode='w',format='%(asctime)s: (%(levelname)s)~ %(message)s',datefmt='%I:%M:%S %p',level=logLevel) printAndLog(f"Welcome to BESSPIN!") #Prepare the peaceful exit setSetting('trash',trashCanObj()) atexit.register(exitPeacefully,getSetting('trash')) # Store critical settings setSetting('repoDir', repoDir) setSetting ('workDir', workDir) setSetting('logFile', logFile) setSetting('debugMode', xArgs.debug) setSetting('besspinEntrypoint',xArgs.entrypoint) setSetting('prodJobId', xArgs.jobId) # Load all configuration and setup settings setupEnvFile = os.path.join(repoDir,'besspin','base','utils','setupEnv.json') setSetting('setupEnvFile', setupEnvFile) if (xArgs.configFile): configFile = os.path.abspath(xArgs.configFile) elif (xArgs.configFileSerialized): configFile = os.path.join(workDir,'production.ini') genProdConfig (xArgs.configFileSerialized, configFile) printAndLog(f"Configuration deserialized successfully to <{configFile}>.") else: configFile = os.path.join(repoDir,'config.ini') printAndLog(f"Using the default configuration in <{configFile}>.") setSetting('configFile', configFile) #Load the config file(s) loadConfiguration(configFile) #Create the global semaphores/Locks createBesspinLocks() #launch the tool if (isEqSetting('mode','cyberPhys')): startCyberPhys() endCyberPhys() else: xTarget = startBesspin() instruction = None if (isEqSetting('mode','production')): def sendSuccessMsgToPortal (nodeSuffix, reasonSuffix): aws.sendSQS(getSetting(f'{getSetting("besspinEntrypoint")}SqsQueueTX'), logAndExit, 'success', getSetting('prodJobId'), f"{getSetting('prodJobId')}-{nodeSuffix}", reason=f'besspin-production-{reasonSuffix}', hostIp=aws.getInstanceIp(logAndExit), fpgaIp=getSetting('productionTargetIp') ) printAndLog(f"Sent {reasonSuffix} message to the SQS queue.") # Notify portal that we have deployed successfully sendSuccessMsgToPortal('DEPLOY','deployment') # Wait for portal to instruct us to do something while (instruction != 'termination'): instruction = aws.pollPortalIndefinitely (getSetting(f'{getSetting("besspinEntrypoint")}S3Bucket'), xTarget.process, logAndExit) if (instruction == 'deadProcess'): warnAndLog ("The main process is dead. Will exit without a notice from Portal.") break printAndLog(f"Received {instruction} notice from Portal.") if (instruction == 'reset'): # execute reset flow xTarget = resetTarget(xTarget) # Notify portal that we have reset successfully sendSuccessMsgToPortal('RESET','reset') endBesspin(xTarget,(instruction=='deadProcess')) exitBesspin(EXIT.Success) if __name__ == '__main__': # Reading the bash arguments xArgParser = argparse.ArgumentParser (description='BESSPIN (Balancing Evaluation of System Security Properties with Industrial Needs)') xGroupConfig = xArgParser.add_mutually_exclusive_group(required=False) xGroupConfig.add_argument ('-c', '--configFile', help='Overwrites the default config file: ./config.ini') xGroupConfig.add_argument ('-cjson', '--configFileSerialized', help='Overwrites and augments the default production settings') xArgParser.add_argument ('-w', '--workingDirectory', help='Overwrites the default working directory: ./workDir/') xArgParser.add_argument ('-l', '--logFile', help='Overwrites the default logFile: ./${workDir}/besspin.log') xArgParser.add_argument ('-d', '--debug', help='Enable debugging mode.', action='store_true') xArgParser.add_argument ('-ep', '--entrypoint', choices=['devHost','ciOnPrem','ciAWS','awsProd','awsDev'], help='The entrypoint') xArgParser.add_argument ('-job', '--jobId', help='The job ID in production mode.') xArgs = xArgParser.parse_args() #Trapping the signals signalsToCatch = [signal.SIGINT, signal.SIGTERM] for xSignal in signalsToCatch: signal.signal(xSignal,exitOnInterrupt) main(xArgs)
GaloisInc/BESSPIN-Tool-Suite
besspin.py
besspin.py
py
8,521
python
en
code
5
github-code
36
21720674809
''' you are given a string made up of parenthesis only.Your task is to check whether parenthesis are balanced or not.If they are balanced print 1 else print 0 Input Description: You are given a string ‘s’ Output Description: Print 1 for balanced and 0 for imbalanced Sample Input : {({})} Sample Output : 1 ''' n = input() s = [char for char in n] even = len(s)//2 odd = (len(s)+1)//2 first = s[:even] if(len(s)%2 == 0) else s[:odd] second = s[even:] if(len(s)%2 == 0) else s[odd:] check = [] for i in range(len(first)): for j in range(len(second)): if(i==j and (first[i]=='{' and second[j] == '}') or (first[i]=='(' and second[j]==')') or (first[i]=='[' and second[j]==']')): check.append('1') if(set(check) == set('1')): print(1) else: print(0)
Aishwarya0206/Codekata
Strings/13.py
13.py
py
775
python
en
code
0
github-code
36
34821763042
sample_input = [[1, 5], [1, 2, 1], [1, 3, 2], [1, 4, 3], [3, 5, 5], [4, 3, 4], [4, 5, 1]] final_input = [[2, 4], [1, 6, 8], [1, 31, 2], [1, 2, 16], [31, 33, 2], [31, 32, 13], [6, 31, 10], [6, 5, 1], [35, 4, 16], [33, 34, 3], [5, 4, 16], [32, 34, 15], [32, 33, 18], [32, 35, 9], [2, 4, 6], [2, 31, 9], [34, 35, 16]] sample_input2 = [[1, 14], [1, 2, 2], [1, 3, 2], [1, 4, 3], [3, 4, 2], [3, 14, 5], [4, 3, 4], [4, 14, 3]] def one_travel(plan, possible_routes_list: list, current_cost: int = 0, history: str = ""): reduced_routes_list = possible_routes_list.copy() possible_outcomes = [] output = [] if history == "": history = str(plan[0]) for route in possible_routes_list: if route[0] == plan[0]: reduced_routes_list.remove(route) possible_outcomes.append([(route[1], plan[1]), route[2]]) for plan_and_cost in possible_outcomes: output.append( [plan_and_cost[0], reduced_routes_list, current_cost + plan_and_cost[1], history+f"→{plan_and_cost[0][0]}"]) return output def final_answer(input_data): possible_outcomes = one_travel(input_data[0], input_data[1:]) finished_routes_list = [] while possible_outcomes: last_route = possible_outcomes.pop() if last_route: if last_route[0][0] == last_route[0][1]: finished_routes_list.append((last_route[3], last_route[2])) else: possible_outcomes.extend(one_travel(*last_route)) print("\nFrom all the possible routes reaching destination:") cheapest_cost = finished_routes_list[0][1] for route in finished_routes_list: print(route) if route[1] < cheapest_cost: cheapest_cost = route[1] print(f'The cheapest one costs {cheapest_cost}') final_answer(final_input)
Bodziowy/Party-Parrot-Puzzles
Task06.py
Task06.py
py
2,285
python
en
code
0
github-code
36
36720262828
import time import uuid class stagedfiles: """ This type, when filled out as staged_files(n) or mod_n for some integer n, will watch the project that is in its input for consumed (staged) files and deliver them on each iteration """ def __init__(self, cs, samhandle, dbhandle): self.cs = cs self.samhandle = samhandle self.dbhandle = dbhandle self.stage_project = cs.dataset self.n = int(cs.cs_split_type[12:].strip(")")) def params(self): return [] def peek(self): if not self.cs.cs_last_split: self.cs.cs_last_split = 0 snapshotbit = "" else: snapshotbit = "minus snapshot_id %d" % self.cs.cs_last_split new = self.cs.dataset + "_slice_%d_stage_%d" % (self.cs.cs_last_split, self.n) self.samhandle.create_definition( self.cs.experiment, new, "project_name %s and consumed_status 'consumed' %s with limit %d " % (self.stage_project, snapshotbit, self.n), ) if self.samhandle.count_files(self.cs.job_type_obj.experiment, "defname:" + new) == 0: raise StopIteration return new def next(self): res = self.peek() snap1 = self.samhandle.take_snapshot(self.cs.job_type_obj.experiment, res) newfullname = res.replace("slice", "full") + "_%s" % int(time.time()) if self.cs.cs_last_split: snapshotbit = "snapshot_id %s or" % self.cs.cs_last_split self.samhandle.create_definition( self.cs.experiment, newfullname, "snapshot_id %s or snapshot_id %s " % (self.cs.cs_last_split, snap1) ) snap = self.samhandle.take_snapshot(self.cs.job_type_obj.experiment, newfullname) else: snap = snap1 self.cs.cs_last_split = snap return res def len(self): return self.samhandle.count_files(self.cs.experiment, "defname:" + self.ds) / self.n + 1 return res def edit_popup(self): return "null"
fermitools/poms
webservice/split_types/stagedfiles.py
stagedfiles.py
py
2,089
python
en
code
0
github-code
36
28982674293
class Solution: def findRepeatedDnaSequences(self, s: str) -> List[str]: if len(s) < 10: return [] tenLengthSet = set() ans = set() for i in range(len(s)-9): cur = s[i:i+10] if( cur in tenLengthSet ): ans.add(cur) tenLengthSet.add(cur) # print(ans) return list(ans)
Kirroneku/leetcode_practice
Done Randomly/Bit Manipulation/find_repeated_dna_fast.py
find_repeated_dna_fast.py
py
451
python
en
code
0
github-code
36
36395259809
import os import os.path as osp import random def load_class_idx_to_label(txt_path='./resource/imagenet1000_clsidx_to_labels.txt'): txt_str = '' with open(txt_path, 'r') as fio: for line in fio.readlines(): txt_str += line.strip('\n\r') return eval(txt_str) def load_folder_idx_to_label(txt_path='./resource/imagenet1000_folder_to_labels.txt'): _dict = {} with open(txt_path, 'r') as fio: for line in fio.readlines(): v, k = line.strip('\n\r').split(': ') _dict[k] = v return _dict def calc_accuracy_each_class(logits, targets, each_class=True): corrects = logits.max(dim=1)[1].eq(targets) total_acc = corrects.sum().item() / targets.size(0) if not each_class: return total_acc acc_dict = {} cnt_dict = {} for correct, target in zip(corrects, targets): target = target.item() if target not in acc_dict.keys(): acc_dict[target] = correct.item() cnt_dict[target] = 1 else: acc_dict[target] += correct.item() cnt_dict[target] += 1 for k in acc_dict.keys(): acc_dict[k] /= cnt_dict[k] return total_acc, [v for k, v in sorted(acc_dict.items())]
hyk1996/Rank-Diminishing-in-Deep-Neural-Networks
core/utils/imagenet.py
imagenet.py
py
1,242
python
en
code
4
github-code
36
8298790752
# from https://github.com/iskandr/fancyimpute import numpy as np from sklearn.utils.extmath import randomized_svd from sklearn.utils import check_array import warnings F32PREC = np.finfo(np.float32).eps from joblib import Memory memory = Memory('cache_dir', verbose=0) def soft_impute_rank(X_obs, n_folds = 5, max_rank = 10): l_mae = [] for ii in range(n_folds): obs_mask = ~np.isnan(X_obs) # randomly sample some test mask test_mask = np.array(np.random.binomial(np.ones_like(obs_mask), obs_mask * .2), dtype=bool) X_obs_train = X_obs.copy() X_obs_train[test_mask] = np.nan si = SoftImpute(max_rank=max_rank, verbose=False) X_obs_imp = si.fit_transform(X_obs_train) si.U mae_obs = si.mae_obs mae_test = np.mean(np.abs(X_obs[test_mask] - X_obs_imp[test_mask])) l_mae.append(mae_test) return si.U, l_mae @memory.cache def get_U_softimpute(X_obs, list_rank=None, boxplot=False, n_folds=3): """ return U_hat with SoftImput strategy Rank are cross selected (wrt MSE) in list_rank""" assert np.sum(np.isnan(X_obs)) > 0, 'X_obs do not contains any nan in "get_U_softimpute"' best_mae = float('inf') best_U = None best_rank = None list_rank = [1,2,3,4,5,6,7,8,9,10,20,30,100] ll_mae = [] for max_rank in list_rank: U, l_mae = soft_impute_rank(X_obs, n_folds = n_folds, max_rank = max_rank) ll_mae.append(l_mae) # print(' -get_U_softimpute, rank={}, mae={} + {}'.format(max_rank, np.round(np.mean(l_mae),4), np.round(np.std(l_mae),4))) if np.mean(l_mae) < best_mae: best_mae = np.mean(l_mae) best_U = U best_rank = max_rank if boxplot: sns.swarmplot(data=np.array(ll_mae).T) plt.xticks(ticks = np.arange(len(list_rank)), labels=[str(x) for x in list_rank]) plt.xlabel('SVD rank') plt.ylabel('MAE on test fold') plt.show() print('-get_U_softimpute, best_rank=',best_rank) print('-get_U_softimpute, best_mae=',best_mae) return best_U def masked_mae(X_true, X_pred, mask): masked_diff = X_true[mask] - X_pred[mask] return np.mean(np.abs(masked_diff)) def generate_random_column_samples(column): col_mask = np.isnan(column) n_missing = np.sum(col_mask) if n_missing == len(column): # logging.warn("No observed values in column") return np.zeros_like(column) mean = np.nanmean(column) std = np.nanstd(column) if np.isclose(std, 0): return np.array([mean] * n_missing) else: return np.random.randn(n_missing) * std + mean class Solver(object): def __init__( self, fill_method="zero", min_value=None, max_value=None, normalizer=None): self.fill_method = fill_method self.min_value = min_value self.max_value = max_value self.normalizer = normalizer def __repr__(self): return str(self) def __str__(self): field_list = [] for (k, v) in sorted(self.__dict__.items()): if v is None or isinstance(v, (float, int)): field_list.append("%s=%s" % (k, v)) elif isinstance(v, str): field_list.append("%s='%s'" % (k, v)) return "%s(%s)" % ( self.__class__.__name__, ", ".join(field_list)) def _check_input(self, X): if len(X.shape) != 2: raise ValueError("Expected 2d matrix, got %s array" % (X.shape,)) def _check_missing_value_mask(self, missing): if not missing.any(): warnings.simplefilter("always") warnings.warn("Input matrix is not missing any values") if missing.all(): raise ValueError("Input matrix must have some non-missing values") def _fill_columns_with_fn(self, X, missing_mask, col_fn): for col_idx in range(X.shape[1]): missing_col = missing_mask[:, col_idx] n_missing = missing_col.sum() if n_missing == 0: continue col_data = X[:, col_idx] fill_values = col_fn(col_data) if np.all(np.isnan(fill_values)): fill_values = 0 X[missing_col, col_idx] = fill_values def fill( self, X, missing_mask, fill_method=None, inplace=False): """ Parameters ---------- X : np.array Data array containing NaN entries missing_mask : np.array Boolean array indicating where NaN entries are fill_method : str "zero": fill missing entries with zeros "mean": fill with column means "median" : fill with column medians "min": fill with min value per column "random": fill with gaussian samples according to mean/std of column inplace : bool Modify matrix or fill a copy """ X = check_array(X, force_all_finite=False) if not inplace: X = X.copy() if not fill_method: fill_method = self.fill_method if fill_method not in ("zero", "mean", "median", "min", "random"): raise ValueError("Invalid fill method: '%s'" % (fill_method)) elif fill_method == "zero": # replace NaN's with 0 X[missing_mask] = 0 elif fill_method == "mean": self._fill_columns_with_fn(X, missing_mask, np.nanmean) elif fill_method == "median": self._fill_columns_with_fn(X, missing_mask, np.nanmedian) elif fill_method == "min": self._fill_columns_with_fn(X, missing_mask, np.nanmin) elif fill_method == "random": self._fill_columns_with_fn( X, missing_mask, col_fn=generate_random_column_samples) return X def prepare_input_data(self, X): """ Check to make sure that the input matrix and its mask of missing values are valid. Returns X and missing mask. """ X = check_array(X, force_all_finite=False) if X.dtype != "f" and X.dtype != "d": X = X.astype(float) self._check_input(X) missing_mask = np.isnan(X) self._check_missing_value_mask(missing_mask) return X, missing_mask def clip(self, X): """ Clip values to fall within any global or column-wise min/max constraints """ X = np.asarray(X) if self.min_value is not None: X[X < self.min_value] = self.min_value if self.max_value is not None: X[X > self.max_value] = self.max_value return X def project_result(self, X): """ First undo normalization and then clip to the user-specified min/max range. """ X = np.asarray(X) if self.normalizer is not None: X = self.normalizer.inverse_transform(X) return self.clip(X) def solve(self, X, missing_mask): """ Given an initialized matrix X and a mask of where its missing values had been, return a completion of X. """ raise ValueError("%s.solve not yet implemented!" % ( self.__class__.__name__,)) def fit_transform(self, X, y=None): """ Fit the imputer and then transform input `X` Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer in sklearn) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. """ X_original, missing_mask = self.prepare_input_data(X) observed_mask = ~missing_mask X = X_original.copy() if self.normalizer is not None: X = self.normalizer.fit_transform(X) X_filled = self.fill(X, missing_mask, inplace=True) if not isinstance(X_filled, np.ndarray): raise TypeError( "Expected %s.fill() to return NumPy array but got %s" % ( self.__class__.__name__, type(X_filled))) X_result = self.solve(X_filled, missing_mask) if not isinstance(X_result, np.ndarray): raise TypeError( "Expected %s.solve() to return NumPy array but got %s" % ( self.__class__.__name__, type(X_result))) X_result = self.project_result(X=X_result) X_result[observed_mask] = X_original[observed_mask] return X_result def fit(self, X, y=None): """ Fit the imputer on input `X`. Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer in sklearn) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. """ raise ValueError( "%s.fit not implemented! This imputation algorithm likely " "doesn't support inductive mode. Only fit_transform is " "supported at this time." % ( self.__class__.__name__,)) def transform(self, X, y=None): """ Transform input `X`. Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer in sklearn) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. """ raise ValueError( "%s.transform not implemented! This imputation algorithm likely " "doesn't support inductive mode. Only %s.fit_transform is " "supported at this time." % ( self.__class__.__name__, self.__class__.__name__)) class SoftImpute(Solver): """ Implementation of the SoftImpute algorithm from: "Spectral Regularization Algorithms for Learning Large Incomplete Matrices" by Mazumder, Hastie, and Tibshirani. """ def __init__( self, shrinkage_value=None, convergence_threshold=0.001, max_iters=100, max_rank=None, n_power_iterations=1, init_fill_method="zero", min_value=None, max_value=None, normalizer=None, verbose=True): """ Parameters ---------- shrinkage_value : float Value by which we shrink singular values on each iteration. If omitted then the default value will be the maximum singular value of the initialized matrix (zeros for missing values) divided by 100. convergence_threshold : float Minimum ration difference between iterations (as a fraction of the Frobenius norm of the current solution) before stopping. max_iters : int Maximum number of SVD iterations max_rank : int, optional Perform a truncated SVD on each iteration with this value as its rank. n_power_iterations : int Number of power iterations to perform with randomized SVD init_fill_method : str How to initialize missing values of data matrix, default is to fill them with zeros. min_value : float Smallest allowable value in the solution max_value : float Largest allowable value in the solution normalizer : object Any object (such as BiScaler) with fit() and transform() methods verbose : bool Print debugging info """ Solver.__init__( self, fill_method=init_fill_method, min_value=min_value, max_value=max_value, normalizer=normalizer) self.shrinkage_value = shrinkage_value self.convergence_threshold = convergence_threshold self.max_iters = max_iters self.max_rank = max_rank self.n_power_iterations = n_power_iterations self.verbose = verbose self.U = None self.V = None self.S = None self.mae_obs = None def _converged(self, X_old, X_new, missing_mask): # check for convergence old_missing_values = X_old[missing_mask] new_missing_values = X_new[missing_mask] difference = old_missing_values - new_missing_values ssd = np.sum(difference ** 2) old_norm = np.sqrt((old_missing_values ** 2).sum()) # edge cases if old_norm == 0 or (old_norm < F32PREC and np.sqrt(ssd) > F32PREC): return False else: return (np.sqrt(ssd) / old_norm) < self.convergence_threshold def _svd_step(self, X, shrinkage_value, max_rank=None): """ Returns reconstructed X from low-rank thresholded SVD and the rank achieved. """ if max_rank: # if we have a max rank then perform the faster randomized SVD (U, s, V) = randomized_svd( X, max_rank, n_iter=self.n_power_iterations) else: # perform a full rank SVD using ARPACK (U, s, V) = np.linalg.svd( X, full_matrices=False, compute_uv=True) s_thresh = np.maximum(s - shrinkage_value, 0) rank = (s_thresh > 0).sum() s_thresh = s_thresh[:rank] U_thresh = U[:, :rank] V_thresh = V[:rank, :] S_thresh = np.diag(s_thresh) X_reconstruction = np.dot(U_thresh, np.dot(S_thresh, V_thresh)) self.U, self.S, self.V = U_thresh, S_thresh, V_thresh return X_reconstruction, rank def _max_singular_value(self, X_filled): # quick decomposition of X_filled into rank-1 SVD _, s, _ = randomized_svd( X_filled, 1, n_iter=5) return s[0] def solve(self, X, missing_mask): X = check_array(X, force_all_finite=False) X_init = X.copy() X_filled = X observed_mask = ~missing_mask max_singular_value = self._max_singular_value(X_filled) if self.verbose: print("[SoftImpute] Max Singular Value of X_init = %f" % ( max_singular_value)) if self.shrinkage_value: shrinkage_value = self.shrinkage_value else: # totally hackish heuristic: keep only components # with at least 1/50th the max singular value shrinkage_value = max_singular_value / 50.0 for i in range(self.max_iters): X_reconstruction, rank = self._svd_step( X_filled, shrinkage_value, max_rank=self.max_rank) X_reconstruction = self.clip(X_reconstruction) # print error on observed data if self.verbose: mae = masked_mae( X_true=X_init, X_pred=X_reconstruction, mask=observed_mask) print( "[SoftImpute] Iter %d: observed MAE=%0.6f rank=%d" % ( i + 1, mae, rank)) converged = self._converged( X_old=X_filled, X_new=X_reconstruction, missing_mask=missing_mask) X_filled[missing_mask] = X_reconstruction[missing_mask] if converged: break if self.verbose: print("[SoftImpute] Stopped after iteration %d for lambda=%f" % ( i + 1, shrinkage_value)) self.mae_obs = masked_mae( X_true=X_init, X_pred=X_reconstruction, mask=observed_mask) return X_filled if __name__=='__main__': from generate_data import gen_lrmf, gen_dlvm from generate_data import ampute import matplotlib.pyplot as plt import seaborn as sns Z, X, w, y, ps = gen_lrmf(d=3) X_obs = ampute(X) print('boxplot of get_U_softimpute with gen_lrmf(d=3)') U = get_U_softimpute(X_obs, boxplot=True)
TwsThomas/miss-vae
softimpute.py
softimpute.py
py
16,429
python
en
code
2
github-code
36
43639564307
#!/usr/bin/env python # coding=utf-8 class Item(): def __init__(self, key = None): self.key = key self.nextItem = None class Stack(): def __init__(self): self.head = None def push(self, item): if self.head is None: item.nextItem = None self.head = item else: item.nextItem = self.head self.head = item def pop(self): if self.head is None: raise Exception("Stack underflow") item = self.head self.head = item.nextItem return item if __name__ == '__main__': A = Stack() for i in range(10): A.push(Item(i)) for i in range(11): try: print(A.pop().key, end = " ") except Exception: print("\nStack underflow exception handled")
blry/CLRS
C10-Elementary-Data-Structures/10_2_2_stack_using_linked_list.py
10_2_2_stack_using_linked_list.py
py
843
python
en
code
0
github-code
36
70031496105
from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * import pickle import store_database d = store_database.d class TodayStores(QWidget): def __init__(self): super().__init__() self.set_ui() def set_ui(self): self.setWindowTitle("Today's Stores") self.resize(350,400) self.back_g = QLabel(self) self.pixmap = QPixmap('TSbg.jpg') self.back_g.setPixmap(self.pixmap) font = QFont('Century Gothic',12) font.setBold(True) self.titleLbl = QLabel("Please select a store:", self) self.titleLbl.setFont(font) self.titleLbl.resize(250, 50) self.titleLbl.move(90,60) global datetime datetime = QDateTime.currentDateTime() self.timeLbl = QLabel((datetime.toString("dddd,dd MMMM yyyy hh:mm")), self) # display current system date/time self.timeLbl.setFont(font) self.timeLbl.resize(300, 50) # or user's defined date/time self.timeLbl.move(40,100) day_now = datetime.toString("dddd") # string operation to obtain day(eg. Monday) of current date or input date time_now = int(datetime.toString("hmm")) # string operation to obtain time(eg. 1030) of current or inputted time # convert string to integer for comparison purpose lst_of_stall = [] count_op_stall = 0 for key, value in d.items(): op_start_time = value[0] op_end_time = value[1] op_stall = key[0] if (key[1] == day_now) and (op_start_time <= time_now <= op_end_time): # compare the day and time with count_op_stall += 1 # the stalls' operating days and times in the 'stalls operating hour " lst_of_stall.append(op_stall) # dictionary and append name of opening stalls in a list if count_op_stall == 0: self.stallLbl = QLabel(("Sorry,all the stores are closed!"), self) self.stallLbl.setFont(font) else: for stall in lst_of_stall: # a loop to create buttons for all the opening stalls self.stallBtn = QPushButton(stall, self) self.stallBtn.setStyleSheet("QPushButton{border-image: url(ui/button.png)}" "QPushButton:hover{border-image: url(ui/button_hover.png)}" "QPushButton:pressed{border-image: url(ui/button_clicked.png)}") self.stallBtn.setFont(QFont("Century Gothic",11)) self.stallBtn.move(75,(count_op_stall+1)*30) self.stallBtn.resize(200,28) count_op_stall +=1 global stallName # this variable is used to get the text on each QPushButtons and then used in stallName = self.stallBtn.text() # StallMenu() class to display the stall menu self.stallBtn.clicked.connect(self.show_stall_menu) self.backBtn = QPushButton("Back to main menu", self) self.backBtn.setFont(font) self.backBtn.resize(350,35) self.backBtn.move(0,365) self.backBtn.clicked.connect(self.back_to_main_window) def back_to_main_window(self): from MainWindow import MainWindow self.mainWin = MainWindow() self.mainWin.show() self.hide() def show_stall_menu(self): global stallName stallName =self.sender().text() self.window3 = StallMenu() self.window3.show() self.hide() class StallMenu(QWidget): def __init__(self): QWidget.__init__(self) self.set_up_ui() self.retrieve_menu() def set_up_ui(self): self.setWindowTitle("NTU Canteen: Stall Menu") self.resize(1000, 740) self.back_g1 = QLabel(self) self.pixmap = QPixmap('bg3.jpg') self.back_g1.setPixmap(self.pixmap) self.menuLb = QLabel(self) self.priceLb = QLabel(self) self.stlname = QLabel(self) self.op_hr = QPushButton("Operating Hours", self) self.op_hr.setFont(QFont("Century Gothic",12)) self.op_hr.move(700,200) self.op_hr.resize(170,30) self.op_hr.setStyleSheet("QPushButton{border-image: url(ui/button.png)}" "QPushButton:hover{border-image: url(ui/button_hover.png)}" "QPushButton:pressed{border-image: url(ui/button_clicked.png)}") self.op_hr.clicked.connect(self.show_op_hour) self.calWt = QPushButton("Calculate Wait Time",self) self.calWt.setFont(QFont("Century Gothic",12)) self.calWt.move(500,200) self.calWt.resize(170,30) self.calWt.setStyleSheet("QPushButton{border-image: url(ui/button.png)}" "QPushButton:hover{border-image: url(ui/button_hover.png)}" "QPushButton:pressed{border-image: url(ui/button_clicked.png)}") self.calWt.clicked.connect(self.cal_wait_time) self.backb = QPushButton("Back to the Main Screen", self) self.backb.setFont(QFont("Century Gothic",15)) self.backb.resize(1000,40) self.backb.move(0,700) self.backb.clicked.connect(self.back_to_mainwin) self.backm = QPushButton("Back to the Previous Screen", self) self.backm.setFont(QFont("Century Gothic", 15)) self.backm.resize(1000, 40) self.backm.move(0,660) self.backm.clicked.connect(self.back_to_win2) if stallName == 'Indian Cuisine': self.indian = QLabel(self) indian_picture = QPixmap('indianicon.jpg') self.indian.setPixmap(indian_picture) self.indian.move(100,200) elif stallName == 'VinFood Western': self.western = QLabel(self) western_picture = QPixmap('westernicon.jpg') self.western.setPixmap(western_picture) self.western.move(100, 200) elif stallName == 'Mini Wok': self.wok = QLabel(self) wok_picture = QPixmap('wokicon.jpg') self.wok.setPixmap(wok_picture) self.wok.move(100, 200) elif stallName == 'Malay BBQ': self.bbq = QLabel(self) bbq_picture = QPixmap('bbq.jpg') self.bbq.setPixmap(bbq_picture) self.bbq.move(100, 200) else: self.huat = QLabel(self) huat_picture = QPixmap('huat.jpg') self.huat.setPixmap(huat_picture) self.huat.move(150, 250) def retrieve_menu(self): # open pickler file which store all the menus for different stalls in their respective dictionaries aFile = open('backup.out', 'rb') self.dict_ic = pickle.load(aFile) self.dict_vw = pickle.load(aFile) self.dict_mw = pickle.load(aFile) self.dict_mbbq = pickle.load(aFile) self.dict_huat = pickle.load(aFile) aFile.close() day_now = datetime.toString("dddd") # string operation to obtain day(eg. Monday) of current date or input date time_now = int( datetime.toString("hmm")) # string operation to obtain time(eg. 1030) of current or inputted time # convert string to integer for comparison purpose lst_stall = ["Indian Cuisine", "VinFood Western", "Mini Wok", "Malay BBQ", "Huat Beverage"] lst_dict = [self.dict_ic, self.dict_vw, self.dict_mw, self.dict_mbbq, self.dict_huat] for i in lst_stall: if stallName == i: # stallName obtained from StallList() class self.stlname.setText(i) self.stlname.move(300,50) self.stlname.setFont(QFont("Cream Peach",50)) stallindex = lst_stall.index(i) dict_name = lst_dict[stallindex] for key, value in dict_name.items(): starttime = value[0] endtime = value[1] if day_now == key[1] and (starttime <= time_now <= endtime): # based on the day and time, display the stall self.menuLb.setText(key[2]) # menu according to its operating time self.menuLb.move(400,250) self.priceLb.setText(key[3]) self.priceLb.move(730,250) self.menuLb.setFont(QFont("Ink Free",20)) self.priceLb.setFont(QFont("Ink Free",20)) self.update() def back_to_mainwin(self): from MainWindow import MainWindow self.mainWin = MainWindow() self.mainWin.show() self.hide() def back_to_win2(self): self.win2 = TodayStores() self.win2.show() self.hide() def show_op_hour(self): # Dialog box to display operating hour for the stall opHourDlg = QDialog() opHourDlg.setMinimumWidth(480) opHourDlg.setWindowTitle("Operating Hour for " + stallName) opHourDlg.setWindowModality(Qt.ApplicationModal) vbox = QVBoxLayout() vbox.setAlignment(Qt.AlignCenter) op_days = [] for store_day, op_time in d.items(): # go through the "stall operating hour" dictionary and store the opening op_start = op_time[0] # days and times of the stall in a list then display them in the dialog window op_end = op_time[1] day = store_day[1] if (store_day[0] == stallName): op_days.append((day, op_start, op_end)) for day in op_days: dayLbl = QLabel(day[0]) if day[1] == 0: timeLbl=QLabel("Closed") else: self.day1_text = str(day[1]) self.day2_text = str(day[2]) timeday1 = QDateTime.fromString(self.day1_text,"hmm") # convert time from 24-hour clock format to am/pm format timeday2 = QDateTime.fromString(self.day2_text, "hhmm") timeLbl = QLabel(timeday1.toString("h:mm ap") + " - " + timeday2.toString("h:mm ap")) hbox = QHBoxLayout() hbox.addWidget(dayLbl) hbox.addWidget(timeLbl) vbox.addLayout(hbox) okBtn = QPushButton("Ok", self) vbox.addWidget(okBtn) opHourDlg.setLayout(vbox) okBtn.clicked.connect(opHourDlg.close) opHourDlg.exec() def cal_wait_time(self): self.win3 = QueueTime() self.win3.show() class QueueTime(QWidget): def __init__(self): QWidget.__init__(self) self.setWindowTitle("Calculate Wait Time") self.resize(300,150) self.queueLbl = QLabel("Enter number of people in queue", self) # allow user to input no. of people queuing self.queueLbl.setFont(QFont("Century Gothic",12)) self.queueLbl.move(20,20) self.numPaxLe = QLineEdit(self) self.numPaxLe.resize(200,25) self.numPaxLe.move(50,50) self.calBtn = QPushButton("Calculate waiting time",self) self.calBtn.setStyleSheet("QPushButton{border-image: url(ui/button.png)}" "QPushButton:hover{border-image: url(ui/button_hover.png)}" "QPushButton:pressed{border-image: url(ui/button_clicked.png)}") self.calBtn.setFont(QFont("Century Gothic", 12)) self.calBtn.resize(200,25) self.calBtn.move(50,80) self.calBtn.clicked.connect(self.calculate_wait_time) def calculate_wait_time(self): if self.numPaxLe.text().isdigit(): # validatation to ensure that user input a digit, allow user numpax = int(self.numPaxLe.text()) # to re-enter until a correct input is achieved stall_info = d[(stallName, "Monday")] # to obtain the waiting time per person from the "stall wait_time = int(stall_info[2]) * numpax # operating hour" dictionary msg = QMessageBox() # message box to display est. waiting time for the stall msg.setText("Estimated waiting time is: " + str(wait_time) + " minutes") msg.setWindowTitle("Waiting time info") msg.setIcon(QMessageBox.Information) msg.setStandardButtons(QMessageBox.Ok) msg.exec() else: msg = QMessageBox() # message box to alert user that input is invalid msg.setText("Please enter a valid number") msg.setWindowTitle("Error") msg.setIcon(QMessageBox.Warning) msg.setStandardButtons(QMessageBox.Ok) msg.exec()
cammy-mun/Canteen-Food-Menu
DSAI1_Lee_Luo_Mun/TodayStores.py
TodayStores.py
py
12,873
python
en
code
0
github-code
36
73520890983
palavra = str(input('Digite a frase: ')) palavra = palavra.strip().upper() analise = palavra.split() analise = ''.join(analise) inverso = '' for letra in range(len(analise)-1, -1, -1): inverso += analise[letra] print(analise) print(inverso) if (analise == inverso): print('\nTEMOS UM PALINDROMO') else: print('\nNÂO TEMOS UM PALÍNDROMO')
luks-rossato/curso-Python3
Exercicios propostos/Ex 053 - Palíndromo].py
Ex 053 - Palíndromo].py
py
355
python
pt
code
0
github-code
36
17964305010
from sklearn.model_selection import train_test_split from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_classif from sklearn.feature_selection import chi2 import numpy as np import argparse import sys import os from sklearn.svm import SVC from sklearn.metrics import confusion_matrix from sklearn.ensemble import RandomForestClassifier from sklearn.neural_network import MLPClassifier from sklearn.ensemble import AdaBoostClassifier import csv from sklearn.model_selection import KFold from scipy import stats import warnings #to ignore convergence warning warnings.filterwarnings('ignore') def accuracy( C ): ''' Compute accuracy given Numpy array confusion matrix C. Returns a floating point value ''' total = np.sum(C) correct=0 for i in range(len(C)): correct = correct+C[i][i] if total != 0: acc = correct/total else: acc = 0 return acc def recall( C ): ''' Compute recall given Numpy array confusion matrix C. Returns a list of floating point values ''' lst1 = [] lst2 = [] result = [] for i in range(len(C)): lst1.append(C[i][i]) lst2.append(np.sum(C[i])) for i in range(len(C)): if lst2[i] !=0: result.append(lst1[i]/lst2[i]) else: result.append(0) return result def precision( C ): ''' Compute precision given Numpy array confusion matrix C. Returns a list of floating point values ''' lst1 = [] lst2 = [] result = [] for i in range(len(C)): lst1.append(C[i][i]) lst2.append(np.sum(C[:,i])) for i in range(len(C)): if lst2[i] != 0 : result.append(lst1[i]/lst2[i]) else: result.append(0.0) return result def class31(filename): ''' This function performs experiment 3.1 Parameters filename : string, the name of the npz file from Task 2 Returns: X_train: NumPy array, with the selected training features X_test: NumPy array, with the selected testing features y_train: NumPy array, with the selected training classes y_test: NumPy array, with the selected testing classes i: int, the index of the supposed best classifier ''' print("Doing class31") iBest = 0 max_acc = 0 line1 = [1] line2 = [2] line3 = [3] line4 = [4] line5 = [5] #load file and array data = np.load(filename) data = data['arr_0'] #get x and y then split them by using train_test_split X, y = data[:,0:-1], data[:,-1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) #1.SVC: support vector machine with a linear kernel. clf_linear = SVC(kernel='linear',max_iter=1000) clf_linear.fit(X_train, y_train) #Perform classification on samples in X. y_pred_linear = clf_linear.predict(X_test) svc_linear_matrix = confusion_matrix(y_test, y_pred_linear) iBest = 1 max_acc = accuracy(svc_linear_matrix) #2.SVC: support vector machine with a radial basis function (γ = 2) kernel. clf_rbf = SVC(kernel='rbf',gamma=2,max_iter=1000) clf_rbf.fit(X_train, y_train) y_pred_rbf = clf_rbf.predict(X_test) svc_rbf_matrix = confusion_matrix(y_test, y_pred_rbf) if accuracy(svc_rbf_matrix) > max_acc: iBest = 2 max_acc = accuracy(svc_rbf_matrix) #3.RandomForestClassifier: with a maximum depth of 5, and 10 estimators. clf_forest = RandomForestClassifier(n_estimators=10, max_depth=5) clf_forest.fit(X_train,y_train) y_pred_forest = clf_forest.predict(X_test) forest_matrix = confusion_matrix(y_test, y_pred_forest) if accuracy(forest_matrix) > max_acc: iBest = 3 max_acc = accuracy(forest_matrix) #4.MLPClassifier: A feed-forward neural network, with α = 0.05. clf_mlp = MLPClassifier(alpha=0.05) clf_mlp.fit(X_train,y_train) y_pred_mlp = clf_mlp.predict(X_test) mlp_matrix = confusion_matrix(y_test,y_pred_mlp) if accuracy(mlp_matrix) > max_acc: iBest = 4 max_acc = accuracy(mlp_matrix) #5.AdaBoostClassifier: with the default hyper-parameters. clf_ada = AdaBoostClassifier() clf_ada.fit(X_train,y_train) y_pred_ada = clf_ada.predict(X_test) ada_matrix = confusion_matrix(y_test,y_pred_ada) if accuracy(ada_matrix) > max_acc: iBest = 5 max_acc = accuracy(ada_matrix) #save result to a csv file line1.append(accuracy(svc_linear_matrix)) line2.append(accuracy(svc_rbf_matrix)) line3.append(accuracy(forest_matrix)) line4.append(accuracy(mlp_matrix)) line5.append(accuracy(ada_matrix)) line1 = line1 + recall(svc_linear_matrix) line2 = line2 + recall(svc_rbf_matrix) line3 = line3 + recall(forest_matrix) line4 = line4 + recall(mlp_matrix) line5 = line5 + recall(ada_matrix) line1 = line1 + precision(svc_linear_matrix) line2 = line2 + precision(svc_rbf_matrix) line3 = line3 + precision(forest_matrix) line4 = line4 + precision(mlp_matrix) line5 = line5 + precision(ada_matrix) for i in range(len(svc_linear_matrix)): line1 = line1 + list(svc_linear_matrix[i]) line2 = line2 + list(svc_rbf_matrix[i]) line3 = line3 + list(forest_matrix[i]) line4 = line4 + list(mlp_matrix[i]) line5 = line5 + list(ada_matrix[i]) with open( 'a1_3.1.csv', 'w', newline='') as csv_file: writer = csv.writer(csv_file, delimiter=',') writer.writerow(line1) writer.writerow(line2) writer.writerow(line3) writer.writerow(line4) writer.writerow(line5) print(iBest) print("Class31 done!") return (X_train, X_test, y_train, y_test,iBest) def class32(X_train, X_test, y_train, y_test,iBest): ''' This function performs experiment 3.2 Parameters: X_train: NumPy array, with the selected training features X_test: NumPy array, with the selected testing features y_train: NumPy array, with the selected training classes y_test: NumPy array, with the selected testing classes i: int, the index of the supposed best classifier (from task 3.1) Returns: X_1k: numPy array, just 1K rows of X_train y_1k: numPy array, just 1K rows of y_train ''' accuracies=[] print("Doing class32") if iBest == 1: clf = SVC(kernel='linear',max_iter=1000) if iBest == 2: clf = SVC(kernel='rbf',gamma=2,max_iter=1000) if iBest == 3: clf = RandomForestClassifier(n_estimators=10, max_depth=5) if iBest == 4: clf = MLPClassifier(alpha=0.05) if iBest == 5: clf = AdaBoostClassifier() for i in [1000,5000,10000,15000,20000]: if i == 1000: X_1k = X_train[:i] y_1k = y_train[:i] new_X_test = X_test[:i] new_y_test = y_test[:i] new_X_train = X_train[:i] new_y_train = y_train[:i] clf.fit(new_X_train, new_y_train) y_pred = clf.predict(new_X_test) matrix = confusion_matrix(new_y_test,y_pred) accuracies.append(accuracy(matrix)) with open( 'a1_3.2.csv', 'w', newline='') as csv_file: writer = csv.writer(csv_file, delimiter=',') writer.writerow(accuracies) print("Class32 done!") return (X_1k, y_1k) def class33(X_train, X_test, y_train, y_test, i, X_1k, y_1k): ''' This function performs experiment 3.3 Parameters: X_train: NumPy array, with the selected training features X_test: NumPy array, with the selected testing features y_train: NumPy array, with the selected training classes y_test: NumPy array, with the selected testing classes i: int, the index of the supposed best classifier (from task 3.1) X_1k: numPy array, just 1K rows of X_train (from task 3.2) y_1k: numPy array, just 1K rows of y_train (from task 3.2) ''' print("Doing class33") k=[5,10,20,30,40,50] line1 =[] line2 =[] line3 =[] line4 =[] line5 =[] line6 =[] line7 =[] line8 =[] #3.3.1 # I tried to use cols_32k = selector.get_support(indices=True) # Then get selector.pvalues_[cols_32k] # I found that result is same as what I do (sort pvalues then convert it to a list and find first K p values) for j in k: selector = SelectKBest(f_classif, j) new_X = selector.fit_transform(X_train, y_train) pp = np.sort(selector.pvalues_) pp = pp.tolist() if j == 5: line1.append(5) line1 = line1 + pp[:j] # 3.3.2 if i == 1: clf = SVC(kernel='linear',max_iter=1000) if i == 2: clf = SVC(kernel='rbf',gamma=2,max_iter=1000) if i == 3: clf = RandomForestClassifier(n_estimators=10, max_depth=5) if i == 4: clf = MLPClassifier(alpha=0.05) if i == 5: clf = AdaBoostClassifier() # for 1K part new_X = selector.fit_transform(X_1k, y_1k) clf.fit(new_X,y_1k) y_pred_1 = clf.predict(selector.transform(X_test)) matrix_1 = confusion_matrix(y_test,y_pred_1) line7.append(accuracy(matrix_1)) # 3.3.3 (a) get index of 5 features cols_1k = selector.get_support(indices=True) print(cols_1k) # for 32K part new_X = selector.fit_transform(X_train, y_train) clf.fit(new_X,y_train) y_pred_32 = clf.predict(selector.transform(X_test)) matrix_32 = confusion_matrix(y_test,y_pred_32) line7.append(accuracy(matrix_32)) #3.3.3 (a) get index of 5 features cols_32k = selector.get_support(indices=True) print(cols_32k) #3.3.3(a) find common features line8.append(list(set(cols_1k) & set(cols_32k))) if j == 10: line2.append(10) line2 = line2 + pp[:j] if j == 20: line3.append(20) line3 = line3 + pp[:j] if j == 30: line4.append(30) line4 = line4 + pp[:j] if j == 40: line5.append(40) line5 = line5 + pp[:j] if j == 50: line6.append(50) line6 = line6 + pp[:j] with open( 'a1_3.3.csv', 'w', newline='') as csv_file: writer = csv.writer(csv_file, delimiter=',') writer.writerow(line1) writer.writerow(line2) writer.writerow(line3) writer.writerow(line4) writer.writerow(line5) writer.writerow(line6) writer.writerow(line7) writer.writerow(line8) print("Class33 done!") def class34( filename, i ): ''' This function performs experiment 3.4 Parameters filename : string, the name of the npz file from Task 2 i: int, the index of the supposed best classifier (from task 3.1) ''' print("Doing class34") print(i) X_train_list = [] y_train_list = [] X_test_list = [] y_test_list = [] fold_1 = [] fold_2 = [] fold_3 = [] fold_4 = [] fold_5 = [] p_values = [] #read data and use Kfold to make 5 folds. data = np.load(filename) data = data['arr_0'] X, y = data[:,0:-1], data[:,-1] kf = KFold(n_splits=5,shuffle=True) for train_index, test_index in kf.split(X): X_train_list.append(X[train_index]) X_test_list.append(X[test_index]) y_train_list.append(y[train_index]) y_test_list.append(y[test_index]) for k in range(5): accuracy_list = [] X_train = X_train_list[k] X_test = X_test_list[k] y_train = y_train_list[k] y_test = y_test_list[k] #1.for clf linear clf_linear = SVC(kernel='linear',max_iter=1000) clf_linear.fit(X_train, y_train) y_pred_linear = clf_linear.predict(X_test) matrix_linear = confusion_matrix(y_test, y_pred_linear) accuracy_list.append(accuracy(matrix_linear)) #2.for clf rbf clf_rbf = SVC(kernel='rbf',gamma=2,max_iter=1000) clf_rbf.fit(X_train, y_train) y_pred_rbf = clf_rbf.predict(X_test) matrix_rbf = confusion_matrix(y_test, y_pred_rbf) accuracy_list.append(accuracy(matrix_rbf)) #3.for forest clf_forest = RandomForestClassifier(n_estimators=10, max_depth=5) clf_forest.fit(X_train,y_train) y_pred_forest = clf_forest.predict(X_test) forest_matrix = confusion_matrix(y_test, y_pred_forest) accuracy_list.append(accuracy(forest_matrix)) #4.for MLP clf_mlp = MLPClassifier(alpha=0.05) clf_mlp.fit(X_train,y_train) y_pred_mlp = clf_mlp.predict(X_test) mlp_matrix = confusion_matrix(y_test,y_pred_mlp) accuracy_list.append(accuracy(mlp_matrix)) #5.for AdaBoost clf_ada = AdaBoostClassifier() clf_ada.fit(X_train,y_train) y_pred_ada = clf_ada.predict(X_test) ada_matrix = confusion_matrix(y_test,y_pred_ada) accuracy_list.append(accuracy(ada_matrix)) if k == 0: fold_1 = accuracy_list if k == 1: fold_2 = accuracy_list if k == 2: fold_3 = accuracy_list if k == 3: fold_4 = accuracy_list if k == 4: fold_5 = accuracy_list matrix = np.array([fold_1,fold_2,fold_3,fold_4,fold_5]) a=matrix[:,i-1] for k in range(5): if k != i-1 : b = matrix[:,k] S = stats.ttest_rel(a, b) p_values.append(S.pvalue) with open( 'a1_3.4.csv', 'w', newline='') as csv_file: writer = csv.writer(csv_file, delimiter=',') writer.writerow(fold_1) writer.writerow(fold_2) writer.writerow(fold_3) writer.writerow(fold_4) writer.writerow(fold_5) writer.writerow(p_values) print("Class34 done!") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-i", "--input", help="the input npz file from Task 2", required=True) args = parser.parse_args() #TODO : complete each classification experiment, in sequence. X_train, X_test, y_train, y_test,iBest = class31(args.input) X_1k, y_1k = class32(X_train, X_test, y_train, y_test,iBest) class33(X_train, X_test, y_train, y_test, iBest, X_1k, y_1k) class34(args.input, iBest)
Yangnnnn/Identifying-political-persuasion-on-Reddit
code/a1_classify.py
a1_classify.py
py
15,122
python
en
code
0
github-code
36
30382531462
import os import random import numpy as np import pandas as pd from sklearn.model_selection import train_test_split import torch from torch.utils.data import TensorDataset import torch.nn as nn import torch.nn.functional as F import os from transformers import logging import warnings import yaml from pathlib import Path logging.set_verbosity_warning() task_path = './tasks/' train_path = './SST2_train.tsv' val_path = './SST2_dev.tsv' test_path = './SST2_test.tsv' device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def load_config(): with open(Path('/root/configs/config.yaml'), "r") as config_file: try: run_config = yaml.safe_load(config_file) except yaml.YAMLError as exc: print(exc) return {} return run_config def transform_dataset(transformer, dataset, dataloader_flag = False): if transformer is None: dataset_new = dataset if dataloader_flag: feature_loader =torch.utils.data.DataLoader(dataset_new, batch_size = 128, shuffle = True) return feature_loader return dataset_new X = dataset.features X = X.reshape(X.shape[0], -1) feature = torch.tensor(transformer.fit_transform(X)).float() targets = dataset.targets.clone().detach() dataset_new = TensorDataset(feature, targets) dataset_new.features = feature dataset_new.targets = targets if dataloader_flag: feature_loader =torch.utils.data.DataLoader(dataset_new, batch_size = 512, shuffle = False) return feature_loader return dataset_new def create_feature_loader(model, data, file_path, batch_size = 512, shuffle= False): if os.path.isfile(file_path): print("loading from dataset") feature_dataset = torch.load(file_path) else: print("reconstruct") feature_dataset = model.get_feature_dataset(data, file_path) feature_loader =torch.utils.data.DataLoader(feature_dataset, batch_size=batch_size, shuffle = shuffle) return feature_loader
XiaoyanAmy/HLT_Coursework
src/util.py
util.py
py
2,186
python
en
code
0
github-code
36
14859359219
''' Created on 2017��3��23�� @author: gb ''' import sys import os import jieba import gensim,logging import sys def savefile(savepath,content): fp=open(savepath,"w") fp.write(content) fp.close() def readfile(path): fp=open(path,"r") content=fp.read() fp.close() return content corpus_path="train_corpus_small/" seg_path="train_corpus_seg/" catelist=os.listdir(corpus_path) print(catelist) for mydir in catelist: class_path=corpus_path+mydir+"/" seg_dir=seg_path+mydir+'/' if not os.path.exists(seg_dir): os.makedirs(seg_dir) file_list=os.listdir(class_path) for file_path in file_list: fullname=class_path+file_path content=readfile(fullname).strip() content=content.replace("\r\n","").strip() content_seg=jieba.cut(content) savefile(seg_dir+file_path," ".join(content_seg)) print("jieba end")
guob1l/gensimW2V
jieba.py
jieba.py
py
966
python
en
code
4
github-code
36
72002642984
import os.path from django.http import HttpResponse from cmis_storage.storage import CMISStorage def get_file(request, path): """ Returns a file stored in the CMIS-compatible content management system :param path: The full path of the file within the CMS """ _, filename = os.path.split(path) storage = CMISStorage() stream = storage.open_stream(path) response = HttpResponse() response['Content-Disposition'] = 'attachment; filename=%s' % filename response.write(stream.read()) return response
JoseTomasTocino/cmis_storage
cmis_storage/views.py
views.py
py
547
python
en
code
1
github-code
36
6044301694
import socket, threading HEADER = 64 #Constante para número de bytes por mensagem FORMAT = 'utf-8' #Constante para decodificar mensagens DISCONNECT_MESSAGE = '!disconnect' #Constante para desconectar o cliente print("## SETUP DO SERVIDOR ##") ip = input("INSIRA UM ENDEREÇO DE IP OU 'localhost'\n") port = 80 server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #AF_INET define IPV4 e SOCK_STREAM define o protocolo como TCP server.bind((ip, port)) print ("-------------------------") def receivedCheckSum(receivedMsg, packets, checkSum): # Divisao em pacotes c1 = receivedMsg[0:packets] c2 = receivedMsg[packets:2 * packets] c3 = receivedMsg[2 * packets:3 * packets] c4 = receivedMsg[3 * packets:4 * packets] # Calcula a soma dos pacotes recebidos, incluindo o checkSum para conferir posteriormente o resultado somaRecebida = bin(int(c1, 2) + int(c2, 2) + int(c3, 2) + int(c4, 2) + int(checkSum))[2:] # Calculo do overflow if len(somaRecebida) > packets: x = len(somaRecebida) - packets somaRecebida = bin(int(somaRecebida[0:x], 2) + int(somaRecebida[x:], 2))[2:] # Calculo do complemento da soma somaRecebida = '' for i in somaRecebida: if i == '1': somaRecebida += '0' else: somaRecebida += '1' return somaRecebida def detectaErroCheckSum(somaRecebida, checkSum): somaFinal = bin(int(checkSum, 2) + int(somaRecebida, 2))[2:] compara = '' for i in somaFinal: if i == '1': compara += '0' else: compara += '1' if int(compara, 2) == 0: print("CheckSum recebido = 0\nMensagem recebida com sucesso!") else: print("CheckSum recebido != 0\nMensagem corrompida") def handle_client(conn, addr): print(f"[*] NOVA CONEXÃO: {addr} conectado...") connected = True while connected: msg_length = conn.recv(HEADER).decode(FORMAT) if msg_length: msg_length = int(msg_length) msg = conn.recv(msg_length).decode(FORMAT) if msg == DISCONNECT_MESSAGE: connected = False print(f"[{addr}] {msg}") conn.send("[*] ACK!".encode(FORMAT)) conn.close() def start(): server.listen() #Começa a escutar por conexões print(f"[*] Escutando {ip}, {port}") while True: conn, addr = server.accept() thread = threading.Thread(target = handle_client, args = (conn,addr)) thread.start() #Inicia o handle_client em thread com conn e addr passados como parâmetros print(f"[*] CONEXÕES ATIVAS: {threading.activeCount() - 1}") print("[*] Servidor Iniciando...") start()
vnvz/App-Cliente-Servidor
serverTCP.py
serverTCP.py
py
2,720
python
pt
code
0
github-code
36
72516589224
# settings.py import os from os.path import join, dirname from dotenv import load_dotenv dotenv_path = join(dirname(__file__), '.env') load_dotenv(dotenv_path) # Accessing variables. NUM_REQUESTS = os.getenv('NUM_REQUESTS') URL = os.getenv('URL') SECRET = os.getenv('SECRET') SLEEP_SECONDS = os.getenv('SLEEP_SECONDS') # Using variables. print('Num Requests: ' + NUM_REQUESTS) print('URL: ' + URL) print('Secret: ' + SECRET) print('Sleep Seconds: ' + SLEEP_SECONDS)
blainemincey/generateApiRequests
settings.py
settings.py
py
470
python
en
code
0
github-code
36
38790064496
""" This script shows how pedestrian detections and robot data can be converted to a spatio-temporal grid The output data of this script can then be used to train a CoPA-Map model """ import pandas as pd from copa_map.model.Gridifier import Gridifier, GridParams from copa_map.model.InitInducing import InducingInitializer from copa_map.util.occ_grid import OccGrid from copa_map.util import util as ut from copa_map.util import fov from os.path import join, exists from copy import copy import zipfile data_folder = join(ut.abs_path(), "data") csv_names = ["atc_10days_path_pedest_train.csv", "atc_10days_path_robot_train.csv", "atc_4days_path_pedest_test.csv", "atc_4days_path_robot_test.csv"] # Extract the csv files from the zipped file all_csv_exist = all([exists(join(data_folder, name)) for name in csv_names]) if not all_csv_exist: print("Extracting csv files from zip file...") with zipfile.ZipFile(join(data_folder, 'atc_rob_path_ped_detections.zip'), 'r') as zip_ref: zip_ref.extractall(data_folder) a = 1 # Create pd dataframes from csv files # The csv files with pedestrian data (*_pedest_*) will be converted to a dataframe of the following form: # [pos_x, pos_y, tidx_bin, t] x [n_det] # pos_x: First spatial dim of detected pedestrian # pos_y: Second spatial dim of detected pedestrian # tidx_bin: The bin where the detection falls into. The data is already pre binned to a bin size of one hour # t: timestamp of the detection # The csv files with robot data (*_robot_*) will be converted to a dataframe of the following form: # [robot_x, robot_y, delta_t, tidx_bin, t] x [n_rob] # The dataframe then contains the positions of the robot # robot_x: First spatial dim of the robot # robot_y: Second spatial dim of the robot # delta_t: Dwell/rest time of the robot at the corresponding spatial location # tidx_bin: The bin where the robot position falls into. The data is already pre binned to a bin size of one hour # t: timestamp of the robot pose df_data_train = pd.read_csv(join(data_folder, csv_names[0]), index_col=0) df_rob_train = pd.read_csv(join(data_folder, csv_names[1]), index_col=0) # Also create for test data, since we only want to test at locations that were visited during training # The values of the robot's poses correspond to the values from the training data, but the timestamps were adjusted df_data_test = pd.read_csv(join(data_folder, csv_names[2]), index_col=0) df_rob_test = pd.read_csv(join(data_folder, csv_names[3]), index_col=0) # Read the occupancy map to define the location of the grid occ_map = OccGrid.from_ros_format(path_yaml=join(data_folder, "atc_map.yaml")) # Overwrite default params of the grid with these params # For all default params, see class definition of GridParams class params_grid_train = GridParams(cell_resolution=0.5, origin=occ_map.orig, rotation=occ_map.rotation, width=occ_map.width, height=occ_map.height, rate_min=1e-5, bin_size=3600) params_grid_test = copy(params_grid_train) print("Creating grid for training data") gf_train = Gridifier(occ_map=occ_map, fov=fov.Circle(r=3.5), params=params_grid_train) gf_train.setup_data(df_data_train, df_rob_train) print("Creating grid for test data") gf_test = Gridifier(occ_map=occ_map, fov=fov.Circle(r=3.5), params=params_grid_test, create_gt=True) gf_test.setup_data(df_data_test, df_rob_test) # Create the inducing points by clustering # There are two methods implemented: # 3D-KMeans: Clustering over the complete input matrix X with targets as weights # 2D-KMeans: Do separate clustering steps for each time bin. The number of clusters for every bin follows from # (number of spatial cells of the bin)/(number of all datapoints) * (number of all inducing points) print("Clustering for initial inducing point selection...") init_inducing = InducingInitializer(X=gf_train.get_input_points(), Y_all=gf_train.get_observations(), alpha=0.02) init_inducing.get_init_inducing(method="2D-KMeans") path_train = join(data_folder, "grid_atc_50cm_60min_train_xy.csv") path_test = join(data_folder, "grid_atc_50cm_60min_test_xy.csv") path_inducing = join(data_folder, "grid_atc_50cm_60min_train_z.csv") print("Saving training data to " + str(path_train)) gf_train.output_to_text(path_train) print("Saving inducing point data to " + str(path_inducing)) init_inducing.output_to_text(path_inducing) print("Saving test data to " + str(path_test)) gf_test.output_to_text(path_test) print("All data saved. Run 02_copa_map_atc_train.py to train the model.") plot = True if plot: print("Plotting training data") gf_train.plot()
MarvinStuede/copa-map
src/copa_map/examples/01_atc_gridify_data.py
01_atc_gridify_data.py
py
4,685
python
en
code
0
github-code
36
6722106434
from PyQt4 import QtGui as qg, QtCore as qc try: _fromUtf8 = qc.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s class ListWidgetController: def __init__(self, widget): self.widget = widget def show_in_gadgets_list(self, gadgets): self.widget.clear() for gadget in gadgets: cell = '<pre>' cell += '<b>%s</b>\n' % gadget['address'] for instruction in gadget['instructions']: cell += '%s\n' % instruction cell += '</pre>' cell = qc.QString(cell) item = qg.QListWidgetItem(cell, self.widget) self.widget.insertItem(self.widget.count(), item)
Tanesh1701/ropa
ropa/gui/controller/list_widget_controller.py
list_widget_controller.py
py
710
python
en
code
null
github-code
36
16874206716
from utils.rabbit_controller import RabbitMqController from utils.tools import Tools from utils.verifier import Verifier from utils.watcher import Watcher if __name__ == "__main__": """ This is the SAAS Client! Results will be under the server http://3.73.75.114:5000/ 1. Get all md5 of /bin 2. Watch changes in loop 3. Every change get list of events 4. Go over the event- find the file 5. Check if the file changed 6. Check if its cryptominer -> by strings/ filename. 7. Check if the file is in a md5 database of malicious feed 8. do ps -a 9. Check if there is a reverse_shell 10. do tcpdump, check if there is malicious ip / domain 11. If one of the above is true: Write it to the screen. 12. EVERY 1 MIN add cpu usage for a map of ps. Check if something is weird (cryptominer). After 30min delete last one. """ verifier = Verifier() tools = Tools() watcher = Watcher() files_dict = tools.get_md5() resources = [] alerts = [] while True: controller = RabbitMqController() alerts.extend(verifier.verify_malware_dict(files_dict, is_send_to_rabbit=True)) for alert in alerts: controller.send_alert(alert) alerts = [] controller.connection.close() events = watcher.watch() alert_from_verifier, resources = verifier.verify_resources(resources, is_send_to_rabbit=True) alerts.extend(alert_from_verifier) alerts.extend(verifier.verify_filesystem_event(events, is_send_to_rabbit=True)) alerts.extend(verifier.verify_cryptominer(events, is_send_to_rabbit=True)) alerts.extend(verifier.verify_reverse_shell(events, is_send_to_rabbit=True)) alerts.extend(verifier.verify_request(is_send_to_rabbit=True)) files_dict = tools.get_md5()
Oreldm/RuntimeDefender
saas/client.py
client.py
py
1,910
python
en
code
0
github-code
36
25810655781
from datetime import datetime import unittest from mongoengine import Document, StringField, IntField from eve.exceptions import SchemaException from eve.utils import str_to_date, config from eve_mongoengine import EveMongoengine from tests import BaseTest, Eve, SimpleDoc, ComplexDoc, LimitedDoc, WrongDoc, SETTINGS class TestMongoengineFix(unittest.TestCase): """ Test fixing mongoengine classes for Eve's purposes. """ def create_app(self, *models): app = Eve(settings=SETTINGS) app.debug = True ext = EveMongoengine(app) ext.add_model(models) return app.test_client() def assertDateTimeAlmostEqual(self, d1, d2, precission='minute'): """ Used for testing datetime, which cannot (or we do not want to) be injected into tested object. Omits second and microsecond part. """ self.assertEqual(d1.year, d2.year) self.assertEqual(d1.month, d2.month) self.assertEqual(d1.day, d2.day) self.assertEqual(d1.hour, d2.hour) self.assertEqual(d1.minute, d2.minute) def _test_default_values(self, app, cls, updated_name='updated', created_name='created'): # test updated and created fields if they are correctly generated now = datetime.utcnow() d = cls(a="xyz", b=29) updated = getattr(d, updated_name) created = getattr(d, created_name) self.assertEqual(type(updated), datetime) self.assertEqual(type(created), datetime) self.assertDateTimeAlmostEqual(updated, now) self.assertDateTimeAlmostEqual(created, now) d.save() # test real returned values json_data = app.get('/simpledoc/').get_json() created_attr = app.application.config['DATE_CREATED'] created_str = json_data[config.ITEMS][0][created_attr] date_created = str_to_date(created_str) self.assertDateTimeAlmostEqual(now, date_created) d.delete() def test_default_values(self): app = self.create_app(SimpleDoc) self.assertEqual(SimpleDoc._db_field_map['updated'], '_updated') self.assertEqual(SimpleDoc._reverse_db_field_map['_updated'], 'updated') self.assertEqual(SimpleDoc._db_field_map['created'], '_created') self.assertEqual(SimpleDoc._reverse_db_field_map['_created'], 'created') self._test_default_values(app, SimpleDoc) def test_wrong_doc(self): with self.assertRaises(TypeError): self.create_app(WrongDoc) def test_nondefault_last_updated_field(self): # redefine to get entirely new class class SimpleDoc(Document): a = StringField() b = IntField() sett = SETTINGS.copy() sett['LAST_UPDATED'] = 'last_change' app = Eve(settings=sett) app.debug = True ext = EveMongoengine(app) ext.add_model(SimpleDoc) client = app.test_client() with app.app_context(): # to get current app's config self._test_default_values(client, SimpleDoc, updated_name='last_change') def test_nondefault_date_created_field(self): # redefine to get entirely new class class SimpleDoc(Document): a = StringField() b = IntField() sett = SETTINGS.copy() sett['DATE_CREATED'] = 'created_at' app = Eve(settings=sett) app.debug = True ext = EveMongoengine(app) ext.add_model(SimpleDoc) app = app.test_client() self._test_default_values(app, SimpleDoc, created_name='created_at')
MongoEngine/eve-mongoengine
tests/test_mongoengine_fix.py
test_mongoengine_fix.py
py
3,618
python
en
code
39
github-code
36
29834508518
from apps.maps.dto.map import MapDto from apps.maps.factory.choice import ChoiceFactory from apps.maps.models.map import Map class MapFactory: def __init__(self): self.dto = MapDto self.choice_factory = ChoiceFactory() def dto_from_model(self, item: Map) -> MapDto: return self.dto( unique_id=item.unique_id, email=item.user.email, choices=[self.choice_factory.dto_from_model(choice) for choice in item.choices.all()], is_paid=item.is_paid )
yellowpearl/realtor
src/apps/maps/factory/map.py
map.py
py
535
python
en
code
0
github-code
36
9328318313
import pandas as pd import matplotlib.pyplot as plt def gerar_grafico_tempos(caminho_entrada): df = pd.read_csv(caminho_entrada, delimiter=';') labels = ['Média', 'Mediana'] values = [df['Média'].iloc[0], df['Mediana'].iloc[0]] plt.bar(labels, values, color=['blue', 'green']) plt.title('Média e Mediana do Tempo de Correção') plt.ylabel('Horas') for i, v in enumerate(values): plt.text(i, v + 5, str(v), ha='center', va='bottom', fontweight='bold') plt.tight_layout() plt.savefig('grafico_tempos_correcao.png', dpi=300) plt.show() gerar_grafico_tempos("result_correction_time.csv")
ClaudioJansen/GitHub-Script
Tis 06/correction_time/graphic_generator.py
graphic_generator.py
py
645
python
pt
code
0
github-code
36
72240477223
from LinkedList import LinkedList from LinkedList import build_ll_from_lst from Node import Node """ Return data in Nth node from the end head could be None as well for empty list # Approaches 1) Use a queue to keek track of last pos_from_tail values, then dequeue at end 2) Use list, then return list[len(list)-pos_from_tail] 3) Two loops. Find length, then stop when you get to Node (if you can't use data structure) """ def get_nth_node_from_tail(node, pos_from_tail): if node is None: return None vals_list = [] while node is not None: vals_list.append(node.value) node = node.next return vals_list[len(vals_list) - pos_from_tail - 1] # Tests def test_get_nth_node_from_tail(): l1 = build_ll_from_lst([1,2,3]) node = l1.head assert get_nth_node_from_tail(node,0) == 3 assert get_nth_node_from_tail(node,1) == 2 assert get_nth_node_from_tail(node,2) == 1 if __name__ == "__main__": test_get_nth_node_from_tail()
bfortuner/problems
lists/python/nth_node_from_tail.py
nth_node_from_tail.py
py
938
python
en
code
37
github-code
36