index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
15,674
|
ohwani/molla
|
refs/heads/main
|
/post/models.py
|
from django.db import models
# Create your models here.
# class Post(models.Model):
# user = models.ForeignKey(settings.AUTH_USER_MODEL)
# title = models.CharField(max_length=120)
# slug = models.SlugField(unique=True)
# image = models.ImageField()
# content = models.TextField()
# create_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# update_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# def __str__(self):
# return self.title
|
{"/accounts/views.py": ["/accounts/serializers.py", "/accounts/models.py"], "/accounts/serializers.py": ["/accounts/models.py"], "/accounts/models.py": ["/accounts/regex.py"]}
|
15,675
|
ohwani/molla
|
refs/heads/main
|
/accounts/serializers.py
|
# from django.contrib.auth import authenticate
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
# from rest_framework_simplejwt.serializers import TokenObtainSerializer
# from rest_framework_simplejwt.tokens import RefreshToken
from .models import User
import re
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the User model.

    Enforces a unique email (via UniqueTogetherValidator) and a password
    policy of at least 8 characters containing a letter, a digit and a
    special character; `password` is never echoed back (write_only).
    """

    class Meta:
        model = User
        fields = '__all__'
        extra_kwargs = {
            'password' : {'write_only': True}
        }
        validators = [
            UniqueTogetherValidator(
                queryset = User.objects.all(),
                fields = ['email'],
                message = 'This email already exists'
            )
        ]

    # Compiled once at import time instead of per validate() call.
    # Raw string avoids invalid-escape warnings for \d.  (The "$@$"
    # run repeats "$" inside the character class -- harmless, kept to
    # preserve the accepted character set exactly.)
    PASSWORD_RE = re.compile(r'^(?=.*[A-Za-z])(?=.*\d)(?=.*[$@$!%*#?&])[A-Za-z\d$@$!%*#?&]{8,}$')

    def validate(self, attrs):
        """Cross-field validation: password policy + confirmation match.

        Raises serializers.ValidationError when the password is missing
        or too weak, or when password and password2 differ.
        """
        password = attrs.get('password', None)
        password2 = attrs.get('password2', None)
        # A missing password now yields a field error instead of the
        # KeyError the original raised via attrs['password'].
        if password is None or not self.PASSWORD_RE.match(password):
            raise serializers.ValidationError({'password': 'Please check your password.'})
        if password != password2:
            raise serializers.ValidationError({'password': 'Passwords must match.'})
        return attrs
# class LoginSerializer(serializers.Serializer):
# class Meta:
# model = User
# fields = ['email', 'password']
# def validate(self, attrs):
# email = attrs.get('email', None)
# password = attrs.get('password', None)
# user = authenticate(email=email, password=password)
# if user is None:
# raise serializers.ValidationError("Invalid login credentials")
# try:
# refresh = RefreshToken.for_user(user)
# refresh_token = str(refresh)
# access_token = str(refresh.access_token)
# update_last_login(None, user)
# validation = {
# 'access': access_token,
# 'refresh': refresh_token,
# 'email': user.email,
# 'role': user.role,
# }
# return validation
# except AuthUser.DoesNotExist:
# raise serializers.ValidationError("Invalid login credentials")
# def validate_email(self, attrs):
# email = User.objects.filter(email=attrs)
# if email.exists():
# raise serializers.ValidationError('This email already exists')
# return attrs
|
{"/accounts/views.py": ["/accounts/serializers.py", "/accounts/models.py"], "/accounts/serializers.py": ["/accounts/models.py"], "/accounts/models.py": ["/accounts/regex.py"]}
|
15,728
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/training/__init__.py
|
"""
The :mod:'llstring.training' module implements a trainer
which builds an IDF from raw text input (either from file or list)
"""
from .idf_trainer import IDFTrainer
__all__ = ['IDFTrainer']
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,729
|
escap-data-hub/LLString
|
refs/heads/master
|
/examples/norm.py
|
#! /usr/bin/env python
# levenshtein_example.py
#
# Example script to demonstrate Levenshtein string-match classifier
#
# Copyright 2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import os
import logging
import cPickle as pickle
import sys
import io
from llstring.utilities.normalization.latin_normalization import *
import re
#
# Logging
#
LOG_LEVEL = logging.DEBUG
logging.basicConfig(level=LOG_LEVEL,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
logger = logging.getLogger(__name__)
if __name__ == "__main__":
normalizer = MITLLLatinNormalizer()
skip = 1
for file in sys.argv:
if (skip == 1):
skip = 0
else:
with io.open(file,'r',encoding='utf8') as f:
for line in f:
words = line.split()
first = words[0]
line = line[len(first):]
line = normalizer.normalize_unicode_composed(unicode(line))
line = normalizer.remove_html_markup(line)
line = re.sub(r'\#[a-zA-Z0-9_]+', ' ',line,flags=re.UNICODE) # remove hashtags
line = normalizer.remove_twitter_meta(line)
line = normalizer.remove_nonsentential_punctuation(line)
line = normalizer.remove_word_punctuation(line)
line = normalizer.remove_repeats(line)
line = normalizer.clean_string(line)
if (line == ' '): line = ''
both = words[0] + "\t" + line
print both
#print newline
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,730
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/training/idf_trainer.py
|
#!/usr/bin/env python
# idf_trainer.py
#
# Class to learn IDF weighting from training data
#
# Copyright 2015-2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie K. Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import cPickle as pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
class IDFTrainer:
    """ Class to learn IDF weighting from training data """

    # Logging (class-level: configures the root logger once at import)
    LOG_LEVEL = logging.INFO
    logging.basicConfig(level=LOG_LEVEL,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    logger = logging.getLogger(__name__)

    def __init__(self,min_df=2,norm="l2"):
        """ Constructor

        min_df: minimum document frequency for a term to enter the vocabulary
        norm:   normalization scheme handed to TfidfTransformer
        """
        self.cv = CountVectorizer(min_df=min_df)
        # Pass by keyword so the argument cannot silently land on the
        # wrong positional parameter if sklearn's signature changes.
        self.tfidf = TfidfTransformer(norm=norm)
        self.LOG_IDF = None
        self.CORPUS_VOCAB = None
        self.OOV_IDF_VAL = 0 #min idf value to assign for out-of-vocabulary terms
        self.IDF_MODEL = dict()

    def compute_idf(self,corpus):
        """
        Compute IDF using corpus.
        Per sklearn conventions, "corpus" can be either a:
            file: a file object for a file containing content (newline separated)
            content: a iterable containing all the data in memory (i.e. a list)
            filename: list of filenames of documents in which content is contained
        """
        self.cv.fit_transform(corpus)
        self.logger.debug(self.cv.vocabulary_)
        self.CORPUS_VOCAB = self.cv.vocabulary_
        self.logger.debug(self.CORPUS_VOCAB)

        # if corpus is file object, seek back to beginning of file...
        # NOTE(review): `file` is the py2 builtin type; this branch is
        # py2-only as written.
        if isinstance(corpus,file):
            corpus.seek(0)

        freq_term_matrix = self.cv.transform(corpus)
        self.tfidf.fit(freq_term_matrix)
        self.LOG_IDF = self.tfidf.idf_
        self.N = freq_term_matrix.shape[0] #num of "docs" processed

        if isinstance(corpus,file):
            corpus.close()

        # Compute OOV_IDF_VAL: min idf value to assign for out-of-vocabulary terms.
        # float() guards against py2 integer division, which silently
        # truncated N/(nt+1) and skewed the OOV idf value.
        nt = 1
        self.OOV_IDF_VAL = math.log(float(self.N)/(nt+1)) + 1

        # collect model components
        self.IDF_MODEL['idf'] = self.LOG_IDF
        self.IDF_MODEL['corpus_vocab'] = self.CORPUS_VOCAB
        self.IDF_MODEL['oov_idf_val'] = self.OOV_IDF_VAL

    def save_model(self,fnameout):
        """ Save-out learned IDF dictionary and associated metadata (e.g. self.IDF_MODEL) """
        self.logger.info("saving IDF model to {0}".format(fnameout))
        # with-block closes the handle (the original leaked the open file)
        with open(fnameout,"wb") as fout:
            pickle.dump(self.IDF_MODEL,fout)
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,731
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/matching/__init__.py
|
"""
The :mod:'llstring.matching' module implements classifiers
based on basic string matching algorithms: Levenshtein Distance,
Jaro-Winkler Similarity and Soft TF-IDF Similarity.
"""
from .mitll_string_matcher import MITLLStringMatcher
from .softtfidf import Softtfidf
__all__ = ['MITLLStringMatcher','Softtfidf']
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,732
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/matching/mitll_string_matcher.py
|
#!/usr/bin/env python
# mitll_string_matcher.py
#
# MITLLSTringMatcher:
# SKLEARN compatable classifier implementing string matching techniques:
# - Levenshtein Distance
# - Jaro-Winkler
# - Soft TF-IDF
#
# Copyright 2015 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Imports
import logging
import numpy as np
import cPickle as pickle
import jellyfish
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import LogisticRegression as LR
from sklearn.metrics import roc_auc_score
from .softtfidf import Softtfidf
from ..utilities import normalization as normutils
class MITLLStringMatcher(BaseEstimator,ClassifierMixin):
    """
    MIT-LL String Matcher as Sklearn Estimator:
    String Matching Techniques:
    - Levenshtein Distance
    - Jaro-Winkler
    - Soft TF-IDF

    Pipeline: string pairs -> text normalization -> raw similarity score
    -> Platt-scaled logistic regression (self.lr_) for calibration.
    NOTE(review): py2-only as written (xrange, unicode, list-returning zip).
    """

    # Logging (class-level: configures the root logger at import time)
    LOG_LEVEL = logging.INFO
    logging.basicConfig(level=LOG_LEVEL,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    logger = logging.getLogger(__name__)

    def __init__(self,algorithm='jw', stf_thresh=0.6, idf_model=None, text_normalizer = None):
        """ Initialize dict containing hyperparameters

        algorithm:       'lev' | 'jw' | 'stf'
        stf_thresh:      internal jaro-winkler threshold for soft tf-idf
        idf_model:       dict with keys 'idf', 'corpus_vocab', 'oov_idf_val'
        text_normalizer: 'latin' or None
        """
        # Sklearn convention: __init__ only stores the parameters so
        # clone()/GridSearchCV can re-create the estimator from them.
        self.algorithm = algorithm
        self.stf_thresh = stf_thresh
        self.idf_model = idf_model
        self.text_normalizer = text_normalizer

    #
    # Basic String Matching Functions
    #
    def levenshtein_similarity(self,s,t):
        """ Levenshtein Similarity """
        Ns = len(s); Nt = len(t);
        # Edit distance scaled by the longer string, mapped into [0,1].
        lev_sim = 1.0 - (jellyfish.levenshtein_distance(s,t))/float(max(Ns,Nt))
        return lev_sim

    def jaro_winkler_similarity(self,s,t):
        """ Jaro-Winkler Similarity """
        jw_sim = jellyfish.jaro_winkler(s,t)
        return jw_sim

    def soft_tfidf_similarity(self,s,t):
        """
        Soft TFIDF Similarity:
        This similarity measure is only meaningful when you have multi-word strings.
        For single words, this measure will return 0.0
        """
        stf = self.hyparams['matcher'] #soft tf-idf object
        # The Softtfidf score is directional; average both directions.
        tfidf_sim = 0.5*(stf.score(s,t)+stf.score(t,s))
        return tfidf_sim

    #
    # Utitlity Functions
    #
    def init_hyparams(self):
        """ Initialize hyper-parameters dict from the stored constructor params. """
        self.hyparams = dict()
        self.hyparams['match_fcn'] = None
        self.hyparams['algo'] = self.algorithm
        self.hyparams['txt_normer'] = self.text_normalizer

        if self.algorithm == 'lev': #levenshtein
            self.hyparams['match_fcn'] = self.levenshtein_similarity
        elif self.algorithm== 'jw': #jaro-winkler
            self.hyparams['match_fcn'] = self.jaro_winkler_similarity
        elif self.algorithm== 'stf': #softtfidf
            self.hyparams['match_fcn'] = self.soft_tfidf_similarity
            # stf needs two extra hyper-parameters
            self.hyparams['stf_thresh'] = self.stf_thresh
            self.hyparams['idf_model'] = self.idf_model

    def validate_hyparams(self):
        """ Basic hyperparameter input validation"""
        if self.hyparams['algo'] not in set(['lev','jw','stf']):
            raise ValueError("Value of algorithm has to be either 'lev','jw' or 'stf'. Got {0}".format(self.hyparams['algo']))

        if self.hyparams['txt_normer'] not in set(['latin',None]):
            raise ValueError("The only value of txt_normer currently support is 'latin' (or None)")

        if self.hyparams['algo'] == 'stf':
            # NOTE(review): bitwise `|` instead of `or` -- works here because
            # both operands are plain bools, but `or` would be idiomatic.
            if (self.hyparams['stf_thresh'] < 0) | (self.hyparams['stf_thresh'] > 1):
                # NOTE(review): ValueError is given two args; the message
                # prints as a tuple rather than one concatenated string.
                raise ValueError("Value of soft tf-idf's internal jaro-winkler threshold", \
                        "must be [0,1].")
            if self.hyparams['idf_model']:
                if set(self.hyparams['idf_model'].keys()) != set(['idf','corpus_vocab','oov_idf_val']):
                    raise ValueError("IDF model provided must contain only the following keys: ", \
                            "'idf', 'corpus_vocab', and 'oov_idf_val'.")
                if (not isinstance(self.hyparams['idf_model']['idf'],np.ndarray)) or \
                        (self.hyparams['idf_model']['idf'].dtype.type is not np.float64):
                    raise ValueError("idf_model['idf'] must be an np.ndarray of dtype np.float64")
                if not isinstance(self.hyparams['idf_model']['corpus_vocab'],dict):
                    raise ValueError("idf_model['corpus_vocab'] must be a dict.")
                if not isinstance(self.hyparams['idf_model']['oov_idf_val'],float):
                    raise ValueError("idf_model['oov_idf_val'] must be a float.")

    def init_algorithm(self):
        """ Validate hyperparameter inputs, init matcher object if neccessary"""
        self.validate_hyparams()

        # Initialize Soft TF-IDF matcher if needed
        if self.hyparams['algo'] == 'stf': #softtfidf
            self.hyparams['matcher'] = Softtfidf(self.hyparams['stf_thresh'],self.hyparams['idf_model'])

        if self.hyparams['txt_normer'] == 'latin':
            self.normalizer = normutils.latin_normalization.MITLLLatinNormalizer()
        else:
            self.normalizer = normutils.text_normalization.MITLLTextNormalizer() #generic normer

    def get_raw_similarities(self, X, y=None):
        """ Convert input string pairs X into a column vector of raw
        similarity scores; optionally marks non-conforming pairs in y. """
        #make sure we have [0,1] class encoding in y
        if y:
            if set(y) != set((0,1)):
                raise ValueError("y expects class labels to be from {0,1}")

        similarities = list()
        for i in xrange(len(X)):
            pair = X[i]
            # normalize() output is decoded from utf-8 bytes (py2 semantics)
            s = unicode(self.normalizer.normalize(pair[0]),'utf-8')
            t = unicode(self.normalizer.normalize(pair[1]),'utf-8')
            if (len(s) > 0) and (len(t) > 0):
                sim = self.hyparams['match_fcn'](s,t)
                similarities.append(sim)
            else:
                # Pair empty after normalization: score 0.0 and flag it.
                similarities.append(0.0)
                if y: y[i] = -1 #set y-value of non-conforming pair to -1

        # Column vector shape (n, 1) as expected by sklearn's LR.
        sims_array = np.asarray(similarities).reshape(-1,1)

        if y:
            return (sims_array,y)
        else:
            return sims_array

    def save_model(self,fnameout):
        """ Save model parameters out after fitting. """
        # NOTE(review): if fit()/load_model() never ran, self.lr_ does not
        # exist and this raises AttributeError, not the ValueError below.
        if self.lr_:
            model_out = dict()
            model_out['algo'] = self.hyparams['algo']
            model_out['txt_normer'] = self.hyparams['txt_normer']
            model_out['calibration'] = self.lr_
            if self.hyparams['algo'] == 'stf':
                model_out['stf_thresh'] = self.hyparams['stf_thresh']
                model_out['idf_model'] = self.hyparams['idf_model']
            # NOTE(review): file handle from open() is never closed here.
            pickle.dump(model_out,open(fnameout,"wb"))
            return self
        else:
            raise ValueError("save_model failed: No model has yet been fit or loaded.")

    def load_model(self,fnamein):
        """ Load model parameters. """
        model_in = pickle.load(open(fnamein,'rb')) # will throw I/O error if file not found

        self.init_hyparams() #initialize hyper-parameter dict
        self.hyparams['algo'] = model_in['algo']
        self.hyparams['txt_normer'] = model_in['txt_normer']
        self.lr_ = model_in['calibration']
        if model_in['algo'] == 'stf':
            self.hyparams['stf_thresh'] = model_in['stf_thresh']
            self.hyparams['idf_model'] = model_in['idf_model']

        self.init_algorithm() #validate hyparams (we assume object not fit when load_model called)
        return self

    #
    # Learning
    #
    def fit(self,X,y):
        """ Fit string matching models to training data
        Assuming X is list of tuples: (('s1',t1'),...,('sN',tN'))
        """
        y = y[:] #shallow copy y, b/c in-place operations to follow

        # Initialize hyper-parameter dict then algorithm
        self.init_hyparams(); self.init_algorithm()

        # Get string match scores
        (s,y) = self.get_raw_similarities(X,y)

        # Get rid of any non-conforming pairs (flagged y == -1 above).
        # NOTE(review): relies on py2 zip() returning a list; in py3 this
        # would be an iterator and data.remove() would fail.
        data = zip(s,y)
        for pair in reversed(data): #iterate backwards to remove items from "data"
                                    #so as not to mess up internal indexing of for-loop
            if pair[1] == -1:
                data.remove(pair)
        (s,y) = zip(*data)

        # Do Platt Scaling: calibrate raw similarity -> probability
        self.lr_ = LR(penalty='l1',class_weight='balanced')
        self.lr_.fit(s,y)

        return self

    #
    # Inference
    #
    def decision_function(self,X):
        """ Take input data, turn into decision """
        s = self.get_raw_similarities(X)
        return self.lr_.decision_function(s)

    def predict(self,X):
        """ Class predictions """
        s = self.get_raw_similarities(X)
        return self.lr_.predict(s)

    def predict_proba(self,X):
        """ Posterior match probabilities (need this for log-loss for CV """
        s = self.get_raw_similarities(X)
        return self.lr_.predict_proba(s)

    #
    # Evaluate
    #
    def score(self,X,y,sample_weight=None):
        """ Score matcher """
        # NOTE(review): AUC computed from hard 0/1 predictions; the usual
        # choice is decision_function/predict_proba scores -- confirm intent.
        return roc_auc_score(y,self.predict(X),sample_weight=sample_weight)
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,733
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/utilities/sampling/reservoir_sampler.py
|
#!/usr/bin/env python
# reservoir_sampler.py
#
# Perform uniform sampling from an (possibly infinite) input stream
#
# Copyright 2015-2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie K. Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import logging
class ReservoirSampler:
    """ Class to perform uniform sampling from an input stream """

    # Logging
    LOG_LEVEL = logging.INFO
    logging.basicConfig(level=LOG_LEVEL,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    logger = logging.getLogger(__name__)

    def __init__(self,K):
        """ Constructor """
        self.K = K        # reservoir capacity
        self.N = 0        # total items observed so far
        self.sample = []  # the reservoir itself

    def update_sample(self,item):
        """ Update sampler """
        self.N += 1
        if len(self.sample) >= self.K:
            # Reservoir full: keep item with probability K/N by drawing a
            # slot in [0, N); only slots below K land in the reservoir.
            slot = int(random.random()*self.N)
            if slot < self.K:
                self.sample[slot] = item
        else:
            # Reservoir not yet full: always keep the item.
            self.sample.append(item)

    def get_sample(self):
        """ Return sample """
        return self.sample
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,734
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/setup.py
|
#! /usr/bin/env python
# setup.py
#
# Setup and Install of llstring
#
# Copyright 2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Imports
from setuptools import setup, find_packages
# Setup: standard setuptools packaging metadata for the llstring library.
setup(name='llstring',
      version='0.0.1',
      description='MIT-LL String Processing and Matching Tools',
      url='https://g62code.llan.ll.mit.edu/cdagli/mitll-string-match',
      author='Charlie Dagli',
      author_email='dagli@ll.mit.edu',
      license='APLv2',
      packages=find_packages())  # auto-discover every package directory
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,735
|
escap-data-hub/LLString
|
refs/heads/master
|
/examples/soft_tfidf_example.py
|
#! /usr/bin/env python
# soft_tfidf_example.py
#
# Example script to demonstrate Soft TF-IDF string-match classifier
#
# Copyright 2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import os
import logging
import cPickle as pickle
from llstring.matching.mitll_string_matcher import MITLLStringMatcher
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
#
# Logging
#
LOG_LEVEL = logging.DEBUG
logging.basicConfig(level=LOG_LEVEL,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Input and Output Filenames
    exampledir = os.path.dirname(os.path.realpath(__file__))
    fnamein = os.path.join(exampledir,"data/input/match_training_data.pckl")

    # Load Training Data
    train = pickle.load(open(fnamein,"rb"))
    X = train['X'] #string pairs
    y = train['y'] #corresponding labels (1:match, 0:no-match)

    # Train/Test Splits (via sklearn)
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.3,random_state=0)

    #
    # Soft TF-IDF Matcher (from training data)
    #
    idf_model = pickle.load(open(os.path.join(exampledir,"data/output/models/english_socialmedia_idf.pckl"),'rb'))
    # ^ see idf_training_example.py

    stf_thresh = 0.6 # Internal JW threshold
    matcher = MITLLStringMatcher(algorithm='stf',text_normalizer='latin',idf_model=idf_model,stf_thresh=stf_thresh)
    # ^ Initialize Soft TF-IDF matcher
    matcher.fit(X_train,y_train) # Fit matcher to training data

    fname_model = os.path.join(exampledir,"data/output/models/english_socialmedia_stf_{0}.model".format(stf_thresh))
    # ^ Model-out filename
    matcher.save_model(fname_model) # Save model out

    posts = matcher.predict_proba(X_test) # Posterior probabilities of match
    preds = matcher.predict(X_test) # Predicted labels for test data
    confs = matcher.decision_function(X_test) # Confidence (as distance to hyperplane)
    score = matcher.score(X_test,y_test) # Return classificaton performance
    raw_sims = matcher.get_raw_similarities(X_test) # Return raw similarity scores (not probabilities)

    # Scoring an example string pair
    s = u"Abe Lincoln"; t = u"Abraham Lincoln Lab"
    post = matcher.predict_proba([(s,t)])[0][1] # Posterior probability of match
    pred = matcher.predict([(s,t)])[0] # Predicted label for pair
    logger.info("Example Match Posterior: {0}".format(post))

    #
    # Soft TF-IDF Matcher (from pre-trained model)
    #
    matcher2 = MITLLStringMatcher(algorithm='stf',text_normalizer='latin',stf_thresh=0.6)
    # ^ Initialize Soft TF-IDF matcher
    matcher2.load_model(os.path.join(exampledir,"data/output/models/english_socialmedia_stf_{0}.model".format(stf_thresh)))
    # ^ Load-in model
    posts2 = matcher2.predict_proba(X_test) # Posterior probabilities of match

    # BUGFIX: original compared posts2 to itself ((posts2 == posts2).all()),
    # which always passes; the round-trip test must compare the reloaded
    # model's posteriors against the freshly-trained model's.
    if (posts2 == posts).all(): logger.info("Soft TF-IDF Test: Pass")
    else: logger.info("Soft TF-IDF Test: Fail")

    #
    # Soft TF-IDF w/ hyper-parameter tuning (via sklearn Grid-Search) Example
    #
    matcher_stub = MITLLStringMatcher(algorithm='stf',idf_model=idf_model,text_normalizer = 'latin')
    # ^ Initialize Soft TF-IDF matcher stub
    param_grid = {'stf_thresh':[0.4,0.5,0.6,0.7,0.8,0.9]} # Setup hyper-parameter grid
    cv_matcher = GridSearchCV(matcher_stub,param_grid,cv=5,verbose=2) # Initialize GridSearchCV matcher of type
                                                                      # MITLLStringMatcher(algorithm='stf')
    cv_matcher.fit(X_train,y_train) # Re-train model on all training data using
                                    # (model fit to best performing hyper-parameter
                                    # combination)
    fname_model = os.path.join(exampledir,"data/output/models/english_socialmedia_stf_optimal.model")
    # ^ Model-out filename
    cv_matcher.best_estimator_.save_model(fname_model) # Save optimal model out
    logger.info("Best stf_thresh found by CV: {0}".format(cv_matcher.best_params_['stf_thresh']))
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,736
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/utilities/normalization/text_normalization.py
|
#!/usr/bin/env python
# text_normalization.py
#
# Generic Text Normalization Routines
#
# Copyright 2013-2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli & William M. Cambpell
# {dagli,wcampbell}@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import unicodedata
class MITLLTextNormalizer:
""" Text-Normalization Routines """
# Logging
LOG_LEVEL = logging.INFO
logging.basicConfig(level=LOG_LEVEL,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
logger = logging.getLogger(__name__)
def __init__(self):
    """ Constructor """
    # Build the utf-8 -> ascii rewrite table once per instance.
    self.rewrite_hash = self.create_utf8_rewrite_hash()
def normalize(self,ln):
    """ Text Line Normalizer

    Runs the generic normalization chain on one line of text:
    NFKC composition, ascii rewrite/filter, HTML-markup removal.
    Returns the cleaned line, or '' when only a single space remains.
    """
    # Various normalization routines -- pick and choose as needed
    ln = unicode(ln) #make sure we're in unicode (py2-only builtin)
    ln = self.normalize_unicode_composed(ln) #from base-class
    ln = self.filter_unicode(ln) #from base-class
    ln = self.remove_html_markup(ln) #from base class
    # NOTE(review): remove_html_markup is not visible in this chunk --
    # presumably defined further down this class; confirm.
    if (ln == ' '):
        ln = ''
    return ln
def normalize_unicode_composed(self,txt):
    """Return txt in composed (NFKC) Unicode normal form."""
    composed_form = unicodedata.normalize('NFKC', txt)
    return composed_form
def normalize_unicode_decomposed(self,txt):
""" Normalize unicode: Decomposed (i.e. expanded unicode) """
return unicodedata.normalize('NFKD', txt)
def filter_unicode(self,ln):
""" Filter Unicode """
out = ''
for i in xrange(0,len(ln)):
if (ord(ln[i]) < 0x7f):
out = out + ln[i]
elif (self.rewrite_hash.has_key(ln[i])):
out = out + self.rewrite_hash[ln[i]]
else:
out = out + " "
# Clean up extra spaces
out = re.sub('^\s+', '', out)
out = re.sub('\s+$', '', out)
out = re.sub('\s+', ' ', out)
out = re.sub('\s+.$', '.', out)
return out
def create_utf8_rewrite_hash (self):
"""
Rewrite utf-8 chars to ascii in a rational manner
Strictly speaking (and in python)
any ascii character >= 128 is not valid
"""
rewrite_hash = dict([])
rewrite_hash[u'\xA0'] = " " # NO-BREAK SPACE
rewrite_hash[u'\xA6'] = " " # BROKEN BAR
rewrite_hash[u'\xA7'] = " " # SECTION SIGN
rewrite_hash[u'\xAC'] = " " # NOT SIGN
rewrite_hash[u'\xAD'] = " " # SOFT HYPHEN
rewrite_hash[u'\xB6'] = " " # PILCROW SIGN
rewrite_hash[u'\xBC'] = " 1/4 " # VULGAR FRACTION ONE QUARTER
rewrite_hash[u'\xBD'] = " 1/2 " # VULGAR FRACTION ONE HALF
rewrite_hash[u'\xBE'] = " 3/4 " # VULGAR FRACTION THREE QUARTERS
rewrite_hash[u'\u0336'] = " " # COMBINING LONG STROKE OVERLAY
rewrite_hash[u'\u2000'] = " " # EN QUAD
rewrite_hash[u'\u2001'] = " " # EM QUAD
rewrite_hash[u'\u2009'] = " " # THIN SPACE
rewrite_hash[u'\u200A'] = " " # HAIR SPACE
rewrite_hash[u'\u200B'] = " " # ZERO WIDTH SPACE
rewrite_hash[u'\u200E'] = " " # LEFT-TO-RIGHT MARK
rewrite_hash[u'\u200F'] = " " # RIGHT-TO-LEFT MARK
rewrite_hash[u'\u2010'] = "-" # HYPHEN
rewrite_hash[u'\u2011'] = "-" # NON-BREAKING HYPHEN
rewrite_hash[u'\u2013'] = " " # EN DASH
rewrite_hash[u'\u2014'] = " " # EM DASH
rewrite_hash[u'\u2015'] = " " # HORIZONTAL BAR
rewrite_hash[u'\u2020'] = " " # DAGGER
rewrite_hash[u'\u2021'] = " " # DOUBLE DAGGER
rewrite_hash[u'\u2022'] = " " # BULLET
rewrite_hash[u'\u2023'] = " " # TRIANGULAR BULLET
rewrite_hash[u'\u2024'] = " " # ONE DOT LEADER
rewrite_hash[u'\u2025'] = " " # TWO DOT LEADER
rewrite_hash[u'\u2026'] = " " # HORIZONTAL ELLIPSIS
rewrite_hash[u'\u2027'] = " " # HYPHENATION POINT
rewrite_hash[u'\u2028'] = " " # LINE SEPARATOR
rewrite_hash[u'\u2029'] = "\n" # PARAGRAPH SEPARATOR
rewrite_hash[u'\u202A'] = " " # LEFT-TO-RIGHT EMBEDDING (???)
rewrite_hash[u'\u202B'] = " " # RIGHT-TO-LEFT EMBEDDING (???)
rewrite_hash[u'\u202C'] = " " # POP DIRECTIONAL FORMATTING (???)
rewrite_hash[u'\u202D'] = " " # LEFT-TO-RIGHT OVERRIDE
rewrite_hash[u'\u202E'] = " " # RIGHT-TO-LEFT OVERRIDE
rewrite_hash[u'\u202F'] = " " # NARROW NO-BREAK SPACE
rewrite_hash[u'\u203B'] = " " # REFERENCE MARK
rewrite_hash[u'\u206B'] = " " # ACTIVATE SYMMETRIC SWAPPING
rewrite_hash[u'\u206E'] = " " # NATIONAL DIGIT SHAPES
rewrite_hash[u'\u206F'] = " " # NOMINAL DIGIT SHAPES
rewrite_hash[u'\u2116'] = " " # NUMERO SIGN
rewrite_hash[u'\u2154'] = "2/3" # VULGAR FRACTION TWO THIRDS
rewrite_hash[u'\u2192'] = " " # RIGHTWARDS ARROW
rewrite_hash[u'\u21FC'] = " " # LEFT RIGHT ARROW WITH DOUBLE VERTICAL STROKE
rewrite_hash[u'\u2212'] = "-" # MINUS SIGN
rewrite_hash[u'\u23AF'] = " " # HORIZONTAL LINE EXTENSION
rewrite_hash[u'\u25BA'] = " " # BLACK RIGHT-POINTING POINTER
rewrite_hash[u'\u2665'] = " " # BLACK HEART SUIT
return rewrite_hash
def remove_html_markup (self,ln):
""" remove HTML style angle bracketed tags """
ln = re.sub('\<\S+\>', ' ', ln)
# remove market symbols
# ln = re.sub('\([A-Z0-9a-z\_]*\.[A-Z]+\:[^\)]+\)\,?', '', ln)
# remove web site URLs and links
ln = re.sub('https?:\/\/?\s*\S+\s', ' ', ln)
ln = re.sub('https?:\/\/?\s*\S+$', '', ln)
ln = re.sub('\(https?:\\\\\S+\)', ' ', ln)
ln = re.sub('\(?www\.\S+\)?', ' ', ln)
ln = re.sub('\[ID:[^\]]+\]', ' ', ln)
ln = re.sub('\[id:[^\]]+\]', ' ', ln)
ln = re.sub('\(PDF\)', ' ', ln)
# replace html special characters
ln = re.sub(r'—', ' ', ln)
ln = re.sub(r'\"\;', ' ', ln)
ln = re.sub(r'\&\#39\;', ' ', ln)
# Clean up extra spaces
ln = re.sub('^\s+', '', ln)
ln = re.sub('\s+$', '', ln)
ln = re.sub('\s+', ' ', ln)
return ln
def clean_string(self,s):
""" Strip leading characters, lower """
if isinstance(s,unicode):
ss = s.lower()
else:
ss = unicode(s.lower(),"utf-8")
if len(ss) == 0:
ss = u''
return ss
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,737
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/__init__.py
|
"""
The :mod:'llstring' module implements classifiers
based on basic string matching algorithms (Levenshtein Distance,
Jaro-Winkler Similarity and Soft TF-IDF Similarity) as well
as provides a variety of basic string processing/normalization
tools.
"""
from pkgutil import extend_path as __extend_path
__path__ = __extend_path(__path__, __name__)
__all__ = ['matching','training','utilities']
import matching, training, utilities
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,738
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/utilities/normalization/__init__.py
|
"""
The :mod:'llstring.normalization' sub-package implements
generic and latin script text normalization. Included
as well are functions for web and social media normalization.
"""
from .text_normalization import MITLLTextNormalizer
from .latin_normalization import MITLLLatinNormalizer
__all__ = ['MITLLTextNormalizer','MITLLLatinNormalizer']
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,739
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/utilities/sampling/__init__.py
|
"""
The :mod:'llstring.sampling' sub-package implements
basic reservoir sampling: one-pass uniform sampling of
a large dataset.
"""
from .reservoir_sampler import ReservoirSampler
__all__ = ['ReservoirSampler']
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,740
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/utilities/normalization/latin_normalization.py
|
#!/usr/bin/env python
# latin_normalization.py
#
# Text Normalization Routines for Latin Script Text (including for Twitter data)
#
# Copyright 2013-2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: William M. Campbell and Charlie Dagli
# {wcampbell,dagli}@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
from .text_normalization import MITLLTextNormalizer
class MITLLLatinNormalizer(MITLLTextNormalizer):
"""
Text-Normalization Routines for Latin Script Text
"""
# Logging
LOG_LEVEL = logging.INFO
logging.basicConfig(level=LOG_LEVEL,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
logger = logging.getLogger(__name__)
def __init__(self):
""" Constructor """
MITLLTextNormalizer.__init__(self)
self.update_utf8_rewrite_hash()
def normalize(self,ln):
""" Normalize text """
ln = MITLLTextNormalizer.normalize(self,ln)
ln = self.convertUTF8_to_ascii(ln)
ln = self.remove_twitter_meta(ln)
ln = self.remove_nonsentential_punctuation(ln)
ln = self.remove_word_punctuation(ln)
ln = self.remove_repeats(ln)
if (ln == ' '):
ln = ''
return ln
def get_counts (self,msg):
""" Word Count """
counts = {}
for sent in msg:
f = sent.split(' ')
for w in f:
if (not counts.has_key(w)):
counts[w] = 0.0
counts[w] += 1
return counts
def remove_repeats (self,msg):
""" Remove Repeats """
# twitter specific repeats
msg = re.sub(r"(.)\1{2,}", r"\1\1\1", msg) # characters repeated 3 or more times
# laughs
msg = re.sub(r"(ja|Ja)(ja|Ja)+(j)?", r"jaja", msg) # spanish
msg = re.sub(r"(rs|Rs)(Rs|rs)+(r)?", r"rsrs", msg) # portugese
msg = re.sub(r"(ha|Ha)(Ha|ha)+(h)?", r"haha", msg) # english
return msg
def splitter(self,ln):
""" Line Splitter """
# horridly simple splitter
ln = ln.replace(". ", ".\n\n").replace("? ","?\n\n").replace("! ","!\n\n")
ln = ln.replace('."', '."\n\n')
f = ln.split("\n")
fout = []
for s in f:
s = s.rstrip()
s = re.sub(r'^\s+', '', s)
if (s!=""):
fout.append(s)
return fout
def convertUTF8_to_ascii(self,ln):
""" UTF8 to ASCII Converter """
out = ''
for i in xrange(0,len(ln)):
if (ord(ln[i]) < 0x7f):
out = out + ln[i]
elif (self.rewrite_hash.has_key(ln[i])):
out = out + self.rewrite_hash[ln[i]]
else:
out = out + " "
# Clean up extra spaces
out = re.sub('^\s+', '', out)
out = re.sub('\s+$', '', out)
out = re.sub('\s+', ' ', out)
out = re.sub('\s+.$', '.', out)
out = out.encode('ascii','ignore')
return out
def update_utf8_rewrite_hash (self):
""" Rewrite Hash """
# Strictly speaking (and in python) any ascii character >= 128 is not valid
# This tries to rewrite utf-8 chars to ascii in a rational manner
self.rewrite_hash[u'\xA1'] = " " # INVERTED EXCLAMATION MARK
self.rewrite_hash[u'\xA2'] = " cents " # CENT SIGNS
self.rewrite_hash[u'\xA3'] = " pounds " # POUND SIGN
self.rewrite_hash[u'\xA4'] = " " # CURRENCY SIGN
self.rewrite_hash[u'\xA5'] = " yen " # YEN SIGN
self.rewrite_hash[u'\xA8'] = " " # DIAERESIS
self.rewrite_hash[u'\xA9'] = " " # COPYRIGHT SIGN
self.rewrite_hash[u'\xAA'] = " " # FEMININE ORDINAL INDICATOR
self.rewrite_hash[u'\xAB'] = " " # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
self.rewrite_hash[u'\xAE'] = " " # REGISTERED SIGN
self.rewrite_hash[u'\xAF'] = " " # MACRON
self.rewrite_hash[u'\xB0'] = " degrees " # DEGREE SIGN
self.rewrite_hash[u'\xB1'] = " plus-or-minus " # PLUS-MINUS SIGN
self.rewrite_hash[u'\xB2'] = " " # SUPERSCRIPT TWO
self.rewrite_hash[u'\xB3'] = " "; # SUPERSCRIPT THREE
self.rewrite_hash[u'\xB4'] = "'" # ACUTE ACCENT
self.rewrite_hash[u'\xB5'] = " micro " # MICRO SIGN
self.rewrite_hash[u'\xB7'] = " " # MIDDLE DOT
self.rewrite_hash[u'\xB8'] = " " # CEDILLA
self.rewrite_hash[u'\xB9'] = " " # SUPERSCRIPT ONE
self.rewrite_hash[u'\xBA'] = " " # MASCULINE ORDINAL INDICATOR
self.rewrite_hash[u'\xBF'] = " " # INVERTED QUESTION MARK
self.rewrite_hash[u'\xC0'] = "A" # LATIN CAPITAL LETTER A WITH GRAVE
self.rewrite_hash[u'\xC1'] = "A" # LATIN CAPITAL LETTER A WITH ACUTE
self.rewrite_hash[u'\xC2'] = "A" # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
self.rewrite_hash[u'\xC3'] = "A" # LATIN CAPITAL LETTER A WITH TILDE
self.rewrite_hash[u'\xC4'] = "A" # LATIN CAPITAL LETTER A WITH DIAERESIS
self.rewrite_hash[u'\xC5'] = "A" # LATIN CAPITAL LETTER A WITH RING ABOVE
self.rewrite_hash[u'\xC6'] = "AE" # LATIN CAPITAL LETTER AE
self.rewrite_hash[u'\xC7'] = "C" # LATIN CAPITAL LETTER C WITH CEDILLA
self.rewrite_hash[u'\xC8'] = "E" # LATIN CAPITAL LETTER E WITH GRAVE
self.rewrite_hash[u'\xC9'] = "E" # LATIN CAPITAL LETTER E WITH ACUTE
self.rewrite_hash[u'\xCA'] = "E" # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
self.rewrite_hash[u'\xCB'] = "E" # LATIN CAPITAL LETTER E WITH DIAERESIS
self.rewrite_hash[u'\xCC'] = "I" # LATIN CAPITAL LETTER I WITH GRAVE
self.rewrite_hash[u'\xCD'] = "I" # LATIN CAPITAL LETTER I WITH ACUTE
self.rewrite_hash[u'\xCE'] = "I" # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
self.rewrite_hash[u'\xCF'] = "I" # LATIN CAPITAL LETTER I WITH DIAERESIS
self.rewrite_hash[u'\xD0'] = "Th" # LATIN CAPITAL LETTER ETH
self.rewrite_hash[u'\xD1'] = "N" # LATIN CAPITAL LETTER N WITH TILDE
self.rewrite_hash[u'\xD2'] = "O" # LATIN CAPITAL LETTER O WITH GRAVE
self.rewrite_hash[u'\xD3'] = "O" # LATIN CAPITAL LETTER O WITH ACUTE
self.rewrite_hash[u'\xD4'] = "O" # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
self.rewrite_hash[u'\xD5'] = "O" # LATIN CAPITAL LETTER O WITH TILDE
self.rewrite_hash[u'\xD6'] = "O" # LATIN CAPITAL LETTER O WITH DIAERESIS
self.rewrite_hash[u'\xD7'] = "x" # MULTIPLICATION SIGN
self.rewrite_hash[u'\xD8'] = "O" # LATIN CAPITAL LETTER O WITH STROKE
self.rewrite_hash[u'\xD9'] = "U" # LATIN CAPITAL LETTER U WITH GRAVE
self.rewrite_hash[u'\xDA'] = "U" # LATIN CAPITAL LETTER U WITH ACUTE
self.rewrite_hash[u'\xDB'] = "U" # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
self.rewrite_hash[u'\xDC'] = "U" # LATIN CAPITAL LETTER U WITH DIAERESIS
self.rewrite_hash[u'\xDD'] = "Y" # LATIN CAPITAL LETTER Y WITH ACUTE
self.rewrite_hash[u'\xDE'] = "Th" # LATIN CAPITAL LETTER THORN
self.rewrite_hash[u'\xDF'] = "ss" # LATIN SMALL LETTER SHARP S
self.rewrite_hash[u'\xE0'] = "a" # LATIN SMALL LETTER A WITH GRAVE
self.rewrite_hash[u'\xE1'] = "a" # LATIN SMALL LETTER A WITH ACUTE
self.rewrite_hash[u'\xE2'] = "a" # LATIN SMALL LETTER A WITH CIRCUMFLEX
self.rewrite_hash[u'\xE3'] = "a" # LATIN SMALL LETTER A WITH TILDE
self.rewrite_hash[u'\xE4'] = "a" # LATIN SMALL LETTER A WITH DIAERESIS
self.rewrite_hash[u'\xE5'] = "a" # LATIN SMALL LETTER A WITH RING ABOVE
self.rewrite_hash[u'\xE6'] = "ae" # LATIN SMALL LETTER AE
self.rewrite_hash[u'\xE7'] = "c" # LATIN SMALL LETTER C WITH CEDILLA
self.rewrite_hash[u'\xE8'] = "e" # LATIN SMALL LETTER E WITH GRAVE
self.rewrite_hash[u'\xE9'] = "e" # LATIN SMALL LETTER E WITH ACUTE
self.rewrite_hash[u'\xEA'] = "e" # LATIN SMALL LETTER E WITH CIRCUMFLEX
self.rewrite_hash[u'\xEB'] = "e" # LATIN SMALL LETTER E WITH DIAERESIS
self.rewrite_hash[u'\xEC'] = "i" # LATIN SMALL LETTER I WITH GRAVE
self.rewrite_hash[u'\xED'] = "i" # LATIN SMALL LETTER I WITH ACUTE
self.rewrite_hash[u'\xEE'] = "i" # LATIN SMALL LETTER I WITH CIRCUMFLEX
self.rewrite_hash[u'\xEF'] = "i" # LATIN SMALL LETTER I WITH DIAERESIS
self.rewrite_hash[u'\xF0'] = "th" # LATIN SMALL LETTER ETH
self.rewrite_hash[u'\xF1'] = "n" # LATIN SMALL LETTER N WITH TILDE
self.rewrite_hash[u'\xF2'] = "o" # LATIN SMALL LETTER O WITH GRAVE
self.rewrite_hash[u'\xF3'] = "o" # LATIN SMALL LETTER O WITH ACUTE
self.rewrite_hash[u'\xF4'] = "o" # LATIN SMALL LETTER O WITH CIRCUMFLEX
self.rewrite_hash[u'\xF5'] = "o" # LATIN SMALL LETTER O WITH TILDE
self.rewrite_hash[u'\xF6'] = "o" # LATIN SMALL LETTER O WITH DIAERESIS
self.rewrite_hash[u'\xF7'] = " divided by " # DIVISION SIGN
self.rewrite_hash[u'\xF8'] = "o" # LATIN SMALL LETTER O WITH STROKE
self.rewrite_hash[u'\xF9'] = "u" # LATIN SMALL LETTER U WITH GRAVE
self.rewrite_hash[u'\xFA'] = "u" # LATIN SMALL LETTER U WITH ACUTE
self.rewrite_hash[u'\xFB'] = "u" # LATIN SMALL LETTER U WITH CIRCUMFLEX
self.rewrite_hash[u'\xFC'] = "u" # LATIN SMALL LETTER U WITH DIAERESIS
self.rewrite_hash[u'\xFD'] = "y" # LATIN SMALL LETTER Y WITH ACUTE
self.rewrite_hash[u'\xFE'] = "th" # LATIN SMALL LETTER THORN
self.rewrite_hash[u'\xFF'] = "y" # LATIN SMALL LETTER Y WITH DIAERESIS
self.rewrite_hash[u'\u0100'] = "A" # LATIN CAPTIAL LETTER A WITH MACRON
self.rewrite_hash[u'\u0101'] = "a" # LATIN SMALL LETTER A WITH MACRON
self.rewrite_hash[u'\u0102'] = "A" # LATIN CAPITAL LETTER A WITH BREVE
self.rewrite_hash[u'\u0103'] = "a" # LATIN SMALL LETTER A WITH BREVE
self.rewrite_hash[u'\u0104'] = "A" # LATIN CAPITAL LETTER A WITH OGONEK
self.rewrite_hash[u'\u0105'] = "a" # LATIN SMALL LETTER A WITH OGONEK
self.rewrite_hash[u'\u0106'] = "C" # LATIN CAPITAL LETTER C WITH ACUTE
self.rewrite_hash[u'\u0107'] = "c" # LATIN SMALL LETTER C WITH ACUTE
self.rewrite_hash[u'\u0108'] = "C" # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
self.rewrite_hash[u'\u0109'] = "c" # LATIN SMALL LETTER C WITH CIRCUMFLEX
self.rewrite_hash[u'\u010A'] = "C" # LATIN CAPITAL LETTER C WITH DOT ABOVE
self.rewrite_hash[u'\u010B'] = "c" # LATIN SMALL LETTER C WITH DOT ABOVE
self.rewrite_hash[u'\u010C'] = "C" # LATIN CAPITAL LETTER C WITH CARON
self.rewrite_hash[u'\u010D'] = "c" # LATIN SMALL LETTER C WITH CARON
self.rewrite_hash[u'\u010E'] = "D" # LATIN CAPITAL LETTER D WITH CARON
self.rewrite_hash[u'\u010F'] = "d" # LATIN SMALL LETTER D WITH CARON
self.rewrite_hash[u'\u0110'] = "D" # LATIN CAPITAL LETTER D WITH STROKE
self.rewrite_hash[u'\u0111'] = "d" # LATIN SMALL LETTER D WITH STROKE
self.rewrite_hash[u'\u0112'] = "E" # LATIN CAPITAL LETTER E WITH MACRON
self.rewrite_hash[u'\u0113'] = "e" # LATIN SMALL LETTER E WITH MACRON
self.rewrite_hash[u'\u0114'] = "E" # LATIN CAPITAL LETTER E WITH BREVE
self.rewrite_hash[u'\u0115'] = "e" # LATIN SMALL LETTER E WITH BREVE
self.rewrite_hash[u'\u0116'] = "E" # LATIN CAPITAL LETTER E WITH DOT ABOVE
self.rewrite_hash[u'\u0117'] = "e" # LATIN SMALL LETTER E WITH DOT ABOVE
self.rewrite_hash[u'\u0118'] = "E" # LATIN CAPITAL LETTER E WITH OGONEK
self.rewrite_hash[u'\u0119'] = "e" # LATIN SMALL LETTER E WITH OGONEK
self.rewrite_hash[u'\u011A'] = "E" # LATIN CAPITAL LETTER E WITH CARON
self.rewrite_hash[u'\u011B'] = "e" # LATIN SMALL LETTER E WITH CARON
self.rewrite_hash[u'\u011C'] = "G" # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
self.rewrite_hash[u'\u011D'] = "g" # LATIN SMALL LETTER G WITH CIRCUMFLEX
self.rewrite_hash[u'\u011E'] = "G" # LATIN CAPITAL LETTER G WITH BREVE
self.rewrite_hash[u'\u011F'] = "g" # LATIN SMALL LETTER G WITH BREVE
self.rewrite_hash[u'\u0120'] = "G" # LATIN CAPITAL LETTER G WITH DOT ABOVE
self.rewrite_hash[u'\u0121'] = "g" # LATIN SMALL LETTER G WITH DOT ABOVE
self.rewrite_hash[u'\u0122'] = "G" # LATIN CAPITAL LETTER G WITH CEDILLA
self.rewrite_hash[u'\u0123'] = "g" # LATIN SMALL LETTER G WITH CEDILLA
self.rewrite_hash[u'\u0124'] = "H" # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
self.rewrite_hash[u'\u0125'] = "h" # LATIN SMALL LETTER H WITH CIRCUMFLEX
self.rewrite_hash[u'\u0126'] = "H" # LATIN CAPITAL LETTER H WITH STROKE
self.rewrite_hash[u'\u0127'] = "h" # LATIN SMALL LETTER H WITH STROKE
self.rewrite_hash[u'\u0128'] = "I" # LATIN CAPITAL LETTER I WITH TILDE
self.rewrite_hash[u'\u0129'] = "i" # LATIN SMALL LETTER I WITH TILDE
self.rewrite_hash[u'\u012A'] = "I" # LATIN CAPITAL LETTER I WITH MACRON
self.rewrite_hash[u'\u012B'] = "i" # LATIN SMALL LETTER I WITH MACRON
self.rewrite_hash[u'\u012C'] = "I" # LATIN CAPITAL LETTER I WITH BREVE
self.rewrite_hash[u'\u012D'] = "i" # LATIN SMALL LETTER I WITH BREVE
self.rewrite_hash[u'\u012E'] = "I" # LATIN CAPITAL LETTER I WITH OGONEK
self.rewrite_hash[u'\u012F'] = "i" # LATIN SMALL LETTER I WITH OGONEK
self.rewrite_hash[u'\u0130'] = "I" # LATIN CAPITAL LETTER I WITH DOT ABOVE
self.rewrite_hash[u'\u0131'] = "i" # LATIN SMALL LETTER DOTLESS I
self.rewrite_hash[u'\u0132'] = "IJ" # LATIN CAPITAL LIGATURE IJ
self.rewrite_hash[u'\u0133'] = "ij" # LATIN SMALL LIGATURE IJ
self.rewrite_hash[u'\u0134'] = "J" # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
self.rewrite_hash[u'\u0135'] = "j" # LATIN SMALL LETTER J WITH CIRCUMFLEX
self.rewrite_hash[u'\u0136'] = "K" # LATIN CAPITAL LETTER K WITH CEDILLA
self.rewrite_hash[u'\u0137'] = "k" # LATIN SMALL LETTER K WITH CEDILLA
self.rewrite_hash[u'\u0138'] = "k" # LATIN SMALL LETTER KRA
self.rewrite_hash[u'\u0139'] = "L" # LATIN CAPITAL LETTER L WITH ACUTE
self.rewrite_hash[u'\u013A'] = "l" # LATIN SMALL LETTER L WITH ACUTE
self.rewrite_hash[u'\u013B'] = "L" # LATIN CAPITAL LETTER L WITH CEDILLA
self.rewrite_hash[u'\u013C'] = "l" # LATIN SMALL LETTER L WITH CEDILLA
self.rewrite_hash[u'\u013D'] = "L" # LATIN CAPITAL LETTER L WITH CARON
self.rewrite_hash[u'\u013E'] = "l" # LATIN SMALL LETTER L WITH CARON
self.rewrite_hash[u'\u013F'] = "L" # LATIN CAPITAL LETTER L WITH MIDDLE DOT
self.rewrite_hash[u'\u0140'] = "l" # LATIN SMALL LETTER L WITH MIDDLE DOT
self.rewrite_hash[u'\u0141'] = "L" # LATIN CAPITAL LETTER L WITH STROKE
self.rewrite_hash[u'\u0142'] = "l" # LATIN SMALL LETTER L WITH STROKE
self.rewrite_hash[u'\u0143'] = "N" # LATIN CAPITAL LETTER N WITH ACUTE
self.rewrite_hash[u'\u0144'] = "n" # LATIN SMALL LETTER N WITH ACUTE
self.rewrite_hash[u'\u0145'] = "N" # LATIN CAPITAL LETTER N WITH CEDILLA
self.rewrite_hash[u'\u0146'] = "n" # LATIN SMALL LETTER N WITH CEDILLA
self.rewrite_hash[u'\u0147'] = "N" # LATIN CAPITAL LETTER N WITH CARON
self.rewrite_hash[u'\u0148'] = "n" # LATIN SMALL LETTER N WITH CARON
self.rewrite_hash[u'\u0149'] = "n" # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
self.rewrite_hash[u'\u014A'] = "N" # LATIN CAPITAL LETTER ENG
self.rewrite_hash[u'\u014B'] = "n" # LATIN SMALL LETTER ENG
self.rewrite_hash[u'\u014C'] = "O" # LATIN CAPITAL LETTER O WITH MACRON
self.rewrite_hash[u'\u014D'] = "o" # LATIN SMALL LETTER O WITH MACRON
self.rewrite_hash[u'\u014E'] = "O" # LATIN CAPITAL LETTER O WITH BREVE
self.rewrite_hash[u'\u014F'] = "o" # LATIN SMALL LETTER O WITH BREVE
self.rewrite_hash[u'\u0150'] = "O" # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
self.rewrite_hash[u'\u0151'] = "o" # LATIN SMALL LETTER O WITH DOUBLE ACUTE
self.rewrite_hash[u'\u0152'] = "oe" # LATIN CAPITAL LIGATURE OE
self.rewrite_hash[u'\u0153'] = "oe" # LATIN SMALL LIGATURE OE
self.rewrite_hash[u'\u0153'] = "R" # LATIN CAPITAL LETTER R WITH ACUTE
self.rewrite_hash[u'\u0154'] = "R" # LATIN CAPITAL LETTER R WITH ACUTE
self.rewrite_hash[u'\u0155'] = "r" # LATIN SMALL LETTER R WITH ACUTE
self.rewrite_hash[u'\u0156'] = "R" # LATIN CAPITAL LETTER R WITH CEDILLA
self.rewrite_hash[u'\u0157'] = "r" # LATIN SMALL LETTER R WITH CEDILLA
self.rewrite_hash[u'\u0158'] = "R" # LATIN CAPITAL LETTER R WITH CARON
self.rewrite_hash[u'\u0159'] = "r" # LATIN SMALL LETTER R WITH CARON
self.rewrite_hash[u'\u015A'] = "S" # LATIN CAPITAL LETTER S WITH ACUTE
self.rewrite_hash[u'\u015B'] = "s" # LATIN SMALL LETTER S WITH ACUTE
self.rewrite_hash[u'\u015C'] = "S" # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
self.rewrite_hash[u'\u015D'] = "s" # LATIN SMALL LETTER S WITH CIRCUMFLEX
self.rewrite_hash[u'\u015E'] = "S" # LATIN CAPITAL LETTER S WITH CEDILLA
self.rewrite_hash[u'\u015F'] = "s" # LATIN SMALL LETTER S WITH CEDILLA
self.rewrite_hash[u'\u0160'] = "S" # LATIN CAPITAL LETTER S WITH CARON
self.rewrite_hash[u'\u0161'] = "s" # LATIN SMALL LETTER S WITH CARON
self.rewrite_hash[u'\u0162'] = "T" # LATIN CAPITAL LETTER T WITH CEDILLA
self.rewrite_hash[u'\u0163'] = "t" # LATIN SMALL LETTER T WITH CEDILLA
self.rewrite_hash[u'\u0164'] = "T" # LATIN CAPITAL LETTER T WITH CARON
self.rewrite_hash[u'\u0165'] = "t" # LATIN SMALL LETTER T WITH CARON
self.rewrite_hash[u'\u0166'] = "T" # LATIN CAPITAL LETTER T WITH STROKE
self.rewrite_hash[u'\u0167'] = "t" # LATIN SMALL LETTER T WITH STROKE
self.rewrite_hash[u'\u0168'] = "U" # LATIN CAPITAL LETTER U WITH TILDE
self.rewrite_hash[u'\u0169'] = "u" # LATIN SMALL LETTER U WITH TILDE
self.rewrite_hash[u'\u016A'] = "U" # LATIN CAPITAL LETTER U WITH MACRON
self.rewrite_hash[u'\u016B'] = "u" # LATIN SMALL LETTER U WITH MACRON
self.rewrite_hash[u'\u016C'] = "U" # LATIN CAPITAL LETTER U WITH BREVE
self.rewrite_hash[u'\u016D'] = "u" # LATIN SMALL LETTER U WITH BREVE
self.rewrite_hash[u'\u016E'] = "U" # LATIN CAPITAL LETTER U WITH RING ABOVE
self.rewrite_hash[u'\u016F'] = "u" # LATIN SMALL LETTER U WITH RING ABOVE
self.rewrite_hash[u'\u0170'] = "U" # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
self.rewrite_hash[u'\u0171'] = "u" # LATIN SMALL LETTER U WITH DOUBLE ACUTE
self.rewrite_hash[u'\u0172'] = "U" # LATIN CAPITAL LETTER U WITH OGONEK
self.rewrite_hash[u'\u0173'] = "u" # LATIN SMALL LETTER U WITH OGONEK
self.rewrite_hash[u'\u0174'] = "W" # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
self.rewrite_hash[u'\u0175'] = "w" # LATIN SMALL LETTER W WITH CIRCUMFLEX
self.rewrite_hash[u'\u0176'] = "Y" # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
self.rewrite_hash[u'\u0177'] = "y" # LATIN SMALL LETTER Y WITH CIRCUMFLEX
self.rewrite_hash[u'\u0178'] = "Y" # LATIN CAPITAL LETTER Y WITH DIAERESIS
self.rewrite_hash[u'\u0179'] = "Z" # LATIN CAPITAL LETTER Z WITH ACUTE
self.rewrite_hash[u'\u017A'] = "z" # LATIN SMALL LETTER Z WITH ACUTE
self.rewrite_hash[u'\u017B'] = "Z" # LATIN CAPITAL LETTER Z WITH DOT ABOVE
self.rewrite_hash[u'\u017C'] = "z" # LATIN SMALL LETTER Z WITH DOT ABOVE
self.rewrite_hash[u'\u017D'] = "Z" # LATIN CAPITAL LETTER Z WITH CARON
self.rewrite_hash[u'\u017E'] = "z" # LATIN SMALL LETTER Z WITH CARON
self.rewrite_hash[u'\u017F'] = "s" # LATIN SMALL LETTER LONG S
self.rewrite_hash[u'\u0180'] = "b" # LATIN SMALL LETTER B WITH STROKE
self.rewrite_hash[u'\u0181'] = "B" # LATIN CAPITAL LETTER B WITH HOOK
self.rewrite_hash[u'\u0182'] = "B" # LATIN CAPITAL LETTER B WITH TOPBAR
self.rewrite_hash[u'\u0183'] = "b" # LATIN SMALL LETTER B WITH TOPBAR
self.rewrite_hash[u'\u0184'] = "b" # LATIN CAPITAL LETTER TONE SIX
self.rewrite_hash[u'\u0185'] = "b" # LATIN SMALL LETTER TONE SIX
self.rewrite_hash[u'\u0186'] = "O" # LATIN CAPITAL LETTER OPEN O
self.rewrite_hash[u'\u0187'] = "C" # LATIN CAPITAL LETTER C WITH HOOK
self.rewrite_hash[u'\u0188'] = "c" # LATIN SMALL LETTER C WITH HOOK
self.rewrite_hash[u'\u0189'] = "D" # LATIN CAPITAL LETTER AFRICAN D
self.rewrite_hash[u'\u018A'] = "D" # LATIN CAPITAL LETTER D WITH HOOK
self.rewrite_hash[u'\u018B'] = "d" # LATIN CAPITAL LETTER D WITH TOPBAR
self.rewrite_hash[u'\u018C'] = "d" # LATIN SMALL LETTER D WITH TOPBAR
self.rewrite_hash[u'\u018D'] = " " # LATIN SMALL LETTER TURNED DELTA
self.rewrite_hash[u'\u018E'] = " " # LATIN CAPITAL LETTER REVERSED E
self.rewrite_hash[u'\u018F'] = " " # LATIN CAPITAL LETTER SCHWA
self.rewrite_hash[u'\u0190'] = "E" # LATIN CAPITAL LETTER OPEN E
self.rewrite_hash[u'\u0191'] = "F" # LATIN CAPITAL LETTER F WITH HOOK
self.rewrite_hash[u'\u0192'] = "f" # LATIN SMALL LETTER F WITH HOOK
self.rewrite_hash[u'\u0193'] = "G" # LATIN CAPITAL LETTER G WITH HOOK
self.rewrite_hash[u'\u0194'] = " " # LATIN CAPITAL LETTER GAMMA
self.rewrite_hash[u'\u0195'] = "hv" # LATIN SMALL LETTER HV
self.rewrite_hash[u'\u0196'] = "I" # LATIN CAPITAL LETTER IOTA
self.rewrite_hash[u'\u0197'] = "I" # LATIN CAPITAL LETTER I WITH STROKE
self.rewrite_hash[u'\u0198'] = "K" # LATIN CAPITAL LETTER K WITH HOOK
self.rewrite_hash[u'\u0199'] = "k" # LATIN SMALL LETTER K WITH HOOK
self.rewrite_hash[u'\u019A'] = "l" # LATIN SMALL LETTER L WITH BAR
self.rewrite_hash[u'\u019B'] = " " # LATIN SMALL LETTER LAMBDA WITH STROKE
self.rewrite_hash[u'\u019C'] = " " # LATIN CAPITAL LETTER TURNED M
self.rewrite_hash[u'\u019D'] = "N" # LATIN CAPITAL LETTER N WITH LEFT HOOK
self.rewrite_hash[u'\u019E'] = "n" # LATIN SMALL LETTER N WITH LONG RIGHT LEG
self.rewrite_hash[u'\u019F'] = "O" # LATIN CAPITAL LETTER O WITH MIDDLE TILDE
self.rewrite_hash[u'\u0226'] = "a" # LATIN CAPITAL LETTER A WITH DOT ABOVE
self.rewrite_hash[u'\u0227'] = "a" # LATIN SMALL LETTER A WITH DOT ABOVE
self.rewrite_hash[u'\u02DC'] = " " # SMALL TILDE
self.rewrite_hash[u'\u0391'] = "A" # GREEK CAPITAL LETTER ALPHA
self.rewrite_hash[u'\u03A4'] = "T" # GREEK CAPITAL LETTER TAU
self.rewrite_hash[u'\u03A9'] = " omega " # GREEK CAPITAL LETTER OMEGA
self.rewrite_hash[u'\u03B2'] = " beta " # GREEK SMALL LETTER BETA
self.rewrite_hash[u'\u03BC'] = " mu " # GREEK SMALL LETTER MU
self.rewrite_hash[u'\u03C0'] = " pi " # GREEK SMALL LETTER PI
self.rewrite_hash[u'\u0441'] = "c" # CYRILLIC SMALL LETTER ES
self.rewrite_hash[u'\u1F7B'] = "u" # GREEK SMALL LETTER UPSILON WITH OXIA
self.rewrite_hash[u'\u1E25'] = "h" # LATIN SMALL LETTER H WITH DOT BELOW
self.rewrite_hash[u'\u1ECB'] = "i" # LATIN SMALL LETTER I WITH DOT BELOW
self.rewrite_hash[u'\u2018'] = "'" # LEFT SINGLE QUOTATION MARK
self.rewrite_hash[u'\u2019'] = "'" # RIGHT SINGLE QUOTATION MARK
self.rewrite_hash[u'\u201A'] = " " # SINGLE LOW-9 QUOTATION MARK
self.rewrite_hash[u'\u201C'] = " " # LEFT DOUBLE QUOTATION MARK
self.rewrite_hash[u'\u201D'] = " " # RIGHT DOUBLE QUOTATION MARK
self.rewrite_hash[u'\u201E'] = " " # DOUBLE LOW-9 QUOTATION MARK
self.rewrite_hash[u'\u201F'] = " " # OUBLE HIGH-REVERSED-9 QUOTATION MARK
self.rewrite_hash[u'\u2032'] = "\'" # PRIME
self.rewrite_hash[u'\u2033'] = " " # DOUBLE PRIME
self.rewrite_hash[u'\u20AC'] = " euros " # EURO SIGN
self.rewrite_hash[u'\u2122'] = " " # TRADE MARK SIGN
self.rewrite_hash[u'\uFB01'] = "fi" # LATIN SMALL LIGATURE FI
self.rewrite_hash[u'\uFF00'] = " " #
return
def remove_word_punctuation (self,ln):
""" Punctuation Remover """
ln = re.sub("^(\S+)[\.\!\?]", "\g<1>", ln)
ln = re.sub("\s(\S+)[\.\!\?]", " \g<1>", ln)
ln = re.sub("(\S+)[\.\!\?]$", "\g<1>", ln)
ln = re.sub("\s[\.\!\?]\s", " ", ln)
ln = re.sub("^[\.\!\?]$", "", ln)
# Clean up extra spaces
ln = re.sub('^\s+', '', ln)
ln = re.sub('\s+$', '', ln)
ln = re.sub('\s+', ' ', ln)
return ln
def remove_twitter_meta (self,ln):
""" Twitter Metadata Remover """
# ln = re.sub(r'\#\S+', ' ', ln) # remove hashtags --old version
# ln = re.sub(r'\@\S+', ' ', ln) # remove @tags -- old version
# ln = re.sub(r'\#[a-zA-Z0-9_]+', ' ', ln) # remove hashtags
ln = re.sub(r'\@[a-zA-Z0-9_]+', ' ', ln) # remove @tags
ln = re.sub('\sRT\s', ' ', ln) # remove retweet marker
ln = re.sub('^RT\s', ' ', ln)
# Clean up extra spaces
ln = re.sub('^\s+', '', ln)
ln = re.sub('\s+$', '', ln)
ln = re.sub('\s+', ' ', ln)
return ln
def remove_nonsentential_punctuation (self,ln):
    """ Remove non-sentential punctuation.

    Runs an ordered chain of regex substitutions that strip dashes, quotes,
    colons, semicolons, underscores, commas, asterisks, slashes, pipes and
    backslashes, collapses runs of '.', '?', '!' to a single mark, and drops
    parentheses that do not look like parts of emoticons. Order matters:
    later patterns assume earlier ones have already run.

    :param ln: input line (str)
    :return: cleaned line with whitespace collapsed (str)
    """
    # remove '-'
    ln = re.sub('^\-+', '', ln)
    ln = re.sub('\-\-+', '', ln)
    ln = re.sub('\s\-+', '', ln)
    # remove '~'
    ln = re.sub('\~', ' ', ln)
    # remove standard double quotes
    ln = re.sub('\"', '', ln)
    # remove single quotes
    ln = re.sub("^\'+", '', ln)
    ln = re.sub("\'+$", '', ln)
    ln = re.sub("\'+\s+", ' ', ln)
    ln = re.sub("\s+\'+", ' ', ln)
    ln = re.sub("\s+\`+", ' ', ln)
    ln = re.sub("^\`+", ' ', ln)
    # remove ':'
    ln = re.sub("\:\s", " ", ln)
    ln = re.sub("\:$", "", ln)
    # remove ';'
    ln = re.sub('\;\s', ' ', ln)
    ln = re.sub('\;$', '', ln)
    # remove '_'
    ln = re.sub('\_+\s', ' ', ln)
    ln = re.sub('^\_+', '', ln)
    ln = re.sub('_+$', '', ln)
    ln = re.sub('\_\_+', ' ', ln)
    # remove ',' (only when followed by a word/hashtag character, or trailing)
    ln = re.sub('\,+([\#A-Za-z])', ' \g<1>', ln)
    ln = re.sub('\,+$', ' ', ln)
    ln = re.sub('\,\.\s', ' ', ln)
    ln = re.sub('\,\s', ' ', ln)
    # remove '*'
    ln = re.sub('\s\*+', ' ', ln)
    ln = re.sub('\*+\s', ' ', ln)
    ln = re.sub('\*\.', ' ', ln)
    ln = re.sub('\s\*+\s', ' ', ln)
    ln = re.sub('^\*+', '', ln)
    ln = re.sub('\*+$', '', ln)
    # Keep only one '.', '?', or '!'
    ln = re.sub('\?[\!\?]+', '?', ln)
    ln = re.sub('\![\?\!]+', '!', ln)
    ln = re.sub('\.\.+', '.', ln)
    # # remove '/'
    ln = re.sub('\s\/', ' ', ln)
    ln = re.sub('\/\s', ' ', ln)
    # remove sentence final '!' and '?'  (deliberately disabled)
    # ln = re.sub('[\!\?]+\s*$', '', ln)
    # remove other special characters
    ln = re.sub('\|', ' ', ln)
    ln = re.sub(r'\\', ' ', ln)
    # Remove parentheses that are not part of emoticons.
    # Not sure of the best way to do this, but here's a conservative
    # approach: only strip a paren directly adjacent to a word character.
    ln = re.sub('\(([@\#A-Za-z0-9])', '\g<1>', ln)
    ln = re.sub('([@\#A-Za-z0-9])\)', '\g<1> ', ln)
    # Clean up extra spaces
    ln = re.sub('^\s+', '', ln)
    ln = re.sub('\s+$', '', ln)
    ln = re.sub('\s+', ' ', ln)
    return ln
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,741
|
escap-data-hub/LLString
|
refs/heads/master
|
/llstring/llstring/matching/softtfidf.py
|
#!/usr/bin/env python
# softtfidf.py
#
# Soft TF-IDF String Comparison Algorithm
#
# Copyright 2015-2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli
# dagli@ll.mit.edu
#
# Original logic written by @drangons for the entity_resolution_spark repository:
# https://github.com/drangons/entity_resolution_spark
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Imports
import logging
import math
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import jellyfish as jf
class Softtfidf:
    """
    This module implements the soft tf-idf algorithm described in:
    A Comparison of String Distance Metrics for Name-Matching Tasks
    Cohen et al., IJCAI 2003

    Tokens are matched approximately via Jaro-Winkler similarity; token
    weights come from a log-IDF model, either supplied externally
    (set_model / constructor) or computed on the fly from the two query
    strings ("degenerate mode").
    """
    # Logging (class-level; basicConfig runs once at class-creation time)
    LOG_LEVEL = logging.INFO
    logging.basicConfig(level=LOG_LEVEL,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    logger = logging.getLogger(__name__)

    def __init__(self,threshold=0.6, idf_model=None):
        """ Constructor

        :param threshold: Jaro-Winkler similarity above which two tokens
            count as a match in score()
        :param idf_model: dict with keys 'idf', 'corpus_vocab' and
            'oov_idf_val', or None to leave the model unset until
            set_model() is called (or degenerate mode kicks in)
        """
        self.THRESHOLD = threshold
        if idf_model == None:
            self.LOG_IDF = None
            self.CORPUS_VOCAB = None
            self.OOV_IDF_VAL = None
        else:
            self.LOG_IDF = idf_model['idf']
            self.CORPUS_VOCAB = idf_model['corpus_vocab']
            self.OOV_IDF_VAL = idf_model['oov_idf_val']

    def set_model(self,idf_model):
        """ Set softtfidf matcher's model parameters

        :param idf_model: dict with keys 'idf', 'corpus_vocab', 'oov_idf_val'
        """
        # Set (or compute) IDF and corresponding vocabulary
        self.LOG_IDF = idf_model['idf']
        self.CORPUS_VOCAB = idf_model['corpus_vocab']
        self.OOV_IDF_VAL = idf_model['oov_idf_val']

    def set_threshold(self,threshold=0.6):
        """ Set the Jaro-Winkler match threshold used by score() """
        self.THRESHOLD = threshold

    def compute_VwS(self,s):
        """ Compute V(w,S) as defined by Cohen et al.'s IJCAI03 paper

        :param s: input string
        :return: (vocab, vprime_ws, vprime_ws_norm) where vocab maps token ->
            column index, vprime_ws maps token -> unnormalized tf*idf weight,
            and vprime_ws_norm is the L2 norm of those weights
        :raises ValueError: if CountVectorizer finds no tokens in s
        """
        # Get term-frequency vectors and vocab list for string
        # (custom token_pattern keeps single-character tokens)
        cv = CountVectorizer(min_df = 0.0, token_pattern=u'(?u)\\b\\w+\\b')
        tf = cv.fit_transform([s]); tf = tf.tocsr()
        vocab = cv.vocabulary_
        # Compute V(w,S) for string
        vprime_ws = dict()
        vprime_ws_norm = 0
        for w in vocab:
            if w in self.CORPUS_VOCAB:
                vprime_ws[w] = math.log(tf[0,vocab[w]]+1)*self.LOG_IDF[self.CORPUS_VOCAB[w]]
            else:
                vprime_ws[w] = math.log(tf[0,vocab[w]]+1)*self.OOV_IDF_VAL #if not in vocab, default to OOV_IDF_VAL
            vprime_ws_norm += vprime_ws[w]**2
        vprime_ws_norm = math.sqrt(vprime_ws_norm)
        return (vocab,vprime_ws,vprime_ws_norm)

    def score(self,s,t):
        """ Returns the soft tf-idf similarity between strings s and t

        :param s: first string
        :param t: second string
        :return: similarity (float, 0.0 on tokenization failure)
        """
        # Check to see whether a model exists; otherwise default to degenerate solution
        if (self.LOG_IDF is None) | (self.CORPUS_VOCAB is None) | (self.OOV_IDF_VAL is None):
            self.logger.info("Either (or both) IDF or corpus vocabulary parameters not given "
                    +"Defaulting to degenerate mode where corpus consists only of the "
                    +"two strings given as input.");
            self.compute_query_idf([s,t])
        # Get V(w,S) and V(w,T) (along with vocab lists for s and t)
        try:
            (s_vocab,vprime_ws,vprime_ws_norm) = self.compute_VwS(s)
            (t_vocab,vprime_wt,vprime_wt_norm) = self.compute_VwS(t)
        except ValueError:
            self.logger.info("string got stop-listed; most likely b/c " \
                    "it is of length 1, with the only character being a " \
                    "non-normalized punctuation mark. (i.e. '.')")
            sim = 0.0
            return sim
        # compute D(w,T) for all w: best Jaro-Winkler match in t for each token of s
        max_vT = dict()
        jw_sims = dict()
        for w in s_vocab:
            max_vT[w] = dict(); max_vT[w]['score'] = 0.0; max_vT[w]['max_v'] = '';
            jw_sims[w] = dict()
            for v in t_vocab:
                dist = jf.jaro_winkler(w,v)
                jw_sims[w][v] = dist
                if (dist >= max_vT[w]['score']):
                    max_vT[w]['score'] = dist
                    max_vT[w]['max_v'] = v
        self.logger.debug("max_vT: {0}".format(max_vT))
        # compute soft tf-idf sim: each s-token contributes at most once,
        # via its best match (break after the first v over threshold)
        sim = 0.0
        self.logger.debug(s_vocab)
        for w in s_vocab:
            for v in t_vocab:
                if (jw_sims[w][v] >= self.THRESHOLD):
                    inner_sum = (vprime_ws[w]/vprime_ws_norm)*(vprime_wt[max_vT[w]['max_v']]/vprime_wt_norm)*max_vT[w]['score']
                    self.logger.debug(u"(w,vprime_ws[w],vprime_ws_norm): ({0},{1},{2})".format(w,vprime_ws[w],vprime_ws_norm))
                    self.logger.debug(u"(max_vT[w]['max_v'],vprime_wt[max_vT['max_v'],vprime_wt_norm): ({0},{1},{2})".format(max_vT[w]['max_v'],vprime_wt[max_vT[w]['max_v']],vprime_wt_norm))
                    self.logger.debug(u"(max_vT[w]['score']): ({0})".format(max_vT[w]['score']))
                    self.logger.debug(u"(w,v,inner_sum): ({0},{1},{2})".format(w,v,inner_sum))
                    sim += inner_sum
                    break
        self.logger.debug("Soft TF-IDF Similarity: {0}".format(sim))
        return sim

    def compute_query_idf(self,corpus):
        """ Compute IDF from s and t in case you have no externally computed IDF to use

        :param corpus: list of strings (here: the two query strings)

        NOTE(review): this sets LOG_IDF and CORPUS_VOCAB but never
        OOV_IDF_VAL, and it uses CountVectorizer's default token_pattern
        while compute_VwS uses a custom one that keeps 1-char tokens —
        a token kept by compute_VwS but dropped here would multiply by a
        None OOV_IDF_VAL. Confirm intended behavior.
        """
        cv = CountVectorizer(min_df = 0.0)
        cv.fit_transform(corpus)
        self.logger.debug(cv.vocabulary_)
        freq_term_matrix = cv.transform(corpus)
        tfidf = TfidfTransformer(norm="l2")
        tfidf.fit(freq_term_matrix)
        log_idf = tfidf.idf_
        self.LOG_IDF = log_idf
        self.CORPUS_VOCAB = cv.vocabulary_
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,742
|
escap-data-hub/LLString
|
refs/heads/master
|
/examples/idf_training_example.py
|
#! /usr/bin/env python
# idf_training_example.py
#
# Example script to learn IDF from a training corpus
#
# Copyright 2016 Massachusetts Institute of Technology, Lincoln Laboratory
# version 0.1
#
# author: Charlie Dagli
# dagli@ll.mit.edu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import os, logging
import numpy as np
from llstring.training import idf_trainer
#
# Logging
#
LOG_LEVEL = logging.DEBUG
logging.basicConfig(level=LOG_LEVEL,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')
# Module-level logger used by idf_display() below
logger = logging.getLogger(__name__)
def idf_display(idft,N=10):
    """ Log the N lowest- and N highest-IDF tokens of a trained IDFTrainer.
        Anecdotally, high IDF corresponds to unames being used as fullnames
        Input: idft: trained IDFTrainer instance
               N: Number of tokens to print
    """
    # Invert the vocab dict: column index -> token
    index2name = {idx: tok for tok, idx in idft.CORPUS_VOCAB.items()}

    def _log_section(header, underline, ordering):
        logger.info("")
        logger.info(header)
        logger.info(underline)
        for i in range(0, N):
            idx = ordering[i]
            logger.info("{0},{1}".format(index2name[idx], idft.LOG_IDF[idx]))

    _log_section("TOKENS CORRESPONDING TO LOWEST IDF VALUES",
                 "=========================================",
                 np.argsort(idft.LOG_IDF))
    _log_section("TOKENS CORRESPONDING TO HIGHEST IDF VALUES",
                 "==========================================",
                 np.argsort(-idft.LOG_IDF))
    logger.info("")
if __name__ == "__main__":
    # Input and Output Filenames (relative to this script's directory)
    exampledir = os.path.dirname(os.path.realpath(__file__))
    fnamein = os.path.join(exampledir,"data/input/idf_training_data.txt")
    fnameout = os.path.join(exampledir,"data/output/models/english_socialmedia_idf.pckl")
    #
    # Train IDF from file handle
    # (i.e. for large training sets)
    #
    # NOTE(review): the file handle passed to compute_idf is never closed
    # explicitly here.
    idft = idf_trainer.IDFTrainer()
    idft.compute_idf(open(fnamein,"r"))
    idft.save_model(fnameout)
    idf_display(idft,20)
    #
    # Train IDF from python list instance
    # (i.e. for training sets that can fit in memory)
    #
    # load-in training data
    training_data = list()
    fo = open(fnamein,"r")
    for line in fo: training_data.append(line.rstrip())
    fo.close()
    # compute IDF (overwrites the model saved above with equivalent output)
    idft = idf_trainer.IDFTrainer()
    idft.compute_idf(training_data)
    idft.save_model(fnameout)
    idf_display(idft,20)
|
{"/llstring/llstring/training/__init__.py": ["/llstring/llstring/training/idf_trainer.py"], "/llstring/llstring/matching/__init__.py": ["/llstring/llstring/matching/mitll_string_matcher.py", "/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/matching/mitll_string_matcher.py": ["/llstring/llstring/matching/softtfidf.py"], "/llstring/llstring/utilities/normalization/__init__.py": ["/llstring/llstring/utilities/normalization/text_normalization.py", "/llstring/llstring/utilities/normalization/latin_normalization.py"], "/llstring/llstring/utilities/sampling/__init__.py": ["/llstring/llstring/utilities/sampling/reservoir_sampler.py"], "/llstring/llstring/utilities/normalization/latin_normalization.py": ["/llstring/llstring/utilities/normalization/text_normalization.py"]}
|
15,757
|
pg815/Fake_News_Prediction_And_Summarization
|
refs/heads/main
|
/summa/preprocessing/util.py
|
def suffix_replace(original, old, new):
    """
    Return *original* with its last ``len(old)`` characters replaced by *new*.

    The suffix is removed positionally — there is no check that *original*
    actually ends with *old*.
    """
    stem = original[: -len(old)]
    return stem + new
def prefix_replace(original, old, new):
    """
    Return *original* with its first ``len(old)`` characters replaced by *new*.

    The prefix is removed positionally — there is no check that *original*
    actually starts with *old*.
    :param original: string
    :param old: prefix to drop
    :param new: replacement prefix
    :return: string
    """
    tail = original[len(old):]
    return new + tail
|
{"/app.py": ["/newsscraper.py"], "/newsscraper.py": ["/model.py"]}
|
15,758
|
pg815/Fake_News_Prediction_And_Summarization
|
refs/heads/main
|
/app.py
|
from flask import Flask,render_template
from newsscraper import get_news,get_titles

# Flask application serving the scraped/summarized news dashboard.
app = Flask("__WorldTime__")

@app.route("/")
def root():
    """Render the index page with scraped channels and their titles."""
    # NOTE(review): get_titles() itself calls get_news() again inside
    # newsscraper, so the full scrape runs twice per request — confirm
    # this duplication is intended.
    channels = get_news()
    titles = get_titles()
    return render_template("index.html",channels = channels,titles = titles)

# NOTE(review): app.run() executes on import as a module-level side effect;
# this is normally guarded by `if __name__ == "__main__":` — confirm intent.
app.run(host='0.0.0.0')
|
{"/app.py": ["/newsscraper.py"], "/newsscraper.py": ["/model.py"]}
|
15,759
|
pg815/Fake_News_Prediction_And_Summarization
|
refs/heads/main
|
/model.py
|
from getEmbeddings import getEmbeddings
from sklearn.naive_bayes import GaussianNB
import scikitplot.plotters as skplt
from sklearn.svm import SVC
import numpy as np
import pickle
import os
class Models:
    """Train/evaluate fake-news classifiers over precomputed text embeddings
    and serve predictions from a pickled model ('model.sav')."""

    def __init__(self):
        # Build the train/test embedding splits once and cache them as .npy
        # files in the working directory; later constructions load the cache.
        if not os.path.isfile('./xtr.npy') or \
            not os.path.isfile('./xte.npy') or \
            not os.path.isfile('./ytr.npy') or \
            not os.path.isfile('./yte.npy'):
            xtr, xte, ytr, yte = getEmbeddings("datasets/train.csv")
            np.save('./xtr', xtr)
            np.save('./xte', xte)
            np.save('./ytr', ytr)
            np.save('./yte', yte)
        self.xtr = np.load('./xtr.npy')
        self.xte = np.load('./xte.npy')
        self.ytr = np.load('./ytr.npy')
        self.yte = np.load('./yte.npy')

    def train_svc_classifier(self):
        """Fit an SVC on the training split and print held-out accuracy."""
        clf = SVC()
        clf.fit(self.xtr, self.ytr)
        #pickle.dump(clf, open("moddel.sav", "wb"))
        y_pred = clf.predict(self.xte)
        m = self.yte.shape[0]           # number of test examples
        n = (self.yte != y_pred).sum()  # number of misclassifications
        print("Accuracy of Support Vector Machine Classifier = " + format((m - n) / m * 100, '.2f') + "%")

    def train_nb_classifier(self):
        """Fit Gaussian Naive Bayes on the training split and print held-out accuracy."""
        gnb = GaussianNB()
        gnb.fit(self.xtr, self.ytr)
        y_pred = gnb.predict(self.xte)
        m = self.yte.shape[0]           # number of test examples
        n = (self.yte != y_pred).sum()  # number of misclassifications
        print("Accuracy of Gaussian Naive Bayes Classifier = " + format((m - n) / m * 100, '.2f') + "%") # 72.94%

    def predict_truthfullness(self,news):
        """Classify a single news string with the pickled model.

        :param news: raw article text (str)
        :return: [verdict message, probability message] (list of two str)

        NOTE(review): requires 'model.sav' on disk; neither trainer above
        saves it (the dump call is commented out) — confirm its provenance.
        """
        load_model = pickle.load(open('model.sav', 'rb'))
        prediction = load_model.predict([news])
        prob = load_model.predict_proba([news])
        return ["The given statement is " + str(prediction[0]),"The truth probability score is " + str(prob[0][1])]
if __name__ == "__main__":
    # Smoke test: load cached embeddings and run one prediction.
    model = Models()
    # Training runs are disabled by default; uncomment to retrain/evaluate.
    # model.train_svc_classifier()
    # model.train_nb_classifier()
    result = model.predict_truthfullness("obama is running for president in 2022")
    print(result[0])  # verdict message
    print(result[1])  # probability message
|
{"/app.py": ["/newsscraper.py"], "/newsscraper.py": ["/model.py"]}
|
15,760
|
pg815/Fake_News_Prediction_And_Summarization
|
refs/heads/main
|
/newsscraper.py
|
import sys
import json
from time import mktime
from datetime import datetime
import feedparser as fp
import newspaper
from newspaper import Article
from model import Models
from summarizers import summarize_textrank,summarize_tfidf,summarize_wf
# Module-level scrape accumulator: {"newspapers": {company: {...}}}
data = {}
data["newspapers"] = {}
# Shared classifier instance used by get_summaries()
model = Models()
def parse_config(fname):
    """Load the JSON file describing news sites and validate it.

    :param fname: path to the JSON config file
    :return: dict mapping company name -> site config
    :raises ValueError: if any site entry lacks the obligatory 'link' key
    """
    with open(fname, "r") as data_file:
        cfg = json.load(data_file)
    # Every configured site must declare at least a 'link'.
    invalid = [company for company, value in cfg.items() if "link" not in value]
    if invalid:
        company = invalid[0]
        raise ValueError(f"Configuration item {company} missing obligatory 'link'.")
    return cfg
def _handle_rss(company, value, count, limit):
    """Collect articles for one company from its RSS feed.

    :param company: source name (logging only)
    :param value: config dict with 'rss' and 'link' keys
    :param count: running article counter (1-based)
    :param limit: stop once count exceeds this
    :return: (updated count, news_paper dict)
    """
    feed = fp.parse(value["rss"])
    print(f"Downloading articles from {company}")
    news_paper = {"rss": value["rss"], "link": value["link"], "articles": []}
    for entry in feed.entries:
        # Entries without a publish date are skipped outright.
        if not hasattr(entry, "published"):
            continue
        if count > limit:
            break
        published = datetime.fromtimestamp(mktime(entry.published_parsed)).isoformat()
        try:
            content = Article(entry.link)
            content.download()
            content.parse()
        except Exception as err:
            # Best-effort scraping: log the failure and move on.
            print(err)
            print("continuing...")
            continue
        news_paper["articles"].append({
            "link": entry.link,
            "published": published,
            "title": content.title,
            "text": content.text,
        })
        print(f"{count} articles downloaded from {company}, url: {entry.link}")
        count = count + 1
    return count, news_paper
def _handle_fallback(company, value, count, limit):
    """Scrape a site directly with newspaper.build when no RSS feed is configured.

    :param company: source name (logging only)
    :param value: config dict with a 'link' key
    :param count: running article counter (1-based)
    :param limit: stop once count exceeds this
    :return: (updated count, news_paper dict with 'link' and 'articles')
    """
    print(f"Building site for {company}")
    paper = newspaper.build(value["link"], memoize_articles=False)
    news_paper = {"link": value["link"], "articles": []}
    none_type_count = 0  # streak of undated articles since the last dated one
    for content in paper.articles:
        if count > limit:
            break
        try:
            content.download()
            content.parse()
        except Exception as err:
            # Best-effort scraping: log the failure and move on.
            print(err)
            print("continuing...")
            continue
        # Undated articles are skipped; after more than 10 in a row
        # (without an intervening dated article) the whole site is abandoned.
        if content.publish_date is None:
            print(f"{count} Article has date of type None...")
            none_type_count = none_type_count + 1
            if none_type_count > 10:
                print("Too many noneType dates, aborting...")
                none_type_count = 0
                break
            count = count + 1
            continue
        article = {
            "title": content.title,
            "text": content.text,
            "link": content.url,
            "published": content.publish_date.isoformat(),
        }
        news_paper["articles"].append(article)
        print(
            f"{count} articles downloaded from {company} using newspaper, url: {content.url}"
        )
        count = count + 1
        none_type_count = 0  # a dated article resets the streak
    return count, news_paper
def run(config, limit=4):
    """Scrape every configured company and record results in module-level `data`.

    :param config: dict of company name -> site config
    :param limit: maximum article count per company
    :return: the module-level `data` dict, with data['newspapers'] populated
    """
    for company, value in config.items():
        # Pick the RSS path when a feed is configured, else crawl the site.
        handler = _handle_rss if "rss" in value else _handle_fallback
        _, news_paper = handler(company, value, 1, limit)
        data["newspapers"][company] = news_paper
    return data
def main():
    """Load NewsPapers.json and scrape up to 3 articles per source."""
    try:
        cfg = parse_config("NewsPapers.json")
    except Exception as err:
        # Bad/missing config is fatal: exit with the error as the message.
        sys.exit(err)
    else:
        return run(cfg, limit=3)
def get_image(image_link):
    """Return the top image URL for the article at `image_link`."""
    piece = Article(image_link, language="en")  # en for English
    # download -> parse -> nlp is the required newspaper pipeline order
    for step in (piece.download, piece.parse, piece.nlp):
        step()
    return piece.top_image
def get_news():
    """Scrape all sources, enrich each article in place, and return the channels.

    Each article dict gains 'img_link', 'full_text' (the original body),
    'text' (first line only) and a running 'index'; summaries and a
    truthfulness verdict are then attached by get_summaries().

    :return: list of per-channel article lists
    """
    payload = main()
    papers = payload['newspapers']
    names = ('bbc', 'cnn', 'foxnews', 'nytimes_international', 'washingtonpost')
    channels = [papers[name]['articles'] for name in names]
    # Index counters reproduced from the original: marker advances by an
    # ever-growing step, so indices are unique but not consecutive.
    step = 1
    marker = 2
    for articles in channels:
        for story in articles:
            marker += step
            story["img_link"] = get_image(story['link'])
            story["full_text"] = story['text']
            story['text'] = story['text'].splitlines()[0]
            story['index'] = marker
            step += 1
    get_summaries(channels)
    return channels
def get_summaries(channels):
    """Attach three summaries plus a truthfulness verdict to every article (in place).

    :param channels: list of per-channel article lists, each article carrying
        a 'full_text' key (set by get_news)
    """
    for channel in channels:
        for news in channel:
            body = news['full_text']
            news['textrank'] = summarize_textrank(body)
            news['tf_idf'] = summarize_tfidf(body)
            news['wf'] = summarize_wf(body)
            verdict, score = model.predict_truthfullness(body)
            news['truthfullness'] = verdict
            news['truthfullnessscore'] = score
def get_titles():
    """Return every article title joined into one comma-terminated string.

    NOTE: triggers a full get_news() scrape of its own; the result starts
    with a single space and ends with a trailing comma (kept for parity).
    """
    parts = [" "]
    for channel in get_news():
        for news in channel:
            parts.append(news['title'] + ",")
    return "".join(parts)
if __name__ == "__main__":
    # Smoke test: scrape, then print text and all three summaries per article.
    # NOTE(review): get_news() is effectively invoked three times here
    # (directly, inside get_titles(), and again for `channels`), repeating
    # the full scrape each time — confirm this is intended.
    print(get_news())
    print(get_titles())
    channels = get_news()
    for channel in channels:
        for news in channel:
            print(f"News : {news['text']}")
            print(f" Tf_idf : {news['tf_idf']}")
            print(f" textrank : {news['textrank']}")
            print(f" wf :{news['wf']}")
|
{"/app.py": ["/newsscraper.py"], "/newsscraper.py": ["/model.py"]}
|
15,763
|
pcaravelli-sr/milestone-2-challenge
|
refs/heads/master
|
/milestone2/merge_sort.py
|
def merge_sort(items):
    """
    Uses merge sort algorithm to sort items from input list and return new list in sorted order
    :param items: list of comparable items, e.g. [3, 1, 2] or ['z', 'x', 'y']
    :return: a sorted list containing every item from the input list

    The input list is never mutated. The merge keeps the left element on
    ties, so the sort is stable. Runs in O(n log n) time, O(n) space.
    """
    # Base case: 0- or 1-element lists are already sorted; copy so the
    # caller always gets a fresh list.
    if len(items) <= 1:
        return list(items)
    # Split in half and sort each half recursively.
    mid = len(items) // 2
    left = merge_sort(items[:mid])
    right = merge_sort(items[mid:])
    # Merge the two sorted halves.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:  # <= keeps the merge stable
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of these is empty; the other holds the sorted tail.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
|
{"/milestone2/benchmarks/benchmarks.py": ["/milestone2/merge_sort.py"]}
|
15,764
|
pcaravelli-sr/milestone-2-challenge
|
refs/heads/master
|
/milestone2/benchmarks/benchmarks.py
|
from random import randint
from time import time
from milestone2.merge_sort import merge_sort
from milestone2.insertion_sort import insertion_sort
# Dictionary of labeled sort functions. If you decide to try writing a more optimized sort
# function, you can add it in an entry in this dictionary to see how it compares to the others.
sort_functions = {
    'merge_sort': merge_sort,
    'insertion_sort': insertion_sort
}
def benchmark(sort_fns, list_length, num_passes):
    """
    For each sorting function provided, generate a list with the specified length, then
    track the time to sort it. Repeat the specified number of times, then return the average
    run time for each sorting function.
    Because lists with shorter lengths can be sorted extremely quickly on modern hardware,
    it is helpful to take the average of many run times to avoid noisiness in the data.
    :param sort_fns: dictionary of sorting functions
    :param list_length: length of the list to generate that will be passed to sorting functions
    :param num_passes: number of times to run each sorting function
    :return: dictionary of sorting function name to average run time for given list length
    """
    # One accumulator per sort function, keyed the same as `sort_fns`.
    times = {name: 0.0 for name in sort_fns}
    for _ in range(num_passes):
        # Fresh random input per pass; every sort sees the same list contents.
        items = generate_list(list_length)
        for name, sort_fn in sort_fns.items():
            items_copy = list(items)  # insertion sort changes input array, so work with a copy
            start = time()
            sort_fn(items_copy)
            end = time()
            times[name] += end - start
    # Convert totals to per-pass averages.
    for name in times:
        times[name] /= num_passes
    return times
def generate_list(length):
    """
    Generates a list of random integers between 0 and 100,000 to help with our benchmarking.
    :param length: length of list to be returned
    :return: list of random integers between 0 and 100,000
    """
    # One randint draw per element, up to the specified `length`.
    return [randint(0, 100000) for _ in range(length)]
if __name__ == '__main__':
    # Benchmark each sort over progressively longer lists (10, 100, ...),
    # shrinking the pass count by 10x each round so total work stays bounded.
    list_length = 10
    num_passes = 10000
    # Since this can run rather slowly, you may want to skip the last benchmark by doing:
    # while num_passes >= 10
    while num_passes >= 1:
        results = benchmark(sort_functions, list_length, num_passes)
        print('Time to sort list length %s:\n%s\n' % (list_length, results))
        list_length = list_length * 10
        num_passes = int(num_passes / 10)
|
{"/milestone2/benchmarks/benchmarks.py": ["/milestone2/merge_sort.py"]}
|
15,777
|
jonberliner/jordan_e
|
refs/heads/master
|
/rotationExperiment.py
|
from numpy import sum, concatenate, repeat, linspace, abs, ndarray, arange, mean
from numpy.random import RandomState, permutation
from numpy import array as npa
def rotationExperiment(domainbounds, rotmag, nPerXOpt,\
                       mindegArcPool, maxdegArcPool, nEpicycle, radwrtxArc,\
                       maxrotmag=None, degWhereRotIsZero=None, edgebuf=None,\
                       rngseed=None, blockTypes=None, agTypes=None,\
                       xOrigin=0.5, yOrigin=0.5):
    """FIXME: need to include the params used for making clickArcQueue:
    mindegArcPool, maxdegArcPool, nEpicycle, radwrtxArc, xOrigin, and yOrigin.
    def rotationExperiment(domainbounds, rotmag, nPerXOpt,
                           maxrotmag=None, degWhereRotIsZero=None, edgebuf=None,
                           rngseed=None, blockTypes=None, agTypes=None)
    inputs:
        (note:  - lo* means list of *
                - nparray means numpy array
                - lo* can usually be an nparray of *
                - float is a non-integer real scalar)
        - domainbounds (lofloat): min and max of domain
        - rotmag (float or lofloat): rotation magnitude
        - nPerXOpt (loint): n trials for each block
        - maxrotmag (float, default=None): max rotation used in this experiment
            by any subject (not necessarily this sub).  Useful for matching
            degWhereRotIsZero between conditions, which is done randomly.
            if None, maxrotmag=rotmag
        - degWhereRotIsZero (float, default=None): where on the line it means
            rotation equals zero.  If None, will be set randomly to fall
            within edgebuf of domain bounds.
        - rngseed (int, default=None): random number generator seed for
            matching between subjects.  If None, will init rng w/o seed.
        - blockTypes (lostring, default=None): tells whether each block is
            'baseline' (no rotation), 'rotation', or 'counterrotation'.
            If None, assumes all are rotation, and that the rotations are
            explicitely provided in rotmag as a lofloat
        - agTypes (lostring, default=None): whether each block is 'abrupt' or
            'gradual'.  If None, sets all blocks to abrupt.  Last block must
            always be 'abrupt' (because nothing to gradually transition to)
    outputs:
        - xOptQueue (nparray): optimal location in the domain for each trial

    NOTE(review): this module uses xrange, i.e. it is Python 2 code.
    """
    nBlock = len(nPerXOpt)
    mindomain, maxdomain = domainbounds
    # ambuiguous what rotation or counterrotation mean when multiple rots
    if type(rotmag) is list: assert not blockTypes
    if not degWhereRotIsZero:  # random valid degWhereRotIsZero (i.e. veridical location)
        if rngseed: rng = RandomState(rngseed)  # use seed if given
        else: rng = RandomState()
        if not edgebuf: edgebuf = 0.  # no edge buffer by default
        if not maxrotmag: maxrotmag = rotmag
        # ensure rotations will fall in within edgebuf of domain
        # (wrt maxrotmag for counterbalancing b.t. groups)
        # rejection-sample until the zero point leaves room for +/- maxrotmag
        good = False
        while not good:
            degWhereRotIsZero = rng.uniform(low=mindomain, high=maxdomain)
            if degWhereRotIsZero - maxrotmag > mindomain + edgebuf:
                if degWhereRotIsZero + maxrotmag < maxdomain - edgebuf:
                    good = True
    # default rotation for all blocks (so you can pass vector of custom rots)
    if not blockTypes:
        blockTypes = ['rotation' for _ in xrange(nBlock)]
    xOpts = []
    # get xOpt for each block relative to degWhereRotIsZero
    for bt in blockTypes:
        basenames = ['baseline', 'base', 'b']
        rotnames = ['rotation', 'rot', 'r']
        crotnames = ['counterrotation', 'crot', 'c']
        if bt in basenames:
            xOpt = degWhereRotIsZero
        elif bt in rotnames:
            xOpt = degWhereRotIsZero + rotmag
        elif bt in crotnames:
            xOpt = degWhereRotIsZero - rotmag
        else:
            raise ValueError('invalid blockType name %s' % (bt))
        xOpts.append(xOpt)
    if not agTypes:
        agTypes = ['abrupt' for _ in xrange(nBlock)]
    assert len(blockTypes) == len(xOpts) == len(nPerXOpt) == len(agTypes)
    # per-trial optimal locations (abrupt jumps and/or gradual ramps)
    xOptQueue = make_mixed_xOptQueue(xOpts, nPerXOpt, agTypes)
    # get the arcline for the experiment
    clickArcQueue = make_clickArcQueue(mindegArcPool, maxdegArcPool,\
                                       nEpicycle, radwrtxArc,\
                                       xOrigin, yOrigin)
    # package in dict and ship off
    experParams = {}
    experParams['xOptQueue'] = xOptQueue
    for ff in clickArcQueue:  # extract params in clickArcQueue
        experParams[ff] = clickArcQueue[ff]
    return experParams
def make_mixed_xOptQueue(xOpts, nPerXOpt, agBlockTypes):
    """def make_mixed_xOptQueue(xOpts, nPerXOpt, agBlockTypes)
    input:
        xOpts (float): a list of optimal aim locatations
        nPerXOpt (int): how many times each xOpt should be repeated
        agBlockTypes (str): 'a' (abrupt) or 'g' (gradual) block
    output:
        xOptQueue (lofloats): opt location for each trial

    NOTE(review): a 'gradual' block ramps toward xOpts[b+1], so the final
    block must be abrupt (as rotationExperiment's docstring requires);
    a gradual final block would raise IndexError here.
    """
    abruptnames = ['abrupt', 'a']
    gradualnames = ['gradual', 'g']
    nBlock = len(xOpts)
    assert nBlock == len(nPerXOpt)
    nTrial = sum(nPerXOpt)  # total trial count (currently unused)
    blockqueues = []
    for b in xrange(nBlock):  # NB: xrange — Python 2 code
        agThisBlock = agBlockTypes[b]
        if agThisBlock in abruptnames:
            # hold this block's xOpt constant for all its trials
            blockqueue = repeat(xOpts[b], nPerXOpt[b])
        elif agThisBlock in gradualnames:
            # ramp linearly from this block's xOpt to the next block's
            blockqueue = linspace(xOpts[b], xOpts[b+1], nPerXOpt[b])
        else: raise ValueError('invalid agBlockType %s' % (agThisBlock))
        blockqueues.append(blockqueue)
    xOptQueue = concatenate(blockqueues)
    return xOptQueue
def make_abrupt_xOptQueue(xOpts, nPerXOpt):
    """def make_abrupt_xOptQueue(xOpts, nPerXOpt)
    input:
        xOpts (float): a list of optimal aim locatations
        nPerXOpt (int): how many times each xOpt should be repeated
    output:
        xOptQueue (lofloats): opt location for each trial
    """
    assert len(xOpts) == len(nPerXOpt)
    nTrial = sum(nPerXOpt)  # total trial count (unused, kept from original)
    # one constant-valued run per block, then stitch the runs together
    perBlock = [repeat(opt, reps) for opt, reps in zip(xOpts, nPerXOpt)]
    return concatenate(perBlock)
def make_gradual_xOptQueue(xOpts, nPerXOpt):
    """def make_gradual_xOptQueue(xOpts, nPerXOpt)
    input:
        xOpts (float): a list of optimal aim locatations
        nPerXOpt (int): how many steps to move from xOpt[i] to xOpt[i+1]
            for final block, nPerXOpt repeats final value nPerXOpt[-1] times
    output:
        xOptQueue (lofloats): opt location for each trial
    """
    nBlock = len(xOpts)
    assert nBlock == len(nPerXOpt)
    # Ramp linearly between consecutive xOpts.
    # FIX: was `for b in xrange(nBlock)-1`, a TypeError (cannot subtract
    # from a range); the intent was to iterate over the first nBlock-1 blocks.
    miniqueues = [linspace(xOpts[b], xOpts[b + 1], nPerXOpt[b])
                  for b in range(nBlock - 1)]
    # FIX: was `miniqueues += repeat(...)`, which extended the list with
    # 0-d scalars and made concatenate() fail; append the final plateau
    # as a single array instead.
    miniqueues.append(repeat(xOpts[-1], nPerXOpt[-1]))
    xOptQueue = concatenate(miniqueues)
    return xOptQueue
def repeatIfScalar(thing, n):
    """def repeatIfScalar(thing, n)
    input:
        thing (anything): thing checking if scalar
        n (int): times to repeat if scalar
    output:
        thing (nparray or original sequence): repeat(thing, n) when thing
        has no __len__, otherwise thing unchanged"""
    is_sequence = hasattr(thing, "__len__")  # lists and nparrays pass through
    return thing if is_sequence else repeat(thing, n)
def make_clickArcQueue(mindegArcPool, maxdegArcPool, nEpicycle, radwrtxArc,\
                       xOrigin=0.5, yOrigin=0.5):
    """make_clickArcQueue(mindegArcPool, maxdegArcPool, nEpicycle, radwrtxArc,\
                          xStart=0.5, yStart=0.5)
    input:
        - mindegArcPool (lofloat): degrees of cw-most edge of choice arc
        - maxdegArcPool (lofloat): degrees of ccw-most edge of choice arc
            must be same size as mindegArcPool
        - nEpicycle (loint): number of rand perms of mindegArcPool
        - radwrtxArc (float in [0., 1.]): radius, in terms of percentage of
            width (x) of screen
        - xOrigin (float or lofloat in [0., 1.], default 0.5): arc origin as
            percent of screen width
        - yOrigin (float or lofloat in [0., 1.], default 0.5): arc origin as
            percent of screen height
    output:
        - out w fields [mindegqueue, maxdegqueue, radwrtxqueue,
                        xoriginqueue, yoriginqueue],
            which specify the startpoint and choice arc for every trial of
            the experiment

    NOTE: trial order is randomized (numpy.random.permutation), so output
    depends on global numpy random state.
    """
    iInPool = len(mindegArcPool)
    assert len(maxdegArcPool) == iInPool
    # broadcast any scalar parameters up to the pool size
    radwrtxArcPool = repeatIfScalar(radwrtxArc, iInPool)
    xOriginPool = repeatIfScalar(xOrigin, iInPool)
    yOriginPool = repeatIfScalar(yOrigin, iInPool)
    # ensure lengths all kosher
    assert len(radwrtxArcPool) == iInPool
    assert len(xOriginPool) == iInPool
    assert len(yOriginPool) == iInPool
    iDegPool = arange(iInPool)
    # nEpicycle independent random permutations of pool indices, back to back
    iDegPoolQueue = concatenate([permutation(iDegPool)
                                 for _ in xrange(nEpicycle)])
    # index every pool by the same shuffled order so rows stay aligned
    out = {}
    out['mindegarcqueue'] = npa([mindegArcPool[ii] for ii in iDegPoolQueue])
    out['maxdegarcqueue'] = npa([maxdegArcPool[ii] for ii in iDegPoolQueue])
    out['radwrtxarcqueue'] = npa([radwrtxArcPool[ii] for ii in iDegPoolQueue])
    out['xoriginqueue'] = npa([xOriginPool[ii] for ii in iDegPoolQueue])
    out['yoriginqueue'] = npa([yOriginPool[ii] for ii in iDegPoolQueue])
    return out
|
{"/custom.py": ["/rotationExperiment.py"]}
|
15,778
|
jonberliner/jordan_e
|
refs/heads/master
|
/custom.py
|
# this file imports custom routes into the experiment server
from flask import Blueprint, render_template, request, jsonify, Response, abort, current_app
from jinja2 import TemplateNotFound
from functools import wraps
from sqlalchemy import or_
from psiturk.psiturk_config import PsiturkConfig
from psiturk.experiment_errors import ExperimentError
from psiturk.user_utils import PsiTurkAuthorization, nocache
# # Database setup
from psiturk.db import db_session, init_db
from psiturk.models import Participant
from json import dumps, loads
# for basic experiment setup
from numpy import linspace
from numpy import array as npa
# load the configuration options
config = PsiturkConfig()
config.load_config()
# NOTE(review): attribute name looks misspelled ("SECREY_KEY" vs "SECRET_KEY");
# as written this sets an attribute nothing in view reads — confirm intent.
config.SECREY_KEY = 'my_secret_key'
myauth = PsiTurkAuthorization(config)  # if you want to add a password protect route use this
# explore the Blueprint
custom_code = Blueprint('custom_code', __name__, template_folder='templates', static_folder='static')
from rotationExperiment import rotationExperiment
## GET SUBJECT EXPERIMENT PARAMS
@custom_code.route('/init_experiment', methods=['GET'])
def init_experiment():
    """Assemble and return the per-subject experiment configuration as JSON.

    Query-string parameters:
        condition      -- int index into ROTMAGPOOL (rotation magnitude).
        counterbalance -- int index into RNGSEEDPOOL, giving each
                          counterbalance slot a fixed, reproducible RNG seed.

    Raises ExperimentError('improper_inputs') when 'condition' is missing.
    """
    # FIX: dict.has_key() is deprecated (and removed in Python 3); 'in' is
    # the equivalent membership test on request.args.
    if 'condition' not in request.args:
        raise ExperimentError('improper_inputs')  # i don't like returning HTML to JSON requests... maybe should change this
    CONDITION = int(request.args['condition'])
    COUNTERBALANCE = int(request.args['counterbalance'])

    ## FREE VARS
    # made with numpy.random.randint(4294967295, size=100) # (number is max allowed on amazon linux)
    RNGSEEDPOOL =\
        npa([3298170796, 2836114699, 599817272, 4120600023, 2084303722,
             3397274674, 422915931, 1268543322, 4176768264, 3919821713,
             1110305631, 1751019283, 2477245129, 658114151, 3344905605,
             1041401026, 232715019, 326334289, 2686111309, 2708714477,
             737618720, 1961563934, 2498601877, 210792284, 474894253,
             4028779193, 237326432, 3676530999, 529417255, 3092896686,
             169403409, 2615770170, 1339086861, 3757383830, 2082288757,
             4170457367, 371267289, 3248256753, 1696640091, 2779201988,
             492501592, 2278560761, 2146121483, 772692697, 2009991030,
             1917110505, 621292942, 1900862326, 3924210345, 2834685808,
             2782250785, 3978659517, 230589819, 3266844848, 1789706566,
             1926158994, 3334290749, 2564647456, 2780425615, 2453304773,
             2867246165, 2853230235, 3943014068, 1849702346, 1006440977,
             326290567, 779365638, 2796156127, 2850718974, 4250010213,
             1627130691, 3538373920, 1807938670, 2430758838, 1678867555,
             515849939, 323252975, 1062571753, 551895230, 1003551997,
             902827309, 2496798931, 4165811834, 88322007, 1998993400,
             3260624632, 2504021401, 915464428, 2503603945, 1138822767,
             1487903826, 3534352433, 2793970570, 3696596236, 3057302268,
             2924494158, 1308408238, 2181850436, 2485685726, 1958873721])
    MINDEG = 0.  # minumum degree of choiceSet
    MAXDEG = 360.  # max degree of choiceSet
    RANGEDEG = MAXDEG - MINDEG
    ROTMAGPOOL = npa([15., 30., 45., 60.])  # proxy for 15, 30, 45, 60 degree rots
    ROTMAG = ROTMAGPOOL[CONDITION]
    NPERXOPT = [90, 40, 40, 90]  # how many trials per block?
    NTRIAL = sum(NPERXOPT)  # total number trials in experiment
    MAXROTMAG = 60.  # maximum rotation considered for these experiments
    DEGWHEREROTISZERO = None  # if none, will be random
    EDGEBUF = 10.  # random degWhereRotIsZero will be between [MINDOMAIN+EDGEBUF, MAXDOMAIN-EDGEBUF]
    RNGSEED = RNGSEEDPOOL[COUNTERBALANCE]
    # 'b' for base, 'r' for rot, 'c' for counterrot
    BLOCKTYPES = ['b', 'r', 'c', 'b']
    # if None, all abrupt blocks.
    # (can be explicitely written as ['a', 'a', 'a', 'a'])
    AGTYPES = None
    # params for make_clickArcQueue, which determines startpoint and heading ang
    # NTARGET = 4
    # MINDEGARCPOOL = linspace(0., 360., NTARGET+1)[:-1] # ccw-most part of choice arc
    MINDEGARCPOOL = npa([0.])
    NTARGET = len(MINDEGARCPOOL)
    MAXDEGARCPOOL = MINDEGARCPOOL + RANGEDEG  # cw-most part of choice arc
    assert NTRIAL % NTARGET == 0
    # FIX: floor division keeps NEPICYCLE an int under both Python 2 and 3
    # (exactness is guaranteed by the assert above).
    NEPICYCLE = NTRIAL // NTARGET  # how many epicycles through each target loc
    RADWRTXARC = 0.3  # percent of window width that determines dist(start, arc)
    XORIGIN = 0.5  # x startpoint as percentage of window width
    YORIGIN = 0.5  # y startpoint as percentage of window height
    # FIX: the original dict listed 'radwrtxArc' twice; a duplicate key is
    # silently dropped by Python, so only one entry is kept here.
    experParams = {# needed for make_mixed_xOptQueue
                   'domainbounds': [MINDEG, MAXDEG],
                   'rotmag': ROTMAG,
                   'nPerXOpt': NPERXOPT,
                   'radwrtxArc': RADWRTXARC,  # shared by both queue builders
                   'maxrotmag': MAXROTMAG,
                   'degWhereRotIsZero': DEGWHEREROTISZERO,
                   'edgebuf': EDGEBUF,
                   'rngseed': RNGSEED,
                   'blockTypes': BLOCKTYPES,
                   'agTypes': AGTYPES,
                   # needed for make_clickArcQueue
                   'mindegArcPool': MINDEGARCPOOL,
                   'maxdegArcPool': MAXDEGARCPOOL,
                   'nEpicycle': NEPICYCLE,
                   'xOrigin': XORIGIN,
                   'yOrigin': YORIGIN}
    # make experiment params for this subject!
    # (** means unpack and pass in params in a dict)
    subParams = rotationExperiment(**experParams)
    # add experiment params used on client side
    MSMINTIMEINSTART = 500  # ms to spend in startpoint before choice
    MSMAXTIMETOCHOICE = None
    MSSHOWFEEDBACK = 1000
    # must be in ['aboveStartPoint', 'clickLocation']
    FEEDBACKTYPE = 'aboveStartPoint'
    SDPERCENTRADWIGGLEFB = 0.1  # sd for wiggling rad wrt radius of choiceArc
    SDPERCENTDEGWIGGLEFB = 0.1  # sd for wiggling angle wrt RANGEDEG of choiceArc
    # number of prev scores to show on arc.
    # must be None if fbtype != 'clickLocation'
    NLASTTOSHOW = None
    # how far bt sp and arc should fb be?  must be > 0
    # must be None if fbtype != 'aboveStartPoint'
    PERCENTBETWEENSPANDARC = 0.6
    if FEEDBACKTYPE == 'aboveStartPoint': assert not NLASTTOSHOW
    if FEEDBACKTYPE == 'clickLocation': assert not PERCENTBETWEENSPANDARC
    experParams['msmintimeinstart'] = MSMINTIMEINSTART
    experParams['msmaxtimetochoice'] = MSMAXTIMETOCHOICE
    experParams['msshowfeedback'] = MSSHOWFEEDBACK
    experParams['feedbacktype'] = FEEDBACKTYPE
    experParams['nlasttoshow'] = NLASTTOSHOW
    experParams['percentbetweenspandarc'] = PERCENTBETWEENSPANDARC
    experParams['sdpercentradwigglefb'] = SDPERCENTRADWIGGLEFB
    experParams['sdpercentdegwigglefb'] = SDPERCENTDEGWIGGLEFB
    experParams['ntrial'] = NTRIAL
    # bundle response to send
    resp = {}
    for f in subParams:
        fname = 'degoptqueue' if f == 'xOptQueue' else f
        try:  # convert numpy arrays to plain lists for JSON
            resp[fname] = subParams[f].tolist()
        # FIX: narrowed bare 'except:' -- only non-arrays (no .tolist) fall through
        except AttributeError:
            resp[fname] = subParams[f]
    for f in experParams:
        try:  # convet numpy array to list if possible
            resp[f] = experParams[f].tolist()
        except AttributeError:
            resp[f] = experParams[f]
    resp['inititrial'] = 0  # start at trial 0
    resp['rotmag'] = ROTMAG
    resp['rngseed'] = RNGSEED
    resp['initscore'] = 0  # start w 0 points
    resp['mindeg'] = MINDEG
    resp['maxdeg'] = MAXDEG
    return jsonify(**resp)
|
{"/custom.py": ["/rotationExperiment.py"]}
|
15,789
|
hemengf/my_python_lib
|
refs/heads/master
|
/door_position/batch_doorposition.py
|
import matplotlib.pyplot as plt
import processbar
import os
import subprocess
import time
from door_position.disks import *
# Sweep 8 door positions: for each, simulate 1200 frames of disks on a moving
# belt and record how many disks pass through the door opening over time.
for batchiter in range(8):
    print 'processing iteration {:d}'.format(batchiter)
    start = time.time()
    # Door opening spans 0.06 m and slides up by 0.02 m each iteration.
    env = Environment(boxsize=(0.6,0.4), \
            lower_doorbnd=np.array([0,batchiter*0.02+0.01]), \
            upper_doorbnd=np.array([0,batchiter*0.02+0.06+0.01]), \
            totnum=500, \
            dt=0.005, \
            repel_coeff=100, \
            friction_coeff=0.5, \
            belt_velocity=np.array([-0.05,0]))
    #env.create_disks(mass = 10, radius = 5)
    # load disk positions from file instead of generating them
    env.read_positions(mass = 0.005, radius = 0.010)
    #for disk in env.particle_list:
    #    print disk.position
    totframe = 1200
    passnumber_list = []
    if not os.path.exists('./passnumber_door_position_v5cm'):
        os.makedirs('./passnumber_door_position_v5cm')
    for i in range(totframe):
        env.update()
        # sample the pass count every 3rd frame
        if i%3==0:
            #env.visualize()
            #plt.savefig('./movie32/'+'{:4.0f}'.format(i)+'.tif', dpi = 300)
            #plt.close()
            pass_number = env.measure_pass()
            passnumber_list.append(pass_number)
        #if i == 1000:
        #    np.save('initial_positions', env.particle_position_array)
        processbar.processbar(i+1, totframe, 1)
    #subprocess.call('less resultsfile.txt', shell=False)
    #g = open('passnumber.txt', 'w')
    #print >> g, passnumber_list
    # one .npy time series of pass counts per door position
    np.save('./passnumber_door_position_v5cm/passnumber_list_append {:d}'.format(batchiter), passnumber_list)
    end = time.time()
    print 'time consumption', end-start,'s'
    #plt.plot(passnumber_list)
    #plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,790
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/findroot.py
|
import scipy.optimize
def F(x):
    """Vector map whose root fsolve seeks: F(x) = (x[0], x[1])."""
    first, second = x[0], x[1]
    return first, second
def g(x):
    """Scalar (or elementwise) function with its root at x = 1."""
    shifted = x - 1
    return shifted
if __name__ == "__main__":
    import numpy as np
    # Demo: solve the vector system F(x) = 0 from guess (1, 1), and find
    # the scalar root of g starting from 0 (exact root is x = 1).
    sol = scipy.optimize.fsolve(F, np.array([1,1]))
    x0 = scipy.optimize.root(g, 0)
    # Python-2 print statements (file predates Python 3)
    print sol
    print x0.x[0]
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,791
|
hemengf/my_python_lib
|
refs/heads/master
|
/door_position/passnumber_door_position_v5cm/data_analysis.py
|
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # For each of the 8 saved pass-count series, fit a straight line and
    # plot how the slope (passes per sample) varies with door position.
    n_runs = 8
    fig, ax = plt.subplots()
    slopes = []
    for run in range(n_runs):
        series = np.load('passnumber_list {:d}.npy'.format(run))
        #ax.plot(range(len(series)), series)
        slope, _intercept = np.polyfit(range(len(series)), series, 1)
        slopes.append(slope)
    plt.plot(range(n_runs), slopes)
    plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,792
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/stripes_counting.py
|
#!/usr/bin/env python
import cookb_signalsmooth
import numpy as np
import matplotlib.pyplot as plt
import sys
from find_peaks import exact_local_maxima1D, exact_local_minima1D
def stripes_counting(datafile_name):
"""
Given a 1-D array of grayscale data, find the peak number
and the valley number.
Data could be obtained by imagej grayscale measurement.
"""
pixel_values = np.loadtxt(datafile_name, skiprows = 1)
window_len = 10
smooth_values = cookb_signalsmooth.smooth(pixel_values[:,1], window_len)
plt.plot(smooth_values)
plt.plot(pixel_values[:,1])
plt.show()
s = raw_input("Is this smoothing (window_len = %d) good enough? (y/n)"%window_len)
sys.stdout.flush()
if s == "n":
unsatisfied = 1
while unsatisfied:
t = raw_input("Keep adjusting window length. New window_len = ")
window_len = int(t)
smooth_values = cookb_signalsmooth.smooth(pixel_values[:,1], window_len)
plt.plot(smooth_values)
plt.plot(pixel_values[:,1])
plt.show()
u = raw_input("Is this smoothing (window_len = %d) good enough? (y/n)"%window_len)
if u=="y":
true_values_maxima = exact_local_maxima1D(smooth_values)
maxima_number = np.sum(true_values_maxima)
true_values_minima = exact_local_minima1D(smooth_values)
minima_number = np.sum(true_values_minima)
break
elif s == "y":
true_values_maxima = exact_local_maxima1D(smooth_values)
maxima_number = np.sum(true_values_maxima)
true_values_minima = exact_local_minima1D(smooth_values)
minima_number = np.sum(true_values_minima)
else:
print "You didn't press anything..."
return maxima_number, minima_number
if __name__ == "__main__":
    import os
    import sys
    # Keep prompting until the user names an existing ImageJ .xls export
    # (extension is appended automatically).
    s = ""
    while not os.path.exists(s+".xls"):
        s = raw_input("Give me a correct data file name: ")
        sys.stdout.flush()
    maxima_number, minima_number = stripes_counting(s + ".xls")
    # Python-2 print statements (file predates Python 3)
    print "%d maxima"%maxima_number
    print "%d minima"%minima_number
    raw_input('press enter')
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,793
|
hemengf/my_python_lib
|
refs/heads/master
|
/Utape.py
|
from __future__ import division
import numpy as np
import sys
# Compute tape/belt speed U (mm/s) from pixel intervals measured between
# frames dt seconds apart; the relative scatter of the intervals gives dU.
dt = sys.argv[1] #0.005
while 1:
    try:
        # NOTE(review): Python-2 input() eval()s the typed text -- expects a
        # list/tuple of pixel intervals, e.g. [12, 13, 12].  This is unsafe
        # on untrusted input; confirm this tool is interactive-only.
        intv = input('intervels(pix): ')
        s = np.mean(intv)
        percenterr = np.std(intv)/s
        break
    except Exception as e:
        print e
while 1:
    try:
        # R is expected as an (mm, pix) pair; r = mm per pixel.
        R = input('mm/pix ratio: ')
        r = float(R[0])/float(R[1])
        U = s*r/float(dt)
        dU = percenterr*U
        break
    except Exception as e:
        print e
print '[average intv pix', s, 'pix]'
print 'U=', U,'mm/s'
print 'dU=', dU, 'mm/s'
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,794
|
hemengf/my_python_lib
|
refs/heads/master
|
/easyprompt.py
|
import sys
from colorama import init, Fore, Style
class easyprompt:
    """Numbered, colored interactive prompt for use as sys.ps1.

    The interpreter calls str() on sys.ps1 before every prompt, so each
    render increments the counter.
    """
    def __init__(self):
        init()  # initialise colorama's terminal handling
        self.count = 0

    def __str__(self):
        # BUG FIX: __str__ must RETURN a string.  The original printed the
        # prompt and implicitly returned None, which raises
        # "TypeError: __str__ returned non-string" the moment the
        # interpreter renders sys.ps1.
        self.count += 1
        return (Fore.GREEN + '(%d)>>>>>>>>>>>>>>>' % self.count
                + Style.RESET_ALL + '\n')

sys.ps1 = easyprompt()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,795
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/basinhopping_mask_foodfill_wpreprocess_bot.py
|
#!/usr/bin/env python
from __future__ import division
import sys
from scipy import interpolate
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import exposure
from scipy.optimize import basinhopping
from scipy import fftpack
from scipy import signal
from scipy.ndimage import gaussian_filter
def equalize(img_array):
    """Histogram-equalize an image; returns a float array in [0, 1].

    The input is first scaled by its maximum (a tiny epsilon avoids
    division by zero on all-black patches), then passed through skimage's
    global histogram equalization.
    """
    scaled = img_array / (img_array.max() + 1e-6)
    return exposure.equalize_hist(scaled)
def difference(data_img, generated_img,mask_patch):
"""
both images have to be 0-1float
"""
data_img = gaussian_filter(data_img,sigma=0.3)
generated_img = gaussian_filter(generated_img, sigma=0)
diff_value = np.sum(mask_patch*(data_img-generated_img)**2)
diff_value /= (mask_patch.sum())#percentage of white area
return diff_value
def surface_polynomial(size, coeff,(zoomfactory,zoomfactorx)):
    """Evaluate a quadratic surface z(x, y) on a (rows, cols) = ``size`` grid.

    ``coeff`` holds 6 coefficients [x^2, y^2, xy, x, y, const]; the zoom
    factors map subsampled pixel indices back to full-resolution
    coordinates.  (Python-2-only tuple-parameter syntax.)
    """
    def poly(x, y):
        # rescale indices to full-resolution pixel coordinates
        x*=zoomfactorx
        y*=zoomfactory
        poly = coeff[0]*x**2+coeff[1]*y**2+coeff[2]*x*y+coeff[3]*x+coeff[4]*y+coeff[5]#+coeff[6]*x**3+coeff[7]*y**3+coeff[8]*x*y**2+coeff[9]*y*x**2
        return poly
    x = np.linspace(0,size[1]-1, size[1])
    y = np.linspace(0,size[0]-1, size[0])
    # broadcasting (1,cols) against (rows,1) evaluates poly on the full grid
    zz = poly(x[None,:],y[:,None])
    return zz
def nl(coeff, data_img,(zoomfactory,zoomfactorx),mask_patch):
    """
    negative likelyhood-like function; aim to minimize this
    data_img has to be 0-1float

    Generates the interference intensity 1 + cos(4*pi/0.532 * height) for
    the candidate surface ``coeff`` (0.532 -- presumably the 532 nm laser
    wavelength in microns; confirm) and scores it against ``data_img``
    via ``difference``.  (Python-2-only tuple-parameter syntax.)
    """
    height = surface_polynomial(data_img.shape,coeff,(zoomfactory,zoomfactorx))
    expected= 1+ np.cos((4*np.pi/0.532)*height)
    expected /= expected.max()#normalize to 0-1float
    #expected = equalize(expected)
    return difference(data_img, expected,mask_patch)
def accept_test(f_new, x_new, f_old, x_old):
    """basinhopping accept test: currently accepts every proposed step.

    The original contained a bounds check on the linear coefficients
    (reject when |x_new[3]| or |x_new[4]| > 0.05) that was disabled by an
    early ``return True``, leaving it unreachable.  The dead code is
    removed; behavior is unchanged (everything is accepted).
    """
    return True
def callback(x, f, accept):
    """Per-iteration basinhopping hook; intentionally a no-op."""
    return None
def find_tilequeue4(processed_tiles):
    """Return the unprocessed 4-connected (edge) neighbours of the given tiles.

    Duplicates are removed; the returned list order is arbitrary (it comes
    from a set).
    """
    offsets = ((1, 0), (-1, 0), (0, 1), (0, -1))
    candidates = {(r + dr, c + dc)
                  for (r, c) in processed_tiles
                  for (dr, dc) in offsets}
    return list(candidates - set(processed_tiles))
def find_tilequeue8(processed_tiles):
    """Return the unprocessed 8-connected (edge + corner) neighbours of the
    given tiles.  Duplicates are removed; list order is arbitrary (set)."""
    offsets = [(dr, dc)
               for dr in (-1, 0, 1)
               for dc in (-1, 0, 1)
               if (dr, dc) != (0, 0)]
    candidates = {(r + dr, c + dc)
                  for (r, c) in processed_tiles
                  for (dr, dc) in offsets}
    return list(candidates - set(processed_tiles))
def fittile(tile, dxx,dyy,zoomfactorx, zoomfactory, data_img, mask_img,xstore, abquadrant, white_threshold):
    """Fit a quadratic phase surface to one (dyy x dxx) tile of the fringe image.

    Returns (coeff, goodness, white_percentage).  NOTE(review): reads the
    module-level globals ``threshold``, ``N`` and ``sample_size`` set in the
    __main__ section -- confirm before reusing this function elsewhere.
    """
    yy = tile[0]*dyy
    xx = tile[1]*dxx
    data_patch = data_img[yy:yy+dyy,xx:xx+dxx]
    data_patch = data_patch[::zoomfactory,::zoomfactorx]
    mask_patch = mask_img[yy:yy+dyy,xx:xx+dxx]
    mask_patch = mask_patch[::zoomfactory,::zoomfactorx]
    data_patch= equalize(data_patch)#float0-1
    white_percentage = (mask_patch.sum()/len(mask_patch.flat))
    if white_percentage < white_threshold:
        # Mostly-masked tile: skip fitting and return NaN coefficients.
        # NOTE(review): ``threshold`` here is the module-level fit threshold,
        # not ``white_threshold`` -- looks like a possible typo; confirm.
        goodness = threshold/white_percentage
        return [np.nan,np.nan,np.nan,np.nan,np.nan, np.nan],goodness, white_percentage
    # Seed the fit by extrapolating coefficients from already-fitted
    # neighbouring tiles (continuity of the quadratic surface across edges).
    initcoeff_extendlist = []
    if (int(yy/dyy)-1,int(xx/dxx)) in xstore:
        #print 'found up'
        up = xstore[(int(yy/dyy)-1,int(xx/dxx))]
        initcoeff_extendlist.append(np.array([up[0],up[1],up[2],up[2]*dyy+up[3],2*up[1]*dyy+up[4],up[1]*dyy*dyy+up[4]*dyy+up[5]]))
    if (int(yy/dyy)+1,int(xx/dxx)) in xstore:
        #print 'found down'
        up = xstore[(int(yy/dyy)+1,int(xx/dxx))]
        initcoeff_extendlist.append(np.array([up[0],up[1],up[2],-up[2]*dyy+up[3],-2*up[1]*dyy+up[4],up[1]*dyy*dyy-up[4]*dyy+up[5]]))
    if (int(yy/dyy),int(xx/dxx)-1) in xstore:
        #print 'found left'
        left = xstore[(int(yy/dyy),int(xx/dxx)-1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx+left[3],left[2]*dxx+left[4],left[0]*dxx*dxx+left[3]*dxx+left[5]]))
    if (int(yy/dyy),int(xx/dxx)+1) in xstore:
        #print 'found right'
        left = xstore[(int(yy/dyy),int(xx/dxx)+1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx+left[3],-left[2]*dxx+left[4],left[0]*dxx*dxx-left[3]*dxx+left[5]]))
    if (int(yy/dyy)-1,int(xx/dxx)-1) in xstore:
        #print 'found upperleft'
        left = xstore[(int(yy/dyy)-1,int(xx/dxx)-1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx+left[2]*dyy+left[3],left[2]*dxx+2*left[1]*dyy+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy+left[2]*dxx*dyy+left[3]*dxx+left[4]*dyy+left[5]]))
    if (int(yy/dyy)+1,int(xx/dxx)-1) in xstore:
        #print 'found lowerleft'
        left = xstore[(int(yy/dyy)+1,int(xx/dxx)-1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx-left[2]*dyy+left[3],left[2]*dxx-2*left[1]*dyy+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy-left[2]*dxx*dyy+left[3]*dxx-left[4]*dyy+left[5]]))
    if (int(yy/dyy)+1,int(xx/dxx)+1) in xstore:
        #print 'found lowerright'
        left = xstore[(int(yy/dyy)+1,int(xx/dxx)+1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx-left[2]*dyy+left[3],-left[2]*dxx-2*left[1]*dyy+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy+left[2]*dxx*dyy-left[3]*dxx-left[4]*dyy+left[5]]))
    if (int(yy/dyy)-1,int(xx/dxx)+1) in xstore:
        #print 'found upperright'
        left = xstore[(int(yy/dyy)-1,int(xx/dxx)+1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx+left[2]*dyy+left[3],-left[2]*dxx+2*left[1]*dyy+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy-left[2]*dxx*dyy-left[3]*dxx+left[4]*dyy+left[5]]))
    # Disabled distance-2 neighbour extrapolation kept by the author:
    """
    #######################################################
    if (int(yy/dyy)-2,int(xx/dxx)) in xstore:
        #print 'found up'
        up = xstore[(int(yy/dyy)-2,int(xx/dxx))]
        initcoeff_extendlist.append(np.array([up[0],up[1],up[2],up[2]*dyy*2+up[3],2*up[1]*dyy*2+up[4],up[1]*dyy*dyy*4+up[4]*dyy*2+up[5]]))
    if (int(yy/dyy)+2,int(xx/dxx)) in xstore:
        #print 'found down'
        up = xstore[(int(yy/dyy)+2,int(xx/dxx))]
        initcoeff_extendlist.append(np.array([up[0],up[1],up[2],-up[2]*dyy*2+up[3],-2*up[1]*dyy*2+up[4],up[1]*dyy*dyy*4-up[4]*dyy*2+up[5]]))
    if (int(yy/dyy),int(xx/dxx)-2) in xstore:
        #print 'found left'
        left = xstore[(int(yy/dyy),int(xx/dxx)-2)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx*2+left[3],left[2]*dxx*2+left[4],left[0]*dxx*dxx*4+left[3]*dxx*2+left[5]]))
    if (int(yy/dyy),int(xx/dxx)+2) in xstore:
        #print 'found right'
        left = xstore[(int(yy/dyy),int(xx/dxx)+2)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx*2+left[3],-left[2]*dxx*2+left[4],left[0]*dxx*dxx*4-left[3]*dxx*2+left[5]]))
    if (int(yy/dyy)-2,int(xx/dxx)-1) in xstore:
        #print 'found upperleft'
        left = xstore[(int(yy/dyy)-2,int(xx/dxx)-1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx+left[2]*dyy*2+left[3],left[2]*dxx+2*left[1]*dyy*2+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy*4+left[2]*dxx*dyy*2+left[3]*dxx+left[4]*dyy*2+left[5]]))
    if (int(yy/dyy)-1,int(xx/dxx)-2) in xstore:
        #print 'found upperleft'
        left = xstore[(int(yy/dyy)-1,int(xx/dxx)-2)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx*2+left[2]*dyy+left[3],left[2]*dxx*2+2*left[1]*dyy+left[4],left[0]*dxx*dxx*4+left[1]*dyy*dyy+left[2]*dxx*2*dyy+left[3]*dxx*2+left[4]*dyy+left[5]]))
    if (int(yy/dyy)+2,int(xx/dxx)-1) in xstore:
        #print 'found lowerleft'
        left = xstore[(int(yy/dyy)+2,int(xx/dxx)-1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx-left[2]*dyy*2+left[3],left[2]*dxx-2*left[1]*dyy*2+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy*4-left[2]*dxx*dyy*2+left[3]*dxx-left[4]*dyy*2+left[5]]))
    if (int(yy/dyy)+1,int(xx/dxx)-2) in xstore:
        #print 'found lowerleft'
        left = xstore[(int(yy/dyy)+1,int(xx/dxx)-2)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx*2-left[2]*dyy+left[3],left[2]*dxx*2-2*left[1]*dyy+left[4],left[0]*dxx*dxx*4+left[1]*dyy*dyy-left[2]*dxx*2*dyy+left[3]*dxx*2-left[4]*dyy+left[5]]))
    if (int(yy/dyy)+1,int(xx/dxx)+2) in xstore:
        #print 'found lowerright'
        left = xstore[(int(yy/dyy)+1,int(xx/dxx)+2)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx*2-left[2]*dyy+left[3],-left[2]*dxx*2-2*left[1]*dyy+left[4],left[0]*dxx*dxx*2+left[1]*dyy*dyy+left[2]*dxx*2*dyy-left[3]*dxx*2-left[4]*dyy+left[5]]))
    if (int(yy/dyy)+2,int(xx/dxx)+1) in xstore:
        #print 'found lowerright'
        left = xstore[(int(yy/dyy)+2,int(xx/dxx)+1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx-left[2]*dyy*2+left[3],-left[2]*dxx-2*left[1]*dyy*2+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy*2+left[2]*dxx*dyy*2-left[3]*dxx-left[4]*dyy*2+left[5]]))
    if (int(yy/dyy)-2,int(xx/dxx)+1) in xstore:
        #print 'found upperright'
        left = xstore[(int(yy/dyy)-2,int(xx/dxx)+1)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx+left[2]*dyy*2+left[3],-left[2]*dxx+2*left[1]*dyy*2+left[4],left[0]*dxx*dxx+left[1]*dyy*dyy*4-left[2]*dxx*dyy*2-left[3]*dxx+left[4]*dyy*2+left[5]]))
    if (int(yy/dyy)-1,int(xx/dxx)+2) in xstore:
        #print 'found upperright'
        left = xstore[(int(yy/dyy)-1,int(xx/dxx)+2)]
        initcoeff_extendlist.append(np.array([left[0],left[1],left[2],-2*left[0]*dxx*2+left[2]*dyy+left[3],-left[2]*dxx*2+2*left[1]*dyy+left[4],left[0]*dxx*dxx*2+left[1]*dyy*dyy-left[2]*dxx*2*dyy-left[3]*dxx*2+left[4]*dyy+left[5]]))
    ###############################################################
    """
    if len(initcoeff_extendlist) > 0:
        # Average the neighbour extrapolations for the initial guess.
        initcoeff_extend = np.mean(initcoeff_extendlist,axis=0)
        initcoeff = initcoeff_extend
    else: #if no touching tiles are detected, should be only for the starting tile
        # Brute-force grid search over the linear coefficients (a, b) in the
        # quadrant selected by ``abquadrant``.
        if abquadrant == 1:
            alist = np.linspace(0, sample_size, N) # x direction
            blist = np.linspace(0, sample_size, N) # y direction
        if abquadrant == 2:
            alist = np.linspace(-sample_size, 0, N) # x direction
            blist = np.linspace(0, sample_size, N) # y direction
        if abquadrant == 3:
            alist = np.linspace(-sample_size, 0, N) # x direction
            blist = np.linspace(-sample_size, 0, N) # y direction
        if abquadrant == 4:
            alist = np.linspace(0, sample_size, N) # x direction
            blist = np.linspace(-sample_size, 0, N) # y direction
        aa, bb = np.meshgrid(alist,blist)
        nl_1storder = np.empty(aa.shape)
        for i in np.arange(alist.size):
            for j in np.arange(blist.size):
                if (j-0.5*len(blist))**2+(i)**2<=(0.1*len(alist))**2:#remove central region to avoid 0,0 global min
                    nl_1storder[j,i] = np.nan
                else:
                    nl_1storder[j,i] = nl([0,0,0,aa[j,i],bb[j,i],0],data_patch,(zoomfactory,zoomfactorx),mask_patch)
                sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
                sys.stdout.flush()
        sys.stdout.write('\n')
        index = np.unravel_index(np.nanargmin(nl_1storder), nl_1storder.shape)
        index = (alist[index[1]], blist[index[0]])
        index = np.array(index)
        initcoeff_linear= np.array([0,0,0,index[0],index[1],0])
        initcoeff = initcoeff_linear
    print initcoeff
    # Refine with basinhopping, restarting (up to 5 times) from the last
    # result until the fit quality beats the module-level ``threshold``.
    iternumber = 0
    while 1:
        #print 'iternumber =', iternumber,'for',yy,xx
        result = basinhopping(nl, initcoeff, niter = 8, T=0.01, stepsize=5e-5, interval=50,accept_test=accept_test,minimizer_kwargs={'method': 'Nelder-Mead', 'args': (data_patch,(zoomfactory,zoomfactorx), mask_patch)}, disp=False, callback=callback)
        print result.fun
        if result.fun <threshold:
            xopt = result.x
            break
        else:
            initcoeff = result.x
            iternumber+=1
            if iternumber == 5:
                # NOTE(review): if the grid-search path was taken (no fitted
                # neighbours), ``initcoeff_extend`` is unbound here and this
                # raises NameError -- confirm the starting tile always
                # converges within 5 restarts.
                xopt = initcoeff_extend
                break
    goodness = result.fun
    return xopt, goodness, white_percentage
def tilewithinbound(tile, dxx, dyy, data_img):
    """True iff the (row, col) tile of size dyy x dxx pixels lies fully
    inside ``data_img``; tiles with negative indices are out of bounds."""
    row, col = tile
    if row < 0 or col < 0:
        return False
    fits_horizontally = (col + 1) * dxx <= data_img.shape[1]
    fits_vertically = (row + 1) * dyy <= data_img.shape[0]
    return fits_horizontally and fits_vertically
if __name__ == "__main__":
    from scipy.ndimage import gaussian_filter
    import time
    import matplotlib.pyplot as plt
    from scipy.ndimage import zoom
    from time import localtime, strftime
    # Flood-fill style fitting: starting from one tile, repeatedly fit the
    # best-scoring frontier tile and grow outward, stitching the per-tile
    # quadratic surfaces into one continuous height map.
    start = time.time()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    N = 100 #a,b value resolution; a, b linear term coeff
    sample_size = 0.2 #a, b value range
    abquadrant = 3
    data_img = cv2.imread('sample4.tif', 0)
    mask_img = cv2.imread('mask_bot_v2.tif', 0)
    data_img = data_img.astype('float64')
    mask_img = mask_img.astype('float64')
    mask_img /= 255.
    fitimg = np.copy(data_img)
    # per-tile fitted coefficients and corner heights
    xstore = {}
    xstore_badtiles = {}
    hstore_upperright = {}
    hstore_lowerright = {}
    hstore_lowerleft = {}
    hstore_upperleft= {}
    dyy,dxx = 81,81
    threshold = 0.08
    white_threshold = 0.4
    startingposition = (928,2192)
    startingtile = (int(startingposition[0]/dyy),int(startingposition[1]/dxx))
    zoomfactory,zoomfactorx = 1,1
    tilequeue = find_tilequeue8([startingtile])
    tilequeue = [startingtile]+tilequeue
    processed_tiles = []
    bad_tiles= []
    black_tiles= []
    tilequeue = [tile for tile in tilequeue if tilewithinbound(tile,dxx, dyy, data_img)]
    goodness_dict= {}
    while any(tilequeue):
        print tilequeue
        # check queue for a collection of goodness and get the best tile
        for tile in tilequeue:
            if tile not in goodness_dict: #avoid double checking the tiles shared by the old tilequeue
                print 'prechecking tile: ',tile
                xopttrial, goodness,white_percentage = fittile(tile,dxx,dyy,zoomfactorx, zoomfactory, data_img, mask_img,xstore,abquadrant,white_threshold)
                print 'white percentage:', white_percentage
                goodness_dict[tile] = goodness
                if white_percentage >= white_threshold:
                    if goodness <= threshold:
                        xstore[tile] = xopttrial
                    elif goodness > threshold:
                        bad_tiles.append(tile) #never used it
                        print 'bad tile:', tile
                else:
                    # mostly masked-out tile; reaching one ends the fill
                    black_tiles.append(tile)
                    print 'black tile:', tile
        goodness_queue = {tile:goodness_dict[tile] for tile in tilequeue}
        best_tile = min(goodness_queue,key=goodness_queue.get)
        yy,xx = best_tile[0]*dyy, best_tile[1]*dxx
        print 'processing best tile', (int(yy/dyy),int(xx/dxx))
        processed_tiles.append((int(yy/dyy),int(xx/dxx)))#update processed tiles
        tilequeue = find_tilequeue8(processed_tiles)#update tilequeue
        tilequeue = [tile for tile in tilequeue if tilewithinbound(tile,dxx, dyy, data_img)]
        if best_tile in black_tiles:
            break
        data_patch = data_img[yy:yy+dyy,xx:xx+dxx]
        data_patch = data_patch[::zoomfactory,::zoomfactorx]
        mask_patch = mask_img[yy:yy+dyy,xx:xx+dxx]
        mask_patch = mask_patch[::zoomfactory,::zoomfactorx]
        data_patch= equalize(data_patch)#float0-1
        xopt, goodness, white_percentage = fittile(best_tile, dxx,dyy,zoomfactorx, zoomfactory, data_img, mask_img,xstore, abquadrant, white_threshold)
        # paste the synthetic fringe pattern for this tile into the preview image
        generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, xopt,(zoomfactory,zoomfactorx)))
        generated_intensity /= generated_intensity.max()
        #plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
        #plt.show()
        generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
        fitimg[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
        if best_tile in bad_tiles:
            # draw a black border around tiles whose fit stayed above threshold
            fitimg[yy:yy+5,xx:xx+dxx] = 0
            fitimg[yy+dyy-5:yy+dyy,xx:xx+dxx] = 0
            fitimg[yy:yy+dyy,xx:xx+5] = 0
            fitimg[yy:yy+dyy,xx+dxx-5:xx+dxx] = 0
        height = surface_polynomial(data_patch.shape, xopt,(zoomfactory,zoomfactorx))
        hupperright = height[0,-1]
        hlowerright = height[-1,-1]
        hlowerleft = height[-1,0]
        hupperleft = height[0,0]
        # Collect the constant-offset estimates implied by every already
        # fitted tile sharing a corner, then average them so the surface is
        # continuous across tile borders.
        clist = []
        #upperleft node
        if (int(yy/dyy),int(xx/dxx)-1) in hstore_upperright:
            clist.append(hstore_upperright[(int(yy/dyy),int(xx/dxx)-1)])
        if (int(yy/dyy)-1,int(xx/dxx)) in hstore_lowerleft:
            clist.append(hstore_lowerleft[(int(yy/dyy)-1,int(xx/dxx))])
        if (int(yy/dyy)-1,int(xx/dxx)-1) in hstore_lowerright:
            clist.append(hstore_lowerright[(int(yy/dyy)-1,int(xx/dxx)-1)])
        #lowerleft node
        if (int(yy/dyy),int(xx/dxx)-1) in hstore_lowerright:
            correction_to_currentc = hstore_lowerright[(int(yy/dyy),int(xx/dxx)-1)]-hlowerleft
            clist.append(xopt[5]+correction_to_currentc)
        if (int(yy/dyy)+1,int(xx/dxx)-1) in hstore_upperright:
            correction_to_currentc = hstore_upperright[(int(yy/dyy)+1,int(xx/dxx)-1)]-hlowerleft
            clist.append(xopt[5]+correction_to_currentc)
        if (int(yy/dyy)+1,int(xx/dxx)) in hstore_upperleft:
            correction_to_currentc = hstore_upperleft[(int(yy/dyy)+1,int(xx/dxx))]-hlowerleft
            clist.append(xopt[5]+correction_to_currentc)
        #lowerright node
        if (int(yy/dyy),int(xx/dxx)+1) in hstore_lowerleft:
            correction_to_currentc = hstore_lowerleft[(int(yy/dyy),int(xx/dxx)+1)]-hlowerright
            clist.append(xopt[5]+correction_to_currentc)
        if (int(yy/dyy)+1,int(xx/dxx)+1) in hstore_upperleft:
            correction_to_currentc = hstore_upperleft[(int(yy/dyy)+1,int(xx/dxx)+1)]-hlowerright
            clist.append(xopt[5]+correction_to_currentc)
        if (int(yy/dyy)+1,int(xx/dxx)) in hstore_upperright:
            correction_to_currentc = hstore_upperright[(int(yy/dyy)+1,int(xx/dxx))]-hlowerright
            clist.append(xopt[5]+correction_to_currentc)
        #upperright node
        if (int(yy/dyy),int(xx/dxx)+1) in hstore_upperleft:
            correction_to_currentc = hstore_upperleft[(int(yy/dyy),int(xx/dxx)+1)]-hupperright
            clist.append(xopt[5]+correction_to_currentc)
        if (int(yy/dyy)-1,int(xx/dxx)+1) in hstore_lowerleft:
            correction_to_currentc = hstore_lowerleft[(int(yy/dyy)-1,int(xx/dxx)+1)]-hupperright
            clist.append(xopt[5]+correction_to_currentc)
        if (int(yy/dyy)-1,int(xx/dxx)) in hstore_lowerright:
            correction_to_currentc = hstore_lowerright[(int(yy/dyy)-1,int(xx/dxx))]-hupperright
            clist.append(xopt[5]+correction_to_currentc)
        if len(clist)>0:
            #print 'clist=', clist
            #if max(clist)-np.median(clist)>0.532/2:
            #    clist.remove(max(clist))
            #    print 'maxremove'
            #if np.median(clist)-min(clist)>0.532/2:
            #    clist.remove(min(clist))
            #    print 'minremove'
            xopt[5] = np.mean(clist)
            # recompute corner heights with the stitched constant term
            height = surface_polynomial(data_patch.shape, xopt,(zoomfactory,zoomfactorx))
            hupperright = height[0,-1]
            hlowerright = height[-1,-1]
            hlowerleft = height[-1,0]
            hupperleft = height[0,0]
        #if iternumber <20:
        if 1:
            #print 'coeff & corner heights stored'
            xstore[(int(yy/dyy),int(xx/dxx))]=xopt
            hstore_upperright[(int(yy/dyy),int(xx/dxx))] = hupperright
            hstore_lowerright[(int(yy/dyy),int(xx/dxx))] = hlowerright
            hstore_lowerleft[(int(yy/dyy),int(xx/dxx))] = hlowerleft
            hstore_upperleft[(int(yy/dyy),int(xx/dxx))] = hupperleft
        else:
            xstore_badtiles[(int(yy/dyy),int(xx/dxx))]=xopt
            print (int(yy/dyy),int(xx/dxx)), 'is a bad tile'
        # live 3-D preview of the growing surface (y axis flipped to image coords)
        X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
        ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1))
        ax.set_aspect('equal')
        plt.draw()
        plt.pause(0.01)
        cv2.imwrite('fitimg_bot.tif', fitimg.astype('uint8'))
    print '\n'
    np.save('xoptstore_bot',xstore)
    #np.save('xoptstore_badtiles'+strftime("%Y%m%d_%H_%M_%S",localtime()),xstore_badtiles)
    print 'time used', time.time()-start, 's'
    print 'finished'
    plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,796
|
hemengf/my_python_lib
|
refs/heads/master
|
/saffman_taylor.py
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
# Plot the analytic Saffman-Taylor finger profile
#   x = ((1 - w) / pi) * ln((1 + cos(pi*y/w)) / 2)
# for a finger of relative width w.
w = 236/519  # relative finger width — presumably measured as 236 px finger / 519 px channel; confirm
# Keep y strictly inside (-w, w): at y = +/-w the cosine reaches -1 and the log diverges.
y = np.arange(-w+0.0005,w-0.0005,0.001)
plt.plot(y, ((1-w)/np.pi)*np.log((1+np.cos(np.pi*y/w))/2))
plt.axes().set_aspect('equal')
plt.xlim(-1,1)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,797
|
hemengf/my_python_lib
|
refs/heads/master
|
/concatenate.py
|
from __future__ import division
import numpy as np
import sys
def split_concatenate(img1, img2, angle, sp):
    """
    Takes two pictures of (e.g. red and green) interference patterns and
    concatenates them in a split screen fashion for easy comparison.
    The split line is the line that passes sp===split_point and with an
    inclination of angle.

    Parameters
    ----------
    img1, img2 : ndarray, same shape
        Grayscale images; img1 fills the side above the split line
        (smaller row indices), img2 the side at/below it.
    angle : float
        Inclination of the split line, in degrees.
    sp : (col, row)
        A point the split line passes through.

    Returns
    -------
    ndarray : pixel-wise maximum of the two masked copies.
    """
    img1cp = np.copy(img1)
    img2cp = np.copy(img2)
    if img1cp.shape != img2cp.shape:
        print("I can't deal with pictures of difference sizes...")
        # Was sys.exit(0): exiting with status 0 reported success on error.
        sys.exit(1)
    angle = angle*np.pi/180
    # Vectorized split: the original per-pixel double loop ran H*W Python
    # iterations; compute the split-line row for every column at once instead.
    cols = np.arange(img1cp.shape[1])
    ic = -np.tan(angle)*(cols - sp[0]) + sp[1]        # split row per column
    below = np.arange(img1cp.shape[0])[:, None] >= ic  # rows at/under the line
    img1cp[below] = 0
    img2cp[~below] = 0
    img = np.maximum(img1cp, img2cp)
    return img
if __name__ == "__main__":
    """
    img1 is above img2
    """
    import numpy as np
    import cv2
    # Load both interference images as 8-bit grayscale (imread flag 0).
    img1 = cv2.imread('catreference.tif', 0)
    img2 = cv2.imread('greenveo2_f358enhanced.tif',0)
    # Split line through sp=(674, 175) inclined at 96.759 degrees.
    img = split_concatenate(img1,img2, angle =96.759,\
            sp=(674,175))
    # NOTE(review): this overwrites the first input image in place —
    # confirm clobbering 'catreference.tif' is intended.
    cv2.imwrite('catreference.tif', img)
    print "Finished!"
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,798
|
hemengf/my_python_lib
|
refs/heads/master
|
/contrast.py
|
from __future__ import division
import sys
contrast='uncalculated'
if len(sys.argv)>1:
contrast = (float(sys.argv[1])-float(sys.argv[2]))/(float(sys.argv[1])+float(sys.argv[2]))
print contrast
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,799
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/check.py
|
import numpy as np
# Load the goodness-of-fit dictionary saved by the fitting scripts
# (presumably {tile key: score} — confirm against the writer) and report
# the entry with the smallest (best) score.
d = np.load('goodness.npy').item()
print d
print min(d, key=d.get)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,800
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/red_amber_green/red_amber.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Compare two-beam interference fringes for red (0.630 um) and amber (0.590 um)
# light versus thickness x.  Each curve is 1 + cos(4*pi*(x + lambda/4)/lambda);
# the lambda/4 offset is presumably the reflection phase shift — confirm.
cmap = plt.get_cmap('tab10')
x = np.arange(0,20, 0.001)
red = 1+np.cos(4*np.pi*(x+0.630/4)/0.630)
amber = 1+ np.cos(4*np.pi*(x+0.59/4)/0.590)
#plt.plot(x, red+amber)
plt.title('red and amber')
plt.plot(x, red,color=cmap(3))     # tab10 index 3 = red
plt.plot(x, amber, color=cmap(1))  # tab10 index 1 = orange
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,801
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/pattern_shift1D.py
|
from __future__ import division
import scipy.optimize
import scipy.spatial.distance
#from scipy.misc import derivative
import partial_derivative
import math
import sys
#@profile
def shape_function(x):
    """Surface height h(x) at lateral position x.

    Currently an almost-flat tilted line whose base height is read from the
    first command-line argument (sys.argv[1]); earlier Gaussian / parabolic
    shapes are kept commented out for experiments.  Units are presumably
    microns (0.532 um wavelength used elsewhere) — confirm.
    """
    #return np.exp(-0.00002*((x+250)**2))
    #return -0.000008*(x**2)+ float(sys.argv[1])
    return 0.00000001*x + float(sys.argv[1])
    #return 0.00000001*x +68.362
#@profile
def find_k_refracting(k_incident, x1, n1,n2):
    """Refract the ray direction k_incident at surface point x1 (vector
    Snell's law, 2-D).

    The surface is the graph of shape_function; the upward unit normal is
    built from the local slope.  With c = -n.k and r = n1/n2 the refracted
    direction is r*k + (r*c - sqrt(1 - r^2*(1 - c^2)))*n.
    Exits the program on total internal reflection.
    """
    # Numerical slope of the surface at x1.
    gradient = partial_derivative.derivative(shape_function, x1, dx=1e-6)
    n = np.empty((2,))
    n[0] = -gradient
    n[1] = 1
    #print "n = ", n
    #print "x1 = ", x1
    norm =np.linalg.norm(n)
    n = n/norm # n is the unit normal vector pointing 'upward'
    c = -np.dot(n, k_incident)
    r = n1/n2
    sqrtterm = (1-r**2*(1-c**2))
    # Negative radicand means no transmitted ray exists (total internal
    # reflection) — report in red and abort.
    if sqrtterm < 0:
        print(Fore.RED)
        print "Total internal reflection occurred."
        print "1-r**2*(1-c**2) = \n", sqrtterm
        print(Style.RESET_ALL)
        sys.exit(0)
    factor = (r*c- math.sqrt(sqrtterm))
    k_refracting = r*k_incident + factor*n
    #print 'c =',c
    #print "factor", factor
    #print "k_refracting = ", k_refracting
    return k_refracting
#@profile
def find_x0(k_incident, x1, n1,n2):
    """Find the surface point x0 whose refracted ray reaches the detecting
    point x1 (1-D lateral coordinate).

    Solves residual(x) = 0 with scipy.optimize.root, starting from x1.
    """
    def residual(x):
        k_r = find_k_refracting(k_incident, x, n1, n2)
        height_sum = shape_function(x1) + shape_function(x)
        return k_r[0]*height_sum + k_r[1]*(x1 - x)
    solution = scipy.optimize.root(residual, x1)
    return solution.x[0]
#@profile
def optical_path_diff(k_incident, x1, n1,n2):
    """Optical path difference between the ray refracted at x0 arriving at
    surface point x1 and the direct ray, using the mirror-image point of
    p1 below y=0 for the second path (dist2).

    OPD = n2*|p0 - p1_image| - n1*|p0 - p1|*cos(angle between p0->p1 and
    k_incident).
    """
    x0 = find_x0(k_incident, x1, n1, n2)
    p0 = np.empty((2,))
    p1 = np.empty((2,))
    p1_image_point = np.empty((2,))
    p0[0] = x0
    p1[0] = x1
    p1_image_point[0] = x1
    p0[1] = shape_function(x0)
    p1[1] = shape_function(x1)
    p1_image_point[1] = -shape_function(x1)  # mirror of p1 across y = 0
    vec_x0x1 = p1-p0
    norm = np.linalg.norm(vec_x0x1)
    # Guard against the degenerate x0 == x1 case (zero-length vector).
    if norm == 0:
        norm = 1
    vec_x0x1 = vec_x0x1/norm
    cos = np.dot(vec_x0x1, k_incident)
    dist1 = np.linalg.norm(p0-p1)
    dist2 = np.linalg.norm(p0-p1_image_point)
    #print "vec_x0x1 = ", vec_x0x1
    #print "cos = ", cos
    #print "p0 = ", p0
    #print "p1 = ", p1
    #print "dist1 = ", dist1
    #print "dist2 = ", dist2
    OPD_part1 = dist1*cos*n1
    OPD_part2 = dist2*n2
    OPD = OPD_part2-OPD_part1
    return OPD
#@profile
def pattern(opd):
    """Two-beam interference intensity (range 0..2) for optical path
    difference `opd`, at wavelength 0.532 um."""
    phase = (2*np.pi/0.532)*opd
    return 1 + np.cos(phase)
if __name__ == "__main__":
    # Render a movie of the simulated 1-D interference pattern while the
    # incident beam rotates from 0 to 0.0416 rad; one frame per theta.
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.mlab import griddata
    import numpy as np
    import progressbar
    import os
    from itertools import product
    import time
    from colorama import Style, Fore
    import find_center
    import cookb_signalsmooth
    start = time.time()
    print "starting..."
    i = 0
    framenumber = 50   # number of rotation steps / movie frames
    pltnumber = 300    # detecting points per frame
    pltlength = 500    # half-width of the detecting range
    center = 0
    center_array = np.empty((framenumber, ))
    coordinates = np.linspace(-pltlength, pltlength, pltnumber)
    intensity = np.empty((pltnumber, ))
    intensity2 = np.empty((pltnumber, ))
    for theta in np.linspace(0.,0.0416,framenumber):
        i += 1
        #coordinates = np.array(list(product(np.linspace(-pltlength,pltlength,pltnumber), np.linspace(-pltlength, pltlength, pltnumber))))
        q = 0
        # Full ray-traced OPD at each detecting point; n1=1.5 above, n2=1 below.
        for detecting_point in coordinates:
            opd = optical_path_diff(k_incident = np.array([np.sin(theta), -np.cos(theta)]),\
                    x1 = detecting_point,\
                    n1 = 1.5,\
                    n2 = 1)
            intensity[q] = pattern(opd)
            opd2= 2*68.362*np.cos(np.arcsin(1.5*np.sin(theta)))# from simple formula 2nhcos(j) for air gap for sanity check; should be close
            intensity2[q] = pattern(opd2)
            q+=1
        #opd_expected = 2*shape_function(0)*np.cos(np.arcsin(np.sin(angle-0.0000001)*1.5)+0.0000001)
        #print pattern(opd)
        #print "error in OPD = " ,(opd-opd_expected)/0.532, "wavelength"
        #fig = plt.figure(num=None, figsize=(8, 7), dpi=100, facecolor='w', edgecolor='k')
        #np.save('intensity.npy', intensity)
        #intensity_smooth = cookb_signalsmooth.smooth(intensity, 15)
        #xcenter = find_center.center_position(intensity, coordinates,center)
        #center = xcenter
        #plt.plot(coordinates,intensity_smooth)
        #plt.plot(coordinates,intensity)
        #plt.show()
        #center_array[i-1] = center
        # One annotated frame per rotation angle, saved into ./movie/.
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlabel('$x,\mu m$')
        ax.set_ylim(0,2.5)
        ax.plot(coordinates, intensity)
        #ax.plot(coordinates[int(len(coordinates)/2):], intensity2[int(len(coordinates)/2):],'r') #for sanity check
        ax.text(0, 2.2, r'$rotated : %.4f rad$'%theta, fontsize=15)
        dirname = "./movie/"
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        plt.savefig(dirname+'{:4.0f}'.format(i)+'.tif')
        plt.close()
        progressbar.progressbar_tty(i, framenumber, 1)
    if not os.path.exists("./output_test"):
        os.makedirs("./output_test")
    #np.save("./output_test/center_array_%d.npy"%int(sys.argv[1]), center_array)
    print(Fore.CYAN)
    print "Total running time:", time.time()-start, "seconds"
    print(Style.RESET_ALL)
    print "center height:", sys.argv[1]
    print "Finished!"
    #plt.plot(np.linspace(0,0.06, framenumber), center_array)
    #plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,802
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/ffttest2.py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
# FFT low-pass experiment: show the (log) 2-D spectrum of 'ideal.tif',
# build an anisotropic Gaussian mask around the DC term, and compute the
# filtered image (the filtered-image plots are currently commented out).
image = cv2.imread('ideal.tif',0)
print image.shape
nrows = np.shape(image)[0]
ncols = np.shape(image)[1]
ftimage = np.fft.fft2(image)
ftimage = np.fft.fftshift(ftimage)  # move DC to the center
logftimage = np.log(ftimage)
plt.imshow(np.abs(logftimage))
sigmax, sigmay = 10, 50  # mask widths in frequency pixels (x, y)
cy, cx = nrows/2, ncols/2
y = np.linspace(0, nrows, nrows)
x = np.linspace(0, ncols, ncols)
X, Y = np.meshgrid(x, y)
gmask = np.exp(-(((X-cx)/sigmax)**2 + ((Y-cy)/sigmay)**2))
ftimagep = ftimage * gmask
#plt.imshow(np.abs(np.log(ftimagep)))
imagep = np.fft.ifft2(ftimagep)  # filtered image (unused unless plotted)
#plt.imshow(np.abs(imagep))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,803
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/plotheight_interp_whole_grayscale.py
|
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib as mpl
from scipy import interpolate
import os
# Load the fringe image plus mask images (contact line variants, edge,
# thin-film region) and normalize every mask to float 0/1.
data_img = cv2.imread('sample4.tif',0)
data_img = data_img.astype('float64')
cl_img = cv2.imread('cl.tif',0)
cl2_img = cv2.imread('cl2_larger.tif',0)
cl3_img = cv2.imread('cl3.tif',0)
edge_img = cv2.imread('cl_edge.tif',0)
thin_img = cv2.imread('thin.tif',0)
cl_img = cl_img.astype('float64')
cl_img /= 255.
cl2_img = cl2_img.astype('float64')
cl2_img /= 255.
cl3_img = cl3_img.astype('float64')
cl3_img /= 255.
edge_img = edge_img.astype('float64')
edge_img /= 255.
thin_img = thin_img.astype('float64')
thin_img /= 255.
fitimg_whole = np.copy(data_img)
# Per-region dictionaries of fitted polynomial coefficients keyed by tile
# index (row, col), produced by the per-region fitting scripts.
xstorebot = np.load('./xoptstore_bot.npy').item()
xstoreright = np.load('./xoptstore_right.npy').item()
xstoreleft = np.load('./xoptstore_left.npy').item()
xstoretopright= np.load('./xoptstore_top_right.npy').item()
xstoretopleft= np.load('./xoptstore_top_left.npy').item()
floor = -86  # baseline height used as the plot floor / zero reference
def surface_polynomial(size, coeff,(zoomfactory,zoomfactorx)):
    """Evaluate the fitted 2nd-order surface on a size[0] x size[1] grid.

    coeff = (a, b, c, d, e, f) gives a*x^2 + b*y^2 + c*x*y + d*x + e*y + f
    with x along columns and y along rows; grid indices are scaled back to
    original-image pixels by the zoom factors.  NOTE: the tuple parameter in
    the signature is Python-2-only syntax.
    """
    def poly(x, y):
        x*=zoomfactorx
        y*=zoomfactory
        poly = coeff[0]*x**2+coeff[1]*y**2+coeff[2]*x*y+coeff[3]*x+coeff[4]*y+coeff[5]#+coeff[6]*x**3+coeff[7]*y**3+coeff[8]*x*y**2+coeff[9]*y*x**2
        return poly
    x = np.linspace(0,size[1]-1, size[1])
    y = np.linspace(0,size[0]-1, size[0])
    zz = poly(x[None,:],y[:,None])
    return zz
# 3-D wireframe of the reconstructed surface, assembled tile by tile from
# the per-region coefficient dictionaries; (xxx, yyy, zzz) collect sparse
# sample points for the later spline interpolation.
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111,projection='3d')
ax.set_aspect(adjustable='datalim',aspect='equal')
ax.set_zlim(floor,0)
width = 0.8
xxx = []
yyy = []
zzz = []
ddd=1
#bot
dyy,dxx = 81,81  # tile size for the bottom region
dd=15            # keep every dd-th sample point for interpolation
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstorebot:
            xopt = xstorebot[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
            if ((int(yy/dyy)+1,int(xx/dxx)) not in xstorebot) or ((int(yy/dyy)-1,int(xx/dxx)) not in xstorebot):
                pass
            xxx+=list(X.flat[::dd])
            yyy+=list(Y.flat[::dd])
            zzz+=list(height.flat[::dd])
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/2),cstride=int(dxx/2),lw=width)
            #ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            plotheight = height-floor
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = plotheight
        else:
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#right
# Right region: same tile loop as 'bot' but with sheared tiles and a fixed
# height offset (-35) to stitch it to the bottom region.
dyy,dxx =int(41*np.tan(np.pi*52/180)),41
zoomfactory,zoomfactorx = 1,1
dd = 5
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if xx > 3850:
            continue
        if (int(yy/dyy),int(xx/dxx)) in xstoreright:
            xopt = xstoreright[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
            height-=35
            xxx+=list(X.flat[::dd])
            yyy+=list(Y.flat[::dd])
            zzz+=list(height.flat[::dd])
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
            #ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            plotheight = height-floor
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = plotheight
        else:
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#left
# Left region: restricted to 332 < xx <= 1430, offset -44.
dyy,dxx =int(42*np.tan(np.pi*53/180)),42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if xx>1430 or xx<332:
            continue
        if (int(yy/dyy),int(xx/dxx)) in xstoreleft:
            xopt = xstoreleft[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
            height-=44
            xxx+=list(X.flat[::dd])
            yyy+=list(Y.flat[::dd])
            zzz+=list(height.flat[::dd])
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
            #ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            plotheight = height-floor
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = plotheight
        else:
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#topright
# Top-right region tiles, stitched with offset -82.
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstoretopright:
            xopt = xstoretopright[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
            height-=82
            xxx+=list(X.flat[::dd])
            yyy+=list(Y.flat[::dd])
            zzz+=list(height.flat[::dd])
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
            #ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            plotheight = height-floor
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = plotheight
        else:
            pass
#topleft
# Top-left region tiles, stitched with offset -80.3.
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstoretopleft:
            xopt = xstoretopleft[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
            height-=80.3
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            xxx+=list(X.flat[::dd])
            yyy+=list(Y.flat[::dd])
            zzz+=list(height.flat[::dd])
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
            #ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            plotheight = height-floor
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = plotheight
        else:
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
# Seed near-floor sample points over the thin-film mask, then either reuse
# a cached smoothing-spline surface (znew.npy) or fit and cache a new one.
dyy,dxx =60,60
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if thin_img[yy,xx] == 0:
            xxx.append(xx)
            yyy.append(yy)
            zzz.append(floor+3)
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            Z = (floor+3)*np.ones(X.shape)
            Z*= 1-thin_img[yy:yy+dyy,xx:xx+dxx]
            Z[Z==0] = np.nan
            #ax.plot_wireframe(X,Y,Z,rstride=int(dyy/1),cstride=int(dxx/1),colors='k',lw=0.4)
if os.path.exists('./znew.npy'):
    # Cached interpolated surface: mask it, normalize above the floor and
    # paint it into the grayscale output image.
    xstart,xend = 0,data_img.shape[1]
    ystart,yend = 0,data_img.shape[0]
    xnew,ynew = np.mgrid[xstart:xend,ystart:yend]
    znew = np.load('znew.npy')
    znew[znew<floor] = np.nan
    znew*=(thin_img).T
    znew*=(cl2_img).T
    znew[znew == 0] =np.nan
    znew[:,:250] = np.nan
    plotheight = znew-floor
    print np.nanmax(plotheight)
    plotheight /= np.nanmax(plotheight)
    plotheight[np.isnan(plotheight)] = 0
    fitimg_whole[ystart:yend,xstart:xend] = (255*(plotheight)).T
    #ax.plot_wireframe(xnew[:2132,:],ynew[:2132,:],znew[:2132,:],rstride =80, cstride = 80, colors='k',lw = 0.4)
    #ax.plot_surface(xnew,ynew,znew,rstride=30,cstride=30,lw=0,cmap = 'RdBu',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
else:
    # No cache: pin the contact line to the floor, fit a bivariate spline
    # to all collected samples and save it for the next run.
    for i in range(0,cl_img.shape[0],ddd):
        for j in range(0,cl_img.shape[1],ddd):
            if cl_img[i,j] == 1:
                xxx.append(j)
                yyy.append(i)
                zzz.append(floor)
    xstart,xend = 0,data_img.shape[1]
    ystart,yend = 0,data_img.shape[0]
    xnew,ynew = np.mgrid[xstart:xend,ystart:yend]
    print 'interpolating'
    f = interpolate.bisplrep(xxx,yyy,zzz,kx=5,ky=3)
    print 'finished'
    znew = interpolate.bisplev(xnew[:,0],ynew[0,:],f)
    znew[znew<floor] =np.nan
    #znew*=(thin_img).T
    znew*=(cl2_img).T
    znew[znew == 0] =np.nan
    #znew[:,:300] = np.nan
    np.save('znew.npy',znew)
    #ax.plot_wireframe(xnew,ynew,znew,rstride =60, cstride = 60, colors='k',lw = 0.4)
#bot
# Second pass over all regions: recompute tile heights normalized by the
# overall height range (89.253) for the grayscale image.  The actual writes
# into fitimg_whole are currently commented out for all polynomial regions;
# only the thin-film pass at the end writes pixels.
dyy,dxx = 81,81
dd=15
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstorebot:
            xopt = xstorebot[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            plotheight = height-floor
            plotheight /= 89.253
            #fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*plotheight
#right
dyy,dxx =int(41*np.tan(np.pi*52/180)),41
zoomfactory,zoomfactorx = 1,1
dd = 5
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if xx > 3850:
            continue
        if (int(yy/dyy),int(xx/dxx)) in xstoreright:
            xopt = xstoreright[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
            height-=35
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            plotheight = height-floor
            plotheight /= 89.253
            #fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*plotheight
#left
dyy,dxx =int(42*np.tan(np.pi*53/180)),42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if xx>1430 or xx<332:
            continue
        if (int(yy/dyy),int(xx/dxx)) in xstoreleft:
            xopt = xstoreleft[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
            height-=44
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            plotheight = height-floor
            plotheight /= 89.253
            #fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*plotheight
#topleft
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstoretopleft:
            xopt = xstoretopleft[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
            height-=80.3
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            plotheight = height-floor
            plotheight /= 89.253
            #fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*plotheight
#topright
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstoretopright:
            xopt = xstoretopright[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
            height-=82
            #height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
            #height[height==0] = np.nan
            plotheight = height-floor
            plotheight /= 89.253
            #fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*plotheight
#thin
# Thin-film region: paint the (floor+3) plateau into the grayscale output.
dyy,dxx =10,10
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if thin_img[yy,xx] == 0:
            xxx.append(xx)
            yyy.append(yy)
            zzz.append(floor+5)
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            Z = (floor+3)*np.ones(X.shape)
            Z*= 1-thin_img[yy:yy+dyy,xx:xx+dxx]
            Z[Z==0] = np.nan
            plotheight = Z-floor
            plotheight /= 89
            plotheight[np.isnan(plotheight)] = 0
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = (255*(plotheight)).T
            #ax.plot_wireframe(X,Y,Z,rstride=int(dyy/1),cstride=int(dxx/1),colors='k',lw=0.4)
# Trace the lowest contact-line pixel per column (scanning upward, every
# 5th column/row, ignoring the top 200 rows) and draw it on the floor plane.
x = []
y = []
for j in range(0,cl_img.shape[1]-1,5):
    for i in range(cl_img.shape[0]-1,-1,-5):
        if cl_img[i,j] == 1 and i>200:
            x.append(j)
            y.append(i)
            break
ax.plot(x,y, 'k',zs=floor)
#x_edge=[]
#y_edge=[]
#z_edge=[]
#for i in range(0,edge_img.shape[0],2):
#    for j in range(0,edge_img.shape[1],2):
#        if edge_img[i,j] == 1:
#            x_edge.append(j)
#            y_edge.append(i)
#            z_edge.append(znew[j,i])
#ax.scatter(x_edge,y_edge,z_edge,c='k',s=0.01)
# Final camera angle, save the grayscale height map, show the 3-D figure.
ax.view_init(azim=128,elev=75)
plt.axis('off')
plt.tight_layout()
cv2.imwrite('fitimg_whole.tif', fitimg_whole.astype('uint8'))
#plt.imshow(fitimg_whole.astype('uint8'),cmap='cubehelix')
#plt.contour(fitimg_whole.astype('uint8')[::-1],20)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,804
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/basinhopping_abcheck.py
|
#!/usr/bin/env python
from __future__ import division
import sys
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
import numpy as np
import cv2
from skimage import exposure
from scipy.optimize import basinhopping
from scipy import signal
def equalize(img_array):
    """Histogram-equalize the image; the result is float in [0, 1]."""
    return exposure.equalize_hist(img_array)
def difference(data_img, generated_img):
    """Sum of squared per-pixel differences (both images must be 0-1 float)."""
    residual = data_img - generated_img
    return np.square(residual).sum()
def surface_polynomial(size, coeff):
    """Evaluate a 2nd-order bivariate polynomial on a size[0] x size[1] grid.

    coeff = (a, b, c, d, e, f) gives a*x^2 + b*y^2 + c*x*y + d*x + e*y + f,
    with x running along columns and y along rows (pixel units).
    """
    xs = np.arange(size[1], dtype=float)[None, :]
    ys = np.arange(size[0], dtype=float)[:, None]
    return (coeff[0]*xs**2 + coeff[1]*ys**2 + coeff[2]*xs*ys
            + coeff[3]*xs + coeff[4]*ys + coeff[5])
def nl(coeff, data_img):
    """
    negative likelyhood-like function; aim to minimize this
    data_img has to be 0-1float
    """
    surf = surface_polynomial(data_img.shape, coeff)
    model = 1 + np.cos(4*np.pi*surf/0.532)
    model /= model.max()  # normalize model fringes to 0-1 float
    return difference(data_img, model)
def surface_polynomial_dc(size, coeff, c):
    """Like surface_polynomial but with the constant term supplied separately
    as c (in units of 1/1000), so the offset can be swept independently."""
    xs = np.arange(size[1], dtype=float)[None, :]
    ys = np.arange(size[0], dtype=float)[:, None]
    return (coeff[0]*xs**2 + coeff[1]*ys**2 + coeff[2]*xs*ys
            + coeff[3]*xs + coeff[4]*ys + c/1000.)
def nl_dc(coeff, data_img):
    """
    constant decoupled
    """
    # Sweep the constant term over roughly half a fringe period so the score
    # becomes insensitive to the overall height offset.
    offsets = range(0, int(532/4), 40)
    scores = []
    for c in offsets:
        surf = surface_polynomial_dc(data_img.shape, coeff, c)
        model = 1 + np.cos(4*np.pi*surf/0.532)
        model /= model.max()  # normalize to 0-1 float
        scores.append(difference(data_img, model))
    return min(scores)/max(scores)
if __name__ == "__main__":
    # Brute-force scan of the linear coefficients (a, b) of the surface
    # polynomial over one image patch, then display the best-matching
    # generated fringe pattern and the (a, b) mismatch landscape.
    from scipy.ndimage import gaussian_filter
    import time
    import matplotlib.pyplot as plt
    from scipy.ndimage import zoom
    N = 50 #a,b value resolution; a, b linear term coeff
    sample_size = 60#a, b value range
    start = time.time()
    data_img = cv2.imread('sample5.tif', 0)
    fitimg = np.copy(data_img)
    xstore = {}
    dyy,dxx = 100,100
    yy,xx = 0,0
    patchysize, patchxsize = 100,100
    zoomfactory,zoomfactorx = 1,1
    data_patch = data_img[yy:yy+patchysize,xx:xx+patchxsize]
    data_patch= gaussian_filter(data_patch,sigma=0)
    data_patch = data_patch[::zoomfactory,::zoomfactorx]
    data_patch= equalize(data_patch)#float0-1
    alist = np.linspace(-sample_size,sample_size,2*N) # x direction
    blist = np.linspace(0, sample_size,N) # y direction
    #alist = np.linspace(-0.030,0.030,150) # x direction
    #blist = np.linspace(-0.030,0.030,150) # y direction
    aa, bb = np.meshgrid(alist,blist)
    nl_1storder = np.empty(aa.shape)
    # Evaluate the mismatch for every (a, b) pair; progress on stdout.
    for i in np.arange(alist.size):
        for j in np.arange(blist.size):
            if (j-0.5*len(blist))**2+(i)**2<=(0.*len(alist))**2:#remove central region to avoid 0,0 global min
                nl_1storder[j,i] = np.nan
            else:
                nl_1storder[j,i] = nl([0,0,0,aa[j,i],bb[j,i],0],data_patch)
                #nl_1storder[j,i] = nl_dc([0,0,0,aa[j,i],bb[j,i]],data_patch)
            sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
            sys.stdout.flush()
    sys.stdout.write('\n')
    elapsed = time.time() - start
    print "took %.2f seconds to compute the negative likelihood" % elapsed
    # Best (a, b) = global minimum of the mismatch landscape.
    index = np.unravel_index(np.nanargmin(nl_1storder), nl_1storder.shape)
    index = (alist[index[1]], blist[index[0]])
    index = np.array(index)
    initcoeff_linear= np.array([0,0,0,index[0],index[1],0])
    print initcoeff_linear
    generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, initcoeff_linear))
    generated_intensity /= generated_intensity.max()
    #generated_intenity = equalize(generated_intensity)
    # Side by side: model fringes | data patch | squared residual.
    plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
    plt.show()
    nlmin = nl_1storder[~np.isnan(nl_1storder)].min()
    nlmax = nl_1storder[~np.isnan(nl_1storder)].max()
    fig = plt.figure()
    print nl_1storder.shape
    nl_1storder[np.isnan(nl_1storder)] = 0
    ax = fig.add_subplot(111)
    plt.tick_params(bottom='off',labelbottom='off',left='off',labelleft='off')
    ax.set_aspect('equal')
    print nlmin,nlmax
    im = ax.imshow(nl_1storder,cmap='RdBu',norm=mpl.colors.Normalize(vmin=nlmin,vmax=nlmax))
    ax_divider = make_axes_locatable(ax)
    cax = ax_divider.append_axes('right',size='3%',pad='2%')
    cbar = colorbar(im,cax = cax,ticks=[nlmin,nlmax])
    #cbar.ax.set_yticklabels(['%.1fmm/s'%lowlim,'%.1fmm/s'%78,'%.1fmm/s'%highlim])
    #fig = plt.figure()
    #plt.contour(aa, bb, nl_1storder, 100)
    #ax = fig.add_subplot(111, projection='3d')
    #ax.plot_wireframe(aa,bb,nl_1storder)
    #plt.ylabel("coefficient a")
    #plt.xlabel("coefficient b")
    #plt.gca().set_aspect('equal', adjustable = 'box')
    #plt.colorbar()
    plt.show()
    print 'time used', time.time()-start, 's'
    print 'finished'
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,805
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/basinhopping_2steps_onepiece.py
|
#!/usr/bin/env python
from __future__ import division
import sys
from scipy import interpolate
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import exposure
from scipy.optimize import basinhopping
from scipy import fftpack
from scipy import signal
def equalize(img_array):
    """Histogram-equalize an image; returns an array of floats in [0, 1]."""
    # alternative (disabled): plain max-normalization img_array/img_array.max()
    return exposure.equalize_hist(img_array)
def difference(data_img, generated_img):
    """Sum of squared pixel differences; both inputs are 0-1 float images."""
    residual = data_img - generated_img
    return np.sum(residual ** 2)
def surface_polynomial(size, coeff, zoomfactors):
    """
    Evaluate a second-order polynomial height map on a size[0] x size[1] grid.

    size        -- (rows, cols) of the output array
    coeff       -- [a, b, c, d, e, f] giving a*x^2 + b*y^2 + c*x*y + d*x + e*y + f
                   (x runs along columns, y along rows)
    zoomfactors -- (zoomfactory, zoomfactorx): grid indices are scaled by these
                   so the polynomial is evaluated in un-subsampled pixel units

    Python 3 compatibility fix: the original signature used tuple-parameter
    unpacking, which PEP 3113 removed from the language; call sites, which
    pass a 2-tuple positionally, are unchanged.
    """
    zoomfactory, zoomfactorx = zoomfactors
    x = np.linspace(0, size[1] - 1, size[1])[None, :] * zoomfactorx
    y = np.linspace(0, size[0] - 1, size[0])[:, None] * zoomfactory
    return (coeff[0] * x ** 2 + coeff[1] * y ** 2 + coeff[2] * x * y
            + coeff[3] * x + coeff[4] * y + coeff[5])
def nl(coeff, data_img, zoomfactors):
    """
    Negative-likelihood-like cost; aim to minimize this.

    coeff       -- six polynomial coefficients (see surface_polynomial)
    data_img    -- measured patch; has to be a 0-1 float image
    zoomfactors -- (zoomfactory, zoomfactorx) subsampling factors of the patch

    Python 3 compatibility fix: the original declared the last parameter with
    tuple unpacking (removed by PEP 3113); callers still pass a 2-tuple.
    """
    zoomfactory, zoomfactorx = zoomfactors
    height = surface_polynomial(data_img.shape, coeff, (zoomfactory, zoomfactorx))
    # 4*pi/0.532 phase factor -- presumably a 532 nm wavelength with heights
    # in micrometres; confirm against the experimental setup
    expected = 1 + np.cos((4 * np.pi / 0.532) * height)
    expected /= expected.max()  # normalize to 0-1 float
    #expected = equalize(expected)
    return difference(data_img, expected)
def accept_test(f_new, x_new, f_old, x_old):
    """basinhopping accept test: veto any step whose linear-term coefficients
    (x_new[3], x_new[4]) leave the +/-0.05 window."""
    #return True
    out_of_window = abs(x_new[3]) > 0.05 or abs(x_new[4]) > 0.05
    return not out_of_window
def callback(x, f, accept):
    """Per-minimum basinhopping hook; diagnostic printing is disabled."""
    # intentionally a no-op (re-enable printing x[3], x[4], f, accept to debug)
    return None
if __name__ == "__main__":
    # Tile-by-tile fit of an interference fringe image: each dyy x dxx patch
    # is fitted with a quadratic surface via basinhopping, seeded from already
    # fitted neighbours (or from a brute-force scan over linear tilts).
    from scipy.ndimage import gaussian_filter
    import time
    import matplotlib.pyplot as plt
    from scipy.ndimage import zoom
    from time import localtime, strftime
    N = 30 #a,b value resolution; a, b linear term coeff
    sample_size = 0.05#a, b value range
    start = time.time()
    data_img = cv2.imread('sample.tif', 0)  # grayscale source image
    fitimg = np.copy(data_img)  # reconstruction, written patch by patch
    xstore = {}             # (row, col) tile -> fitted coefficients
    xstore_badtiles = {}    # tiles whose fit never converged
    hstore_upperright = {}  # per-tile corner heights, used to pin the
    hstore_lowerright = {}  # constant term of neighbouring tiles
    hstore_lowerleft = {}
    dyy,dxx = 200,200  # tile size in pixels
    zoomfactory,zoomfactorx = 2,2  # subsampling of each patch before fitting
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for yy in range(0,data_img.shape[0]-dyy,dyy):
        for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
            print 'processing', (int(yy/dyy),int(xx/dxx))
            data_patch = data_img[yy:yy+dyy,xx:xx+dxx]
            data_patch= gaussian_filter(data_patch,sigma=0)  # sigma=0: smoothing currently disabled
            data_patch = data_patch[::zoomfactory,::zoomfactorx]
            data_patch= equalize(data_patch)#float0-1
            # Seed the optimizer by translating the polynomial of the tile
            # above and/or to the left into this tile's local coordinates.
            initcoeff_extendlist = []
            if (int(yy/dyy)-1,int(xx/dxx)) in xstore:
                print 'found up'
                up = xstore[(int(yy/dyy)-1,int(xx/dxx))]
                initcoeff_extendlist.append(np.array([up[0],up[1],up[2],up[2]*dyy+up[3],2*up[1]*dyy+up[4],up[1]*dyy*dyy+up[4]*dyy+up[5]]))
            if (int(yy/dyy),int(xx/dxx)-1) in xstore:
                print 'found left'
                left = xstore[(int(yy/dyy),int(xx/dxx)-1)]
                initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx+left[3],left[2]*dxx+left[4],left[0]*dxx*dxx+left[3]*dxx+left[5]]))
            if len(initcoeff_extendlist) > 0:
                initcoeff_extend = np.mean(initcoeff_extendlist,axis=0)
                initcoeff = initcoeff_extend
            else:
                # No fitted neighbour: brute-force scan over linear tilts
                # (a, b) to find a starting point for basinhopping.
                alist = np.linspace(-sample_size,sample_size,2*N) # x direction
                blist = np.linspace(0, sample_size,N) # y direction
                aa, bb = np.meshgrid(alist,blist)
                nl_1storder = np.empty(aa.shape)
                for i in np.arange(alist.size):
                    for j in np.arange(blist.size):
                        if (j-0.5*len(blist))**2+(i)**2<=(0.1*len(alist))**2:#remove central region to avoid 0,0 global min
                            nl_1storder[j,i] = np.nan
                        else:
                            nl_1storder[j,i] = nl([0,0,0,aa[j,i],bb[j,i],0],data_patch,(zoomfactory,zoomfactorx))
                        sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
                        sys.stdout.flush()
                sys.stdout.write('\n')
                elapsed = time.time() - start
                index = np.unravel_index(np.nanargmin(nl_1storder), nl_1storder.shape)
                index = (alist[index[1]], blist[index[0]])
                index = np.array(index)
                initcoeff_linear= np.array([0,0,0,index[0],index[1],0])
                initcoeff = initcoeff_linear
                #generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, initcoeff_linear),(zoomfactory,zoomfactorx))
                #generated_intensity /= generated_intensity.max()
                #plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
                #plt.show()
            #initcoeff = np.array([0,0,0,0,0,0])
            iternumber = 0
            while 1:
                print 'iternumber =', iternumber,'for',yy,xx
                result = basinhopping(nl, initcoeff, niter = 50, T=100, stepsize=2e-5, interval=50,accept_test=accept_test,minimizer_kwargs={'method': 'Nelder-Mead', 'args': (data_patch,(zoomfactory,zoomfactorx))}, disp=False, callback=callback)
                print result.fun
                if result.fun <560:  # empirical "good enough" residual threshold
                    xopt = result.x
                    break
                else:
                    initcoeff = result.x
                    iternumber+=1
                if iternumber == 20:
                    # give up and fall back to the neighbour-extended guess
                    xopt = initcoeff_extend
                    break
                # NOTE(review): nesting reconstructed from a whitespace-mangled
                # dump; on this path initcoeff_linear / initcoeff_extend may be
                # left over from an earlier tile rather than defined here --
                # confirm against the original file's indentation.
                initcoeff_extend = initcoeff_linear
                #print 'using linear coefficients'
            #if iternumber == 20:
            #    xopt = initcoeff_extend
            #    break
            #print xopt
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            #plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
            #plt.show()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            fitimg[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
            # Pin the constant term to corner heights of already fitted
            # neighbours, discarding outliers more than half a wavelength
            # (0.532/2) from the median.
            clist = []
            if (int(yy/dyy),int(xx/dxx)-1) in hstore_upperright:
                print 'found upperright'
                clist.append(hstore_upperright[(int(yy/dyy),int(xx/dxx)-1)])
            if (int(yy/dyy)-1,int(xx/dxx)) in hstore_lowerleft:
                print 'found lowerleft'
                clist.append(hstore_lowerleft[(int(yy/dyy)-1,int(xx/dxx))])
            if (int(yy/dyy)-1,int(xx/dxx)-1) in hstore_lowerright:
                print 'found lowerright'
                clist.append(hstore_lowerright[(int(yy/dyy)-1,int(xx/dxx)-1)])
            if len(clist)>0:
                print 'clist=', clist
                if max(clist)-np.median(clist)>0.532/2:
                    clist.remove(max(clist))
                    print 'maxremove'
                if np.median(clist)-min(clist)>0.532/2:
                    clist.remove(min(clist))
                    print 'minremove'
                xopt[5] = np.mean(clist)
            height = surface_polynomial(data_patch.shape, xopt,(zoomfactory,zoomfactorx))
            hupperright = height[0,-1]
            hlowerright = height[-1,-1]
            hlowerleft = height[-1,0]
            if iternumber <20:
                print 'coeff & corner heights stored'
                xstore[(int(yy/dyy),int(xx/dxx))]=xopt
                hstore_upperright[(int(yy/dyy),int(xx/dxx))] = hupperright
                hstore_lowerright[(int(yy/dyy),int(xx/dxx))] = hlowerright
                hstore_lowerleft[(int(yy/dyy),int(xx/dxx))] = hlowerleft
            else:
                xstore_badtiles[(int(yy/dyy),int(xx/dxx))]=xopt
                print (int(yy/dyy),int(xx/dxx)), 'is a bad tile'
            # y axis flipped so the 3D plot matches image orientation
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
            ax.plot_wireframe(X,Y,height,rstride=20,cstride=20)
            ax.set_aspect('equal')
            plt.draw()
            plt.pause(0.01)
            cv2.imwrite('fitimg.tif', fitimg.astype('uint8'))
            print '\n'
    np.save('xoptstore'+strftime("%Y%m%d_%H_%M_%S",localtime()),xstore)
    np.save('xoptstore_badtiles'+strftime("%Y%m%d_%H_%M_%S",localtime()),xstore_badtiles)
    print 'time used', time.time()-start, 's'
    print 'finished'
    plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,806
|
hemengf/my_python_lib
|
refs/heads/master
|
/crosscenter.py
|
from __future__ import division
import numpy as np
import cv2
import matplotlib.pyplot as plt
import time
import statsmodels.api as sm
from collections import namedtuple
def roughcenter(img,ilwindow,jlwindow,i0,j0):
    """ Returns icenter, jcenter only using 4 tips of the cross shape.
    img needs to be blurred;
    Starts from i0, j0, draws a window of height and width of lwindow, jlwindow;
    Gets 4 intersections with the window edge;
    Gets ic, jc by cross connecting the 4 intersection points.
    """
    # The darkest pixel on each of the four window edges marks where a bar of
    # the cross leaves the window. (i, j) are row/col, (x, y) are col/row.
    edge1 = img[i0-int(ilwindow/2) : i0+int(ilwindow/2), j0-int(jlwindow/2)]  # left edge
    indx = np.argmin(edge1)
    i1, j1 = i0-int(ilwindow/2)+indx, j0-int(jlwindow/2)
    x1, y1 = j1,i1
    edge2 = img[i0-int(ilwindow/2) , j0-int(jlwindow/2) : j0+int(jlwindow/2)]  # top edge
    indx = np.argmin(edge2)
    i2, j2 = i0-int(ilwindow/2), j0-int(jlwindow/2)+indx
    x2, y2 = j2,i2
    edge3 = img[i0-int(ilwindow/2) : i0+int(ilwindow/2) , j0+int(jlwindow/2)]  # right edge
    indx = np.argmin(edge3)
    i3, j3 = i0-int(ilwindow/2)+indx, j0+int(jlwindow/2)
    x3, y3 = j3,i3
    edge4 = img[i0+int(ilwindow/2) ,j0-int(jlwindow/2) : j0+int(jlwindow/2)]  # bottom edge
    indx = np.argmin(edge4)
    i4, j4 = i0+int(ilwindow/2), j0-int(jlwindow/2)+indx
    x4, y4 = j4,i4
    if (x2 == x4) or (y1 == y3):
        # degenerate: a connecting line is exactly vertical or horizontal
        xc = x2
        yc = y1
    else:
        # intersect the line through tips 1&3 with the line through tips 2&4
        s13 = (y3-y1)/(x3-x1)
        s24 = (y4-y2)/(x4-x2)
        yc = (s13*s24*(x2-x1) + s24*y1-s13*y2)/(s24-s13)
        xc = (yc-y1)/s13+x1
    ic,jc = int(yc),int(xc)
    Res = namedtuple('Res','xc,yc,ic,jc,i1,j1,i2,j2,i3,j3,i4,j4')
    res = Res(xc, yc, ic, jc, i1,j1, i2, j2, i3, j3, i4, j4)
    return res
def mixture_lin(img,ilwindow,jlwindow,i0,j0,thresh):
    """Returns xcenter, ycenter of a cross shape using mixture linear regression.
    img doesn't have to be bw; but training points are 0 intensity;
    ilwindow, jlwindow,i0,j0 for target area;
    Use thresh (e.g., 0.6) to threshold classification;
    Best for two bars making a nearly vertical crossing.
    """
    # crop to the window around (i0, j0); coordinates below are window-local
    # until shifted back just before building the result
    img = img[i0-int(ilwindow/2):i0+int(ilwindow/2), j0-int(jlwindow/2):j0+int(jlwindow/2)]
    X_train = np.argwhere(img == 0 )  # zero-intensity pixels are the training set
    n = np.shape(X_train)[0] #number of points
    y = X_train[:,0]
    x = X_train[:,1]
    # EM for a 2-component mixture of regressions: w1/w2 are each point's
    # responsibilities for the near-vertical / near-horizontal bar
    w1 = np.random.normal(0.5,0.1,n)
    w2 = 1-w1
    start = time.time()
    for i in range(100):
        pi1_new = np.mean(w1)
        pi2_new = np.mean(w2)
        mod1= sm.WLS(y,sm.add_constant(x),weights = w1) #vertical
        res1 = mod1.fit()
        mod2= sm.WLS(x,sm.add_constant(y),weights = w2) #horizontal
        res2 = mod2.fit()
        y1_pred_new= res1.predict(sm.add_constant(x))
        sigmasq1 = np.sum(res1.resid**2)/n
        a1 = pi1_new * np.exp((-(y-y1_pred_new)**2)/sigmasq1)
        x2_pred_new = res2.predict(sm.add_constant(y))
        sigmasq2 = np.sum(res2.resid**2)/n
        a2 = pi2_new * np.exp((-(x-x2_pred_new)**2)/sigmasq2)
        if np.max(abs(a1/(a1+a2)-w1))<1e-5:  # responsibilities converged
            #print '%d iterations'%i
            break
        w1 = a1/(a1+a2)
        w2 = a2/(a1+a2)
    #print '%.3fs'%(time.time()-start)
    #plt.scatter(x, y,10, c=w1,cmap='RdBu')
    #w1thresh = (w1>thresh)+0
    #w2thresh = (w2>thresh)+0
    # hard-assign points to a bar by thresholding, then refit each with OLS
    x1 = x[w1>thresh]
    x2 = x[w2>thresh]
    y1 = y[w1>thresh]
    y2 = y[w2>thresh]
    mod1 = sm.OLS(y1,sm.add_constant(x1))
    res1 = mod1.fit()
    sigmasq1 = np.sum(res1.resid**2)/len(x1)
    y1_pred= res1.predict(sm.add_constant(x1))
    #plt.plot(x1, y1_pred)
    mod2 = sm.OLS(x2,sm.add_constant(y2))
    res2 = mod2.fit()
    sigmasq2= np.sum(res2.resid**2)/len(x2)
    x2_pred= res2.predict(sm.add_constant(y2))
    #plt.plot(x2_pred,y2)
    b1,k1 = res1.params # y = k1x + b1
    b2,k2 = res2.params # x = k2y + b2
    # intersection of the two fitted lines = cross center
    yc = (k1*b2+b1)/(1-k1*k2)
    xc = k2*yc + b2
    #plt.scatter(xc,yc)
    # all above values are wrt small cropped picture
    xc += j0-jlwindow/2
    x1 = x1 + j0-jlwindow/2
    x2_pred = x2_pred + j0-jlwindow/2
    yc += i0-ilwindow/2
    y1_pred = y1_pred + i0-ilwindow/2
    y2 = y2 + i0-ilwindow/2
    Res = namedtuple('Res','xc, yc,x1,y1_pred,x2_pred,y2,sigmasq1,sigmasq2')
    res = Res(xc, yc,x1,y1_pred,x2_pred,y2,sigmasq1,sigmasq2)
    return res
if __name__ == "__main__":
    # Locate the cross center in a sample frame and visualize both fitted bars.
    img = cv2.imread('c:/Users/Mengfei/nagellab/forcedwetting/velocity_tracking/sample8.tif',0)
    (_, img) = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY)  # dark cross pixels -> 0
    thresh = 0.6
    ilwindow,jlwindow = 50, 50
    x0, y0 = 421,371  # rough center guess as (col, row)
    i0, j0 = y0,x0
    res = mixture_lin(img,ilwindow,jlwindow, i0,j0,thresh)
    print res.sigmasq1
    plt.imshow(img,'gray')
    plt.scatter(res.xc,res.yc)
    plt.plot(res.x1,res.y1_pred)
    plt.plot(res.x2_pred,res.y2)
    plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,807
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/basinhopping_2steps.py
|
#!/usr/bin/env python
from __future__ import division, print_function
import sys
from scipy import interpolate
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import exposure
from scipy.optimize import basinhopping
def normalize(img_array,normrange):
    """Histogram-equalize img_array to floats in [0, 1].

    `normrange` is kept for call-site compatibility but is unused by the
    equalization path (the earlier linear-rescaling code it parameterized
    is disabled).
    """
    return exposure.equalize_hist(img_array)
def difference(reference_img, generated_img, normrange):
    """Sum of squared differences after histogram-equalizing both images."""
    equalized_ref = normalize(reference_img, normrange)
    equalized_gen = normalize(generated_img, normrange)
    return np.sum((equalized_ref - equalized_gen) ** 2)
def surface_polynomial_1storder(size, max_variation, coeff1storder):
    """Tilted plane max_variation*(a*x + b*y) sampled on a size[0] x size[1]
    grid, with x indexing rows and y indexing columns."""
    xs = np.linspace(0, size[0] - 1, size[0])[:, None]
    ys = np.linspace(0, size[1] - 1, size[1])[None, :]
    return max_variation * (coeff1storder[0] * xs + coeff1storder[1] * ys)
def nll_1storder(coeff1storder, max_variation, data, normrange):
    """Cost of a pure-tilt fringe pattern against `data` (lower is better)."""
    surface = surface_polynomial_1storder(data.shape, max_variation, coeff1storder)
    pattern = 1 + np.cos((2 / 0.532) * surface)
    pattern /= pattern.max()  # rescale to [0, 1]
    return difference(data, pattern, normrange)
def surface_polynomial(size, max_variation, coeffhi, coeff1storder):
    """Quadratic height map max_variation*(A*x^2 + B*y^2 + C*x*y + a*x + b*y + D)
    with quadratic terms and offset in `coeffhi`, linear tilt in `coeff1storder`."""
    xs = np.linspace(0, size[0] - 1, size[0])[:, None]
    ys = np.linspace(0, size[1] - 1, size[1])[None, :]
    quadratic = coeffhi[0] * xs ** 2 + coeffhi[1] * ys ** 2 + coeffhi[2] * xs * ys
    tilt = coeff1storder[0] * xs + coeff1storder[1] * ys
    return max_variation * (quadratic + tilt + coeffhi[3])
def nll(coeffhi, coeff1storder, max_variation, data, normrange):
    """Cost of the full quadratic fringe pattern against `data`."""
    height = surface_polynomial(data.shape, max_variation, coeffhi, coeff1storder)
    pattern = 1 + np.cos((2 / 0.532) * height)
    pattern /= pattern.max()  # rescale to [0, 1]
    return difference(data, pattern, normrange)
if __name__ == "__main__":
    # Two-step fit of a fringe image: brute-force scan over linear tilts,
    # then basinhopping refinement of the quadratic terms.
    from scipy.optimize import fmin
    import time
    normrange=1
    N = 14  # resolution of the (a, b) tilt scan
    sample_size = 15  # scan range for the tilt coefficients
    t0 = time.time()
    max_variation = 0.012  # overall height scale of the surface
    reference_intensity = cv2.imread('crop_small.tif', 0)
    reference_intensity = normalize(reference_intensity,1)
    #cv2.imwrite('normalized_crop.tif',255*reference_intensity)
    alist = np.linspace(0,sample_size,N) # x direction
    blist = np.linspace(-sample_size, sample_size,2*N) # y direction
    aa, bb = np.meshgrid(alist,blist)
    diff = np.empty(aa.shape)
    for i in np.arange(alist.size):
        for j in np.arange(blist.size):
            if (j-0.5*len(blist))**2+(i)**2<=(0.*len(alist))**2:
                # exclusion radius is currently 0, so this branch never fires
                diff[j,i] = np.nan
            else:
                coeff1storder = [aa[j,i],bb[j,i]]
                diff[j,i] = nll_1storder(coeff1storder,max_variation,reference_intensity,1.0)
            sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
            sys.stdout.flush()
    sys.stdout.write('\n')
    elapsed = time.time() - t0
    print("took %.2f seconds to compute the likelihood" % elapsed)
    # best linear tilt found by the scan
    index = np.unravel_index(np.nanargmin(diff), diff.shape)
    index = (alist[index[1]], blist[index[0]])
    index = np.array(index)
    initcoeffhi = np.array([[0,0,0,0]])  # quadratic terms start at zero
    coeff1storder = index
    print(index)
    simplex = 0.1*np.identity(4)+np.tile(initcoeffhi,(4,1))  # only used if fmin is re-enabled
    simplex = np.concatenate((initcoeffhi,simplex),axis=0)
    #xopt= fmin(nll, initcoeffhi, args = (coeff1storder,max_variation, reference_intensity, normrange))#, initial_simplex=simplex)
    #print(xopt)
    result = basinhopping(nll, initcoeffhi, niter = 4, T=200, stepsize=.1, minimizer_kwargs={'method': 'Nelder-Mead', 'args': (coeff1storder,max_variation, reference_intensity, normrange)}, disp=True)#, callback = lambda x, convergence, _: print('x = ', x))
    xopt = result.x
    print(result.x)
    #fig = plt.figure()
    ##plt.contour(aa, bb, diff, 100)
    #ax = fig.add_subplot(111, projection='3d')
    #ax.plot_wireframe(aa,bb,diff)
    #plt.ylabel("coefficient a")
    #plt.xlabel("coefficient b")
    #plt.gca().set_aspect('equal', adjustable = 'box')
    #plt.colorbar()
    #plt.show()
    generated_intensity = normalize(1+np.cos((2/0.532)*surface_polynomial(reference_intensity.shape, max_variation,xopt,coeff1storder)), 1.0)#works for n=1 pocket
    #cv2.imwrite('ideal_pattern.tif', 255*generated_intensity)
    cv2.imshow('', np.concatenate((generated_intensity, reference_intensity), axis = 1))
    cv2.waitKey(0)
    #ax = fig.add_subplot(111, projection = '3d')
    #ax.plot_surface(xx[::10,::10], yy[::10,::10], zz[::10,::10])
    #plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,808
|
hemengf/my_python_lib
|
refs/heads/master
|
/door_position/try.py
|
class trythis:
    """ Don't have to initialize data attributes; they can be defined directly in method attributes.
    """
    # class-level attribute, shared by all instances
    attr_directly_under_class_def = 30
    def seeattr(self):
        # creates the instance attribute on first call
        self.attr = 20
    def seeagain(self):
        # rebinds the same instance attribute
        self.attr = 200
if __name__ == "__main__":
    # Demonstrate class-level vs method-created instance attributes.
    print trythis.__doc__
    x = trythis()
    x.seeattr()
    print x.attr
    x.seeagain()
    print x.attr
    print x.attr_directly_under_class_def
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,809
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/plotheight_interp.py
|
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
# Load the fringe image, the per-region per-tile polynomial fits, and the
# two mask images used for anchoring and clipping the interpolated surface.
data_img = cv2.imread('sample4.tif',0)
data_img = data_img.astype('float64')
xstore = np.load('./xoptstore_bot.npy').item()  # the fit set actually plotted below
xstorebot = np.load('./xoptstore_bot.npy').item()
xstoreright = np.load('./xoptstore_right.npy').item()
xstoreleft = np.load('./xoptstore_left.npy').item()
xstoretopright= np.load('./xoptstore_top_right.npy').item()
xstoretopleft= np.load('./xoptstore_top_left.npy').item()
cl_img = cv2.imread('cl.tif',0)  # presumably the contact line; 255-pixels are anchored at z=floor below
cl2_img = cv2.imread('mask_bot_v2.tif',0)
fitimg_whole = np.copy(data_img)
cl2_img = cl2_img.astype('float64')
cl2_img /= 255.  # turn the mask into 0/1 floats
def surface_polynomial(size, coeff, zoomfactors):
    """
    Evaluate a second-order polynomial height map on a size[0] x size[1] grid.

    size        -- (rows, cols) of the output array
    coeff       -- [a, b, c, d, e, f] giving a*x^2 + b*y^2 + c*x*y + d*x + e*y + f
                   (x runs along columns, y along rows)
    zoomfactors -- (zoomfactory, zoomfactorx): grid indices are scaled by these
                   so the polynomial is evaluated in un-subsampled pixel units

    Python 3 compatibility fix: the original signature used tuple-parameter
    unpacking, which PEP 3113 removed from the language; call sites, which
    pass a 2-tuple positionally, are unchanged.
    """
    zoomfactory, zoomfactorx = zoomfactors
    x = np.linspace(0, size[1] - 1, size[1])[None, :] * zoomfactorx
    y = np.linspace(0, size[0] - 1, size[0])[:, None] * zoomfactory
    return (coeff[0] * x ** 2 + coeff[1] * y ** 2 + coeff[2] * x * y
            + coeff[3] * x + coeff[4] * y + coeff[5])
#dyy,dxx =int(41*np.tan(np.pi*52/180)),41
floor = -89  # z level at which the mask-line points are anchored
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect(adjustable='datalim',aspect='equal')
ax.set_zlim(floor,0)
width = 0.8  # wireframe line width for the per-tile surfaces
dd=80   # subsampling stride of surface points fed to the interpolator
ddd=20  # sampling stride over the mask image
xxx = []  # scattered (x, y, z) support points for the global interpolation
yyy = []
zzz = []
# anchor every sampled 255-pixel of cl_img at z = floor
for i in range(0,cl_img.shape[0],ddd):
    for j in range(0,cl_img.shape[1],ddd):
        if cl_img[i,j] == 255:
            xxx.append(j)
            yyy.append(i)
            zzz.append(floor)
#bot
dyy,dxx = 81,81
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstore:
            xopt = xstore[(int(yy/dyy),int(xx/dxx))]
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
            height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
            # feed subsampled surface points into the interpolation support set
            xxx+=list(X.flat[::dd])
            yyy+=list(Y.flat[::dd])
            zzz+=list(height.flat[::dd])
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
            # NOTE(review): with __future__ division, dyy/zoomfactory is a
            # float size; newer numpy rejects non-integer linspace counts --
            # confirm against the numpy version in use.
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
        else:
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#xstart,xend = 1698,1942
#ystart,yend = 1726,2323
xstart,xend = 0,data_img.shape[1]
ystart,yend = 0,data_img.shape[0]
print 'interpolating'
# smooth global surface through the tile samples and mask-line anchors
f = interpolate.interp2d(xxx,yyy,zzz,kind='quintic')
print 'finish'
XX,YY = np.meshgrid(range(xstart,xend),range(ystart,yend))
ZZ = f(range(xstart,xend),range(ystart,yend))
ZZ*=cl2_img[ystart:yend,xstart:xend]  # zero out everything outside the mask
ZZ[ZZ == 0] =np.nan  # hide masked-out points in the plot
ZZ[:,:300] = np.nan
ax.plot_wireframe(XX,YY,ZZ,rstride =80, cstride = 80, colors='k',lw=0.4)
#ax.contour3D(XX,YY,ZZ,50,cmap='binary')
cv2.imwrite('fitimg_whole.tif', fitimg_whole.astype('uint8'))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,810
|
hemengf/my_python_lib
|
refs/heads/master
|
/left_partial.py
|
from __future__ import division
def derivative(f, x, dx=1e-2):
    """Estimate f'(x) by the symmetric (central) difference with step dx."""
    forward = f(x + dx)
    backward = f(x - dx)
    return (forward - backward) / (2 * dx)
if __name__ == "__main__":
    # Compare the finite-difference estimate against mpmath's diff; the tiny
    # (and negative) dx values probe floating-point cancellation error.
    from mpmath import *
    mp.dps =2
    def f(x):
        return x**4
    print derivative(f, 1, dx=1e-8)-4
    print derivative(f, 1, dx=-1e-8)-4
    print diff(f,1.)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,811
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/normalization_test.py
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
from skimage import exposure
# Compare histogram-equalization variants of a cropped fringe image and
# inspect the resulting intensity histograms.
ideal_img = cv2.imread('ideal.tif', 0)
crop_img = cv2.imread('crop.tif',0)
crop_eq = exposure.equalize_hist(crop_img)
crop_eq2 = exposure.equalize_hist(crop_eq)  # second pass tests idempotence
crop_adapteq = exposure.equalize_adapthist(crop_img, clip_limit = 0.03)
plt.imshow(crop_eq-crop_eq2)  # difference shows whether the 2nd pass changes anything
#plt.imshow(np.concatenate((crop_eq,crop_eq2),axis=1))
plt.show()
#print np.amax(crop_eq)
#cv2.imwrite('crop_eq.tif',crop_eq)
#cv2.imwrite('crop_adapteq.tif', crop_adapteq)
#cv2.imwrite('crop_contrast_stre', crop_contrast_stre)
#density_ideal= gaussian_kde(ideal_img.flatten())
#density_crop= gaussian_kde(crop_img.flatten())
#density_ideal.covariance_factor = lambda:0.01
#density_crop.covariance_factor = lambda:0.1
#density_ideal._compute_covariance()
#density_crop._compute_covariance()
#x = np.linspace(0,255, 256)
hist_ideal, _ = np.histogram(ideal_img.flatten(), bins = np.amax(ideal_img))
hist_crop, _ = np.histogram(crop_img.flatten(), bins = np.amax(crop_img))
# NOTE(review): crop_eq is float in [0, 1], so np.amax(crop_eq) is a float
# bin count -- newer numpy rejects non-integer `bins`; confirm intended.
hist_crop_eq, _ = np.histogram(crop_eq.flatten(), bins = np.amax(crop_eq))
#plt.plot(ideal_img.size*density_ideal(x))
#plt.plot(hist_ideal)
#plt.plot(crop_img.size*density_crop(x)[:len(hist_crop)])
#plt.plot(hist_crop)
plt.plot(hist_crop_eq)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,812
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/plotheight.py
|
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# Load the source image and the saved per-tile polynomial coefficients.
data_img = cv2.imread('sample5.tif')
xstore = np.load('xoptstore_sample5.npy').item()  # (row, col) tile -> coefficients
print xstore
#xstore_badtiles=np.load('xoptstore_badtiles20180513_21_22_42.npy').item()
def surface_polynomial(size, coeff, zoomfactors):
    """
    Evaluate a second-order polynomial height map on a size[0] x size[1] grid.

    size        -- (rows, cols) of the output array
    coeff       -- [a, b, c, d, e, f] giving a*x^2 + b*y^2 + c*x*y + d*x + e*y + f
                   (x runs along columns, y along rows)
    zoomfactors -- (zoomfactory, zoomfactorx): grid indices are scaled by these
                   so the polynomial is evaluated in un-subsampled pixel units

    Python 3 compatibility fix: the original signature used tuple-parameter
    unpacking, which PEP 3113 removed from the language; call sites, which
    pass a 2-tuple positionally, are unchanged.
    """
    zoomfactory, zoomfactorx = zoomfactors
    x = np.linspace(0, size[1] - 1, size[1])[None, :] * zoomfactorx
    y = np.linspace(0, size[0] - 1, size[0])[:, None] * zoomfactory
    return (coeff[0] * x ** 2 + coeff[1] * y ** 2 + coeff[2] * x * y
            + coeff[3] * x + coeff[4] * y + coeff[5])
# Rebuild each tile's fitted surface and draw it as a 3D wireframe.
dyy,dxx = 100,100  # tile size used during fitting
zoomfactory,zoomfactorx = 1,1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal','box')
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstore:
            xopt = xstore[(int(yy/dyy),int(xx/dxx))]
        else:
            # NOTE(review): on a missing tile, xopt keeps the previous tile's
            # value (and is undefined if the very first tile is missing).
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
        # y axis flipped so the plot matches image orientation
        X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
        height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
        ax.plot_wireframe(X,Y,height,rstride=int(dxx/1),cstride=int(dyy/1))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,813
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/OPDcorrection/plotcorrection.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
# Optical-path-difference correction dL versus beam angle theta, with sliders
# for the incidence angle a1 and the distance OB.
theta = np.arange(0,0.02,0.001)
n1 = 1.5  # refractive indices of the two media
n2 = 1
a1= np.pi/2
OB =500*1000  # scaled by 1000 like the slider value -- presumably um -> nm; confirm
# refraction angle from the arcsin/arccos chain, then the path correction
a2 = np.arccos((n2/n1)*np.sin(np.arcsin((n1/n2)*np.cos(a1)+2*theta)))
s = (np.sin((a1-a2)/2))**2
dL = -2*n1*OB*s
fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.2)  # leave room for the sliders
l, = plt.plot(theta,dL)
ax.set_ylim(-600,600)
ax.set_xlabel(r'$\theta$')
ax.set_ylabel('nm')
xa1slider = plt.axes([0.25,0.02,0.65,0.03])
xOBslider = plt.axes([0.25,0.05,0.65,0.03])
a1slider = Slider(xa1slider,'a1',np.pi/2-0.5,np.pi/2,valinit=np.pi/2-0.5)
OBslider = Slider(xOBslider,'OB',-500,1000,valinit=0)
def update(val):
    # Slider callback: recompute dL from current slider values and refresh.
    OB = OBslider.val*1000
    a1 = a1slider.val
    a2 = np.arccos((n2/n1)*np.sin(np.arcsin((n1/n2)*np.cos(a1)+2*theta)))
    s = (np.sin((a1-a2)/2))**2
    dL = -2*n1*OB*s
    #fig.canvas.draw_idle()
    l.set_ydata(dL)
    ax.set_ylim(-600,600)
a1slider.on_changed(update)
OBslider.on_changed(update)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,814
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/basinhopping_2steps_version1.py
|
#!/usr/bin/env python
from __future__ import division
import sys
from scipy import interpolate
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import exposure
from scipy.optimize import basinhopping
def equalize(img_array):
    """Histogram-equalize an image into an array of floats in [0, 1]."""
    return exposure.equalize_hist(img_array)
def difference(data_img, generated_img):
    """Sum of squared pixel differences between two 0-1 float images."""
    sq_err = (data_img - generated_img) ** 2
    return np.sum(sq_err)
def surface_polynomial(size, max_variation, coeff, c):
    """Quadratic height map scaled by max_variation, plus constant offset c/1000.

    NOTE(review): x is built from size[0] but broadcast along axis 1 (and y,
    from size[1], along axis 0), so the output is shaped (size[1], size[0]) --
    transposed relative to `size`. Harmless for the square patches used in
    __main__, but confirm before calling with non-square sizes.
    """
    def poly(x, y):
        # c is given in 1/1000ths of the height unit used by the 0.532 constant
        poly = max_variation*(coeff[0]*x**2+coeff[1]*y**2+coeff[2]*x*y+coeff[3]*x+coeff[4]*y)+c/1000.
        return poly
    x = np.linspace(0,size[0]-1, size[0])
    y = np.linspace(0,size[1]-1, size[1])
    zz = poly(x[None,:],y[:,None])
    return zz
def nl(coeff, max_variation, data_img):
    """
    Negative-likelihood-like cost; aim to minimize this.
    data_img has to be a 0-1 float image.

    The constant term c of the surface is swept over several offsets (so the
    stripes shift by at least one cycle) and the ratio min/max of the
    resulting mismatches is returned.
    """
    offsets = range(0, int(532/4), 66)  # varying c term in surface_polynomial
    scores = []
    for c in offsets:
        height = surface_polynomial(data_img.shape, max_variation, coeff, c)
        pattern = 1 + np.cos(4 * np.pi * height / 0.532)
        pattern /= pattern.max()  # normalize to 0-1 float
        scores.append(difference(data_img, pattern))
    return min(scores) / max(scores)
if __name__ == "__main__":
    # Per-patch fringe fitting (version 1): scan linear tilts, refine with
    # basinhopping, then pick the best constant offset c in a separate sweep.
    from scipy.ndimage import gaussian_filter
    import time
    import matplotlib.pyplot as plt
    from scipy.ndimage import zoom
    N = 40 #a,b value resolution; a, b linear term coeff
    sample_size = 40#a, b value range
    start = time.time()
    max_variation = 0.001
    data_img = cv2.imread('sample.tif', 0)
    fitimg = np.copy(data_img)
    for yy in range(100,1400,100):
        for xx in range(200,700,100):#xx,yy starting upper left corner of patch
            patchysize, patchxsize = 100,100
            zoomfactory,zoomfactorx = 1,1
            data_patch = data_img[yy:yy+patchysize,xx:xx+patchxsize]
            data_patch= gaussian_filter(data_patch,sigma=0)  # sigma=0: smoothing disabled
            data_patch = data_patch[::zoomfactory,::zoomfactorx]
            data_patch= equalize(data_patch)#float0-1
            # brute-force scan over the linear tilt (a, b)
            alist = np.linspace(0,sample_size,N) # x direction
            blist = np.linspace(-sample_size, sample_size,2*N) # y direction
            aa, bb = np.meshgrid(alist,blist)
            nl_1storder = np.empty(aa.shape)
            for i in np.arange(alist.size):
                for j in np.arange(blist.size):
                    if (j-0.5*len(blist))**2+(i)**2<=(0.2*len(alist))**2:#remove central region to avoid 0,0 gloabal min
                        nl_1storder[j,i] = np.nan
                    else:
                        nl_1storder[j,i] = nl([0,0,0,aa[j,i],bb[j,i]],max_variation,data_patch)
                    sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
                    sys.stdout.flush()
            sys.stdout.write('\n')
            elapsed = time.time() - start
            print "took %.2f seconds to compute the negative likelihood" % elapsed
            index = np.unravel_index(np.nanargmin(nl_1storder), nl_1storder.shape)
            index = (alist[index[1]], blist[index[0]])
            index = np.array(index)
            initcoeff= np.array([0,0,0,index[0],index[1]])
            print initcoeff
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, max_variation,initcoeff,0))
            generated_intensity /= generated_intensity.max()
            plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
            plt.show()
            iternumber = 0
            itermax = 3
            while 1:
                print 'iternumber =', iternumber
                result = basinhopping(nl, initcoeff, niter = 50, T=2000, stepsize=.01, minimizer_kwargs={'method': 'Nelder-Mead', 'args': (max_variation, data_patch)}, disp=True)#, callback = lambda x, convergence, _: print('x = ', x))
                if result.fun < 0.25:  # empirical convergence threshold
                    break
                else:
                    iternumber+=1
                    if iternumber == itermax:
                        break
                    initcoeff = result.x
            xopt = result.x
            print xopt
            # sweep the constant offset c and keep the best alignment
            clist =range(0,int(532/2),4)
            difflist = [0]*len(clist)
            for cindx,c in enumerate(clist):
                height = surface_polynomial(data_patch.shape, max_variation,xopt,c)
                expected= 1+ np.cos(4*np.pi*height/0.532)
                expected /= expected.max()
                difflist[cindx] = difference(data_patch, expected)
            c = clist[np.argmin(difflist)]
            print [int(x) for x in difflist]
            print 'c =', c
            #fig = plt.figure()
            ##plt.contour(aa, bb, diff, 100)
            #ax = fig.add_subplot(111, projection='3d')
            #ax.plot_wireframe(aa,bb,diff)
            #plt.ylabel("coefficient a")
            #plt.xlabel("coefficient b")
            #plt.gca().set_aspect('equal', adjustable = 'box')
            #plt.colorbar()
            #plt.show()
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, max_variation,xopt,c))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
            plt.show()
            fitimg[yy:yy+patchysize,xx:xx+patchxsize] = 255*generated_intensity
            cv2.imwrite('fitimg.tif', fitimg.astype('uint8'))
            #cv2.imshow('', np.concatenate((generated_intensity, data_patch), axis = 1))
            #cv2.waitKey(0)
            #ax = fig.add_subplot(111, projection = '3d')
            #ax.plot_surface(xx[::10,::10], yy[::10,::10], zz[::10,::10])
            #plt.show()
    print 'time used', time.time()-start, 's'
    print 'finished'
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,815
|
hemengf/my_python_lib
|
refs/heads/master
|
/boundaryv/brownian_gas.py
|
from __future__ import division
import progressbar
import matplotlib.pyplot as plt
import numpy as np
class gas:
    """Placeholder base type for gas constituents; carries no shared state yet."""
    def __init__(self):
        pass
class Dimer(gas):
    """Two equal-mass point particles joined by a harmonic bond.

    State (positions, velocities, force contributions) is kept per particle.
    `positionCOM` is computed once at construction and is not maintained
    afterwards.
    """
    def __init__(self, mass, radius, restlength):
        self.position1 = np.zeros(2)
        self.position2 = np.zeros(2)
        self.positionCOM = (self.position1 + self.position2)/2.0
        self.restlength = restlength  # equilibrium bond length
        self.length = restlength      # current bond length
        self.orientation = 0.         # bond angle in radians
        # Net force and its individual contributions, one 2-vector per particle.
        self.force1 = np.array((0.,0.))
        self.force2 = np.array((0.,0.))
        self.velocity1= np.array((0.,0.))
        self.velocity2= np.array((0.,0.))
        self.kickforce1= np.array((0.,0.))
        self.kickforce2= np.array((0.,0.))
        self.repelforce1= np.array((0.,0.))
        self.repelforce2= np.array((0.,0.))
        self.bondforce1= np.array((0.,0.))
        self.bondforce2= np.array((0.,0.))
        self.dissipation1= np.array((0.,0.))
        self.dissipation2= np.array((0.,0.))
        self.radius = radius
        self.mass = mass
    def interact(self):
        # Bug fix: the original signature `def interact():` was missing `self`,
        # so any instance call dimer.interact() raised TypeError. Still a stub.
        pass
    def accelerate(self, acceleration1, acceleration2, anglechange):
        """Add velocity increments to both particles.

        NOTE(review): `anglechange` is accepted but unused; kept for
        interface compatibility with existing callers.
        """
        self.velocity1 += acceleration1
        self.velocity2 += acceleration2
    def move(self, velocity1, velocity2):
        """Advance both particles by their stored velocities.

        NOTE(review): the parameters are ignored (matches original behavior);
        displacement always uses self.velocity1/2.
        """
        self.position1 += self.velocity1
        self.position2 += self.velocity2
def touch(particle1pos, particle2pos, particle1size, particle2size):
    """Return the positive overlap depth of two circles, or 0.0 if they are apart."""
    gap = np.linalg.norm(particle1pos - particle2pos)
    reach = particle1size + particle2size
    return reach - gap if gap < reach else 0.
def touchbnd(particle_position, radius, box_size):
    """Wall-contact flags [W, N, E, S] for a circle in a square box of side box_size."""
    x = particle_position[0]
    y = particle_position[1]
    return [
        1 if x <= radius else 0,             # west wall
        1 if y >= box_size - radius else 0,  # north wall
        1 if x >= box_size - radius else 0,  # east wall
        1 if y <= radius else 0,             # south wall
    ]
def findnearest(particle, particle_array):
    """Index of the row of `particle_array` closest to `particle` (NaN rows skipped)."""
    sq_dists = ((particle_array - particle) ** 2).sum(axis=1)
    return np.nanargmin(sq_dists)
class Environment:
def __init__(self, boxsize, totnum, dt):
self.boxsize = boxsize
self.totnum = totnum
self.particle_position_array = np.empty((2*self.totnum,2))
self.particle_position_array[:] = np.nan
self.dimer_list = [0]*self.totnum
self.orientationlist = [0]*self.totnum
self.bondlist = [[(0.,0.),(0.,0.)]]*totnum
self.removallist = []
self.dt = dt
def create_dimers(self, mass, radius, restlength):
# Place the first dimer
dimer = Dimer(mass, radius, restlength)
dimer.position1 = np.random.uniform(radius, self.boxsize-radius, 2)
#dimer.position1 = np.random.uniform(radius, 8*radius, 2)
out_of_bnd = 1
while out_of_bnd:
dimer.orientation = np.random.uniform(0, 2*np.pi)
xtemp = dimer.position1[0] + dimer.length*np.cos(dimer.orientation)
ytemp = dimer.position1[1] + dimer.length*np.sin(dimer.orientation)
# Unless sum of tchbndlist is zero, particle is out of bnd
out_of_bnd = sum(touchbnd((xtemp, ytemp), radius, self.boxsize))
dimer.position2[0] = xtemp
dimer.position2[1] = ytemp
self.orientationlist[0] = dimer.orientation
self.dimer_list[0] = dimer
self.particle_position_array[0,:] = dimer.position1
self.particle_position_array[1,:] = dimer.position2
self.bondlist[0] = (dimer.position1[0],dimer.position2[0]),(dimer.position1[1],dimer.position2[1])
# Create 2nd-nth dimmer without overlapping
for n in range(1,self.totnum):
overlap = 1
# Create particle1
failcount1 = 0
while overlap:
failcount1 += 1
dimer = Dimer(mass, radius, restlength)
dimer.position1 = np.random.uniform(radius+1, self.boxsize-radius-1, 2)
nearest_idx = findnearest(dimer.position1, self.particle_position_array)
overlap = touch(dimer.position1, self.particle_position_array[nearest_idx], radius, radius)
if failcount1 >= 100000:
self.removallist.append(n)
break
# Create particle2
out_of_bnd = 1
overlap = 1
failcount2 = 0
while out_of_bnd or overlap:
failcount2 += 1
dimer.orientation = np.random.uniform(0, 2*np.pi)
xtemp = dimer.position1[0] + dimer.length*np.cos(dimer.orientation)
ytemp = dimer.position1[1] + dimer.length*np.sin(dimer.orientation)
out_of_bnd = sum(touchbnd((xtemp, ytemp), radius, self.boxsize))
nearest_idx = findnearest((xtemp, ytemp), self.particle_position_array)
overlap = touch((xtemp, ytemp), self.particle_position_array[nearest_idx], radius, radius)
if failcount2 >= 100000:
self.removallist.append(n)
break
dimer.position2[0] = xtemp
dimer.position2[1] = ytemp
self.particle_position_array[2*n,:] = dimer.position1
self.particle_position_array[2*n+1, :] = dimer.position2
self.dimer_list[n] = dimer
self.orientationlist[n] = dimer.orientation
self.bondlist[n] = (dimer.position1[0],dimer.position2[0]),(dimer.position1[1],dimer.position2[1])
progressbar.progressbar_tty(n+1,self.totnum,1)
# Update dimer_list and everything related for removal
self.removallist = list(set(self.removallist))
print 'updating dimerlist, removing', self.removallist, len(self.removallist), ''
self.dimer_list = [i for j, i in enumerate(self.dimer_list) if j not in self.removallist]
newlength = len(self.dimer_list)
self.orientationlist = [0]*newlength
self.bondlist = [[(0.,0.),(0.,0.)]]*newlength
self.particle_position_array = np.empty((2*newlength,2))
self.particle_position_array[:] = np.nan
for n, dimer in enumerate(self.dimer_list):
self.particle_position_array[2*n,:] = dimer.position1
self.particle_position_array[2*n+1, :] = dimer.position2
self.orientationlist[n] = dimer.orientation # Given randomly upon creation
self.bondlist[n] = (dimer.position1[0],dimer.position2[0]),(dimer.position1[1],dimer.position2[1])
print 'now length of dimerlist', len(self.dimer_list)
def visualize(self):
fig = plt.figure()
radius = self.dimer_list[0].radius
for dimer in self.dimer_list:
circle = plt.Circle(dimer.position1, radius, fill=False)
fig.gca().add_artist(circle)
circle = plt.Circle(dimer.position2, radius, fill=False)
fig.gca().add_artist(circle)
count = 0
for n, dimer in enumerate(self.dimer_list):
plt.plot(self.bondlist[n][0],self.bondlist[n][1],'k')
count += 1
plt.axis([0, self.boxsize, 0, self.boxsize])
plt.axes().set_aspect('equal')
return count
def kick(self,kickf):
for n, dimer in enumerate(self.dimer_list):
kickangle = self.orientationlist[n]
dimer.kickforce1 = kickf*np.cos(kickangle), kickf*np.sin(kickangle)
dimer.kickforce1 = np.asarray(dimer.kickforce1)
dimer.kickforce2 = dimer.kickforce1
def dissipate(self, coefficient):
for n, dimer in enumerate(self.dimer_list,coefficient):
dimer.disspation1 = -coefficient*dimer.velocity1
dimer.disspation2 = -coefficient*dimer.velocity2
def collide(self,repel_coefficient):
for n, dimer in enumerate(self.dimer_list):
radius = dimer.radius
dimer.repelforce1 = np.zeros(2)
dimer.repelforce2 = np.zeros(2)
for i, particle_position in enumerate(self.particle_position_array):
if i != 2*n: # for particle1, make sure to exclude itself
overlap1 = touch(dimer.position1, particle_position, radius, radius)
unit_vector = (dimer.position1-particle_position)/np.linalg.norm((dimer.position1-particle_position))
dimer.repelforce1 += repel_coefficient*unit_vector*overlap1
if i != 2*n+1: # for particle2, exclude itself
overlap2 = touch(dimer.position2, particle_position, radius, radius)
unit_vector = (dimer.position2-particle_position)/np.linalg.norm((dimer.position2-particle_position))
dimer.repelforce2 += repel_coefficient*unit_vector*overlap2
def bounce(self):
radius = self.dimer_list[0].radius
for dimer in self.dimer_list:
tchbndlist = touchbnd(dimer.position1, radius, self.boxsize)
if tchbndlist[0] * dimer.velocity1[0] < 0:
dimer.velocity1[0] = 0.
if tchbndlist[2] * dimer.velocity1[0] > 0:
dimer.velocity1[0] = 0.
if tchbndlist[1] * dimer.velocity1[1] > 0:
dimer.velocity1[1] = 0.
if tchbndlist[3] * dimer.velocity1[1] < 0:
dimer.velocity1[1] = 0.
tchbndlist = touchbnd(dimer.position2, radius, self.boxsize)
if tchbndlist[0] * dimer.velocity2[0] < 0:
dimer.velocity2[0] = 0.
if tchbndlist[2] * dimer.velocity2[0] > 0:
dimer.velocity2[0] = 0.
if tchbndlist[1] * dimer.velocity2[1] > 0:
dimer.velocity2[1] = 0.
if tchbndlist[3] * dimer.velocity2[1] < 0:
dimer.velocity2[1] = 0.
def bond_deform(self,coefficient):
for n, dimer in enumerate(self.dimer_list):
bondlength = np.linalg.norm(dimer.position2-dimer.position1)
deform = bondlength - dimer.restlength
unit_vector = np.asarray((np.cos(self.orientationlist[n]), np.sin(self.orientationlist[n])))
dimer.bondforce1 = coefficient*unit_vector*deform
dimer.bondforce2 = -coefficient*unit_vector*deform
def accelerate(self):
for dimer in self.dimer_list:
dimer.force1 = dimer.kickforce1 + dimer.dissipation1 + dimer.bondforce1 + dimer.repelforce1
dimer.velocity1 += self.dt*dimer.force1/dimer.mass
dimer.force2 = dimer.kickforce2 + dimer.dissipation2 + dimer.bondforce2 + dimer.repelforce2
dimer.velocity2 += self.dt*dimer.force2/dimer.mass
def move(self):
for dimer in self.dimer_list:
dimer.position1 += self.dt*dimer.velocity1
dimer.position2 += self.dt*dimer.velocity2
def update(self,kickf,collide_coeff,dissipate_coeff,bond_coeff):
self.kick(kickf)
self.collide(collide_coeff)
self.bond_deform(bond_coeff)
self.dissipate(dissipate_coeff)
self.accelerate()
self.bounce()
self.move()
for n, dimer in enumerate(self.dimer_list):
self.particle_position_array[2*n,:] = dimer.position1
self.particle_position_array[2*n+1, :] = dimer.position2
bond = dimer.position2-dimer.position1
dimer.orientation = np.angle(bond[0]+1j*bond[1])
self.orientationlist[n] = dimer.orientation
self.bondlist[n] = (dimer.position1[0],dimer.position2[0]),(dimer.position1[1],dimer.position2[1])
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import progressbar
    # Build a 500x500 box of 110 dimers and run the simulation.
    env = Environment(500,totnum = 110, dt = 0.02)
    env.create_dimers(mass=10., radius=10., restlength=30.)
    print env.removallist
    print len(env.orientationlist)
    totframe = 30000
    for i in range(totframe):
        env.update(kickf=1,collide_coeff=10,dissipate_coeff=1,bond_coeff=10)
        # After a warm-up of 3000 steps, save a movie frame every 30 steps.
        # NOTE(review): assumes ./movie5/ already exists -- savefig does not create it.
        if i%30 == 0 and i>3000:
            env.visualize()
            plt.savefig('./movie5/'+'{:4.0f}'.format(i/10)+'.tif')
            plt.close()
        progressbar.progressbar_tty(i+1,totframe,1)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,816
|
hemengf/my_python_lib
|
refs/heads/master
|
/leastsq.py
|
from __future__ import division
from scipy import stats
import numpy as np
def leastsq_unweighted(x,y):
    """Ordinary least-squares fit of y = A + B*x.

    x and y are 1-D numpy arrays of equal length N (N > 2 for a meaningful
    scatter estimate). Returns (A, B, sigma_A, sigma_B, sigma_y), where
    sigma_y is the estimated scatter of the data about the fitted line.
    """
    N = len(x)
    Sx = x.sum()
    Sy = y.sum()
    Sxx = (x * x).sum()
    Sxy = (x * y).sum()
    delta = N * Sxx - Sx ** 2
    A = (Sxx * Sy - Sx * Sxy) / delta
    B = (N * Sxy - Sx * Sy) / delta
    residual = y - A - B * x
    sigma_y = np.sqrt((1 / (N - 2)) * np.square(residual).sum())
    sigma_A = sigma_y * np.sqrt(Sxx / delta)
    sigma_B = sigma_y * np.sqrt(N / delta)
    return A, B, sigma_A, sigma_B, sigma_y
def leastsq_weighted(x,y,sigmax_exp, sigmay_exp):
    """Weighted least-squares fit y = A + B*x with effective y-errors.

    x-errors are folded into the y-error using the unweighted slope:
    sigma_eff^2 = max(sigma_y_fit, sigma_y_exp)^2 + (B*sigma_x_exp)^2.
    Returns (A, B, sigma_A, sigma_B).
    """
    _, slope0, _, _, scatter = leastsq_unweighted(x, y)
    # Elementwise max of the fit scatter and the experimental y-error.
    # NOTE(review): `scatter*y/y` merely broadcasts the scalar (NaN if y==0).
    sy_floor = np.array([max(a, b) for (a, b) in zip(scatter * y / y, sigmay_exp)])
    sy_eff = np.sqrt(sy_floor ** 2 + np.square(slope0 * sigmax_exp))
    w = 1 / np.square(sy_eff)
    Sw = w.sum()
    Swx = (w * x).sum()
    Swy = (w * y).sum()
    Swxx = (w * x * x).sum()
    Swxy = (w * x * y).sum()
    delta = Sw * Swxx - np.square(Swx)
    A = (Swxx * Swy - Swx * Swxy) / delta
    B = (Sw * Swxy - Swx * Swy) / delta
    sigma_A = np.sqrt(Swxx / delta)
    sigma_B = np.sqrt(Sw / delta)
    return A, B, sigma_A, sigma_B
def leastsq_unweighted_thru0(x,y):
    """Unweighted least squares for a line through the origin, y = B*x.

    Returns (B, sigma_B, sigma_y) where sigma_y is the scatter of the data
    about the fitted line (N-1 degrees of freedom: one fitted parameter).
    """
    N = len(y)
    Sxx = (x ** 2).sum()
    B = (x * y).sum() / Sxx
    sigma_y = np.sqrt(((y - B * x) ** 2).sum() / (N - 1))
    sigma_B = sigma_y / np.sqrt(Sxx)
    return B, sigma_B, sigma_y
def leastsq_weighted_thru0(x,y,sigmax_exp,sigmay_exp):
    """Weighted through-origin fit y = B*x; returns (B, sigma_B).

    The x-errors are converted to effective y-errors via the unweighted slope,
    mirroring leastsq_weighted().
    """
    slope0, _, scatter = leastsq_unweighted_thru0(x, y)
    # NOTE(review): `scatter*y/y` merely broadcasts the scalar (NaN if y==0).
    sy_floor = np.array([max(a, b) for (a, b) in zip(scatter * y / y, sigmay_exp)])
    sy_eff = np.sqrt(sy_floor ** 2 + np.square(slope0 * sigmax_exp))
    w = 1 / np.square(sy_eff)
    Swxx = (w * x * x).sum()
    B = (w * x * y).sum() / Swxx
    sigma_B = 1 / np.sqrt(Swxx)
    return B, sigma_B
def chi2test(x,y,sigmax_exp,sigmay_exp):
    """Reduced chi-squared of the weighted linear fit against sigmay_exp.

    The experimental y-errors must be used here -- a reasonable estimate of
    the experimental error is crucial. The number of constraints is c = 2
    (fitted intercept and slope). Returns chi2 / (N - c).

    Fix: the original also called leastsq_unweighted() and computed the
    chi-squared survival probability, then discarded both results; the dead
    computations were removed (interface and return value unchanged).
    """
    A_weighted, B_weighted, _, _ = leastsq_weighted(x, y, sigmax_exp, sigmay_exp)
    chi2 = (np.square((y - A_weighted - B_weighted * x) / sigmay_exp)).sum()
    N = len(x)
    c = 2  # fitted parameters: intercept and slope
    return chi2 / (N - c)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,817
|
hemengf/my_python_lib
|
refs/heads/master
|
/intensity2height.py
|
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Convert a two-colour interference photograph into an 8-bit map of the
# log(green/red) intensity ratio, normalized to the populated range.
colorimg = cv2.imread('DSC_5311.jpg').astype(float)
blue, green, red = cv2.split(colorimg)
cutoff = 100  # histogram bins with at most this many counts are ignored
# Guard the division against zero reds; ratios below 1 are unphysical.
ratio = green / (red + 1e-6)
ratio[ratio < 1] = 1
lratio = np.log(ratio)
# Histogram the log-ratio and locate the rightmost genuinely-populated bin.
hist, bins = np.histogram(lratio.flat, bins=np.arange(0, 2, 0.01))
hist[hist <= cutoff] = 0
populated = np.nonzero(hist)
centers = (bins[:-1] + bins[1:]) / 2
rmax = max(centers[populated])
rmin = np.min(lratio)
# Clamp to [rmin, rmax] and rescale into 0..255.
lratio[lratio < rmin] = rmin
lratio[lratio > rmax] = rmax
img = 255 * (lratio - rmin) / (rmax - rmin)
img = img.astype('uint8')
cv2.imwrite('img.tif', img)
cv2.imwrite('green.tif', green)
cv2.imwrite('red.tif', red)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,818
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/printtime.py
|
#import os
#if os.getenv("TZ"):
#    os.unsetenv("TZ")
# Print the local wall-clock time and the machine's UTC offset in hours.
from time import strftime, localtime,gmtime,timezone
print strftime("%H_%M_%S",localtime())
print timezone/3600.  # time.timezone is seconds west of UTC (POSIX convention)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,819
|
hemengf/my_python_lib
|
refs/heads/master
|
/trythisfromlabcomputer.py
|
# NOTE(review): appears to be a smoke-test script to confirm the repo is
# reachable and runnable from the lab computer.
print 'try this from the lab computer'
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,820
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/thin/readthin.py
|
from __future__ import division
import numpy as np
import cv2
from scipy import interpolate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Height step between successive interference fringes: half of the 532 nm
# laser wavelength, in microns.
HALF_WAVE = 0.532 / 2

# Fringe-contour masks: a pixel value of 255 in left<n>.tif / right<n>.tif
# marks the contour of fringe order n.
left_imgs = [cv2.imread('left{0}.tif'.format(n), 0) for n in range(5)]
leftflat_img = cv2.imread('leftflat.tif', 0)  # only used by the disabled branch below
right_imgs = [cv2.imread('right{0}.tif'.format(n), 0) for n in range(5)]

def collect_points(imgs, rows, cols, offset):
    """Scan the ordered fringe masks over a pixel window.

    A pixel equal to 255 in the order-n mask contributes a point (j, i,
    n*HALF_WAVE + offset). Returns three parallel lists (x, y, z).
    Fix: replaces five copy-pasted per-order `if` blocks with one loop.
    """
    xs, ys, zs = [], [], []
    for i in rows:
        for j in cols:
            for order, img in enumerate(imgs):
                if img[i, j] == 255:
                    xs.append(j)
                    ys.append(i)
                    zs.append(order * HALF_WAVE + offset)
    return xs, ys, zs

dd = 1  # pixel stride
offsetl = 0
offsetr = 0
#if leftflat_img[i,j] == 255: the flat reference plane would sit at 2.5*HALF_WAVE
xl, yl, zl = collect_points(left_imgs, range(252, 1046, dd), range(505, 1672, dd), offsetl)
xr, yr, zr = collect_points(right_imgs, range(272, 1012, dd), range(2579, 3703, dd), offsetr)

np.save('xleft.npy',xl)
np.save('yleft.npy',yl)
np.save('zleft.npy',zl)
np.save('xright.npy',xr)
np.save('yright.npy',yr)
np.save('zright.npy',zr)
"""
slicing = 1128
yslice = [y[i] for i in range(len(x)) if x[i] == slicing]
zslice = [z[i] for i in range(len(x)) if x[i] == slicing]
f = interpolate.interp1d(yslice,zslice,kind='linear')
xnew = np.arange(min(x),max(x))
ynew = np.arange(min(yslice),max(yslice))
znew = f(ynew)
#XX,YY = np.meshgrid(xnew,ynew)
#fig = plt.figure(figsize=(7,7))
#ax = fig.add_subplot(111,projection='3d')
#ax.set_zlim(0,1000)
#ax.plot_wireframe(XX,YY,znew)
#ax.scatter(x,y,z)
plt.plot(ynew,znew)
plt.scatter(yslice, zslice)
plt.show()
"""
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,821
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/pattern_shift1D_vectorized.py
|
from __future__ import division
from scipy.misc import derivative
import scipy.optimize
import scipy.spatial.distance
def shape_function(x):
    """Height profile of the refracting surface: a shallow parabola 68 um up."""
    return 0.000005 * x ** 2 + 68
#@profile
def find_k_refracting(k_incident, x1, n1,n2):
    """Vectorized Snell refraction of `k_incident` at the surface points x1.

    Builds the upward surface normal (-dh/dx, 1) at each x1 (normalized
    row-wise), then applies the vector form of Snell's law with relative
    index r = n1/n2. Exits the program on total internal reflection.
    Returns a (len(x1), 2) array of refracted direction vectors.
    NOTE(review): relies on module-level names (Fore, Style, sys) that are
    imported in the __main__ block -- only usable when run as a script.
    """
    #n = np.array([[-derivative(shape_function, x, dx=1e-6), 1]for x in x1])
    #above method in creating n is too slow
    n = np.empty((len(x1), 2))
    # Normal = (-slope, 1); the slope comes from a numerical derivative.
    n[:,0] = -derivative(shape_function, x1, dx=1e-6)
    #n[:,0] = -partial_derivative.derivative(shape_function, x1, dx=1e-6)
    n[:,1] = 1
    norm = np.linalg.norm(n, axis = 1)
    n = n/norm[:,np.newaxis]
    c = -np.dot(n, k_incident)  # cosine of the incidence angle per point
    r = n1/n2                   # relative refractive index
    # Negative radicand means sin(theta_t) > 1: total internal reflection.
    if ((1-r**2*(1-c**2)) < 0).any():
        print(Fore.RED)
        print "Total internal reflection occurred."
        print "1-r**2*(1-c**2) = \n", 1-r**2*(1-c**2)
        print(Style.RESET_ALL)
        sys.exit(0)
    factor = (r*c- np.sqrt(1-r**2*(1-c**2)))
    #print "n = ", n
    #print 'c =',c
    #print "factor", factor
    #print "tile", np.tile(r*k_incident,(len(x1), 1))
    #print k_refracting
    # Vector Snell's law: k_t = r*k_i + (r*cos(theta_i) - cos(theta_t)) * n.
    k_refracting = np.tile(r*k_incident,(len(x1), 1)) + n*factor[:,np.newaxis]
    return k_refracting
#@profile
def find_x0(k_incident, x1, n1,n2):
    """Trace each detector position x1 back to its refraction point x0.

    Solves, elementwise, the geometric condition that the refracted ray
    through x0 reaches the mirror image of the surface point above x1,
    using a Newton-Krylov root find seeded at x1.
    """
    def residual(x):
        k = find_k_refracting(k_incident, x, n1, n2)
        heights = shape_function(x1) + shape_function(x)
        return k[:, 0] * heights + k[:, 1] * (x1 - x)
    return scipy.optimize.newton_krylov(residual, x1, f_tol=1e-3)
#@profile
def optical_path_diff(k_incident, x1, n1,n2):
    """Optical path difference between the two interfering rays at each x1.

    The direct ray runs from the refraction point x0 to x1 through medium n1;
    the reference path runs from x0 to the mirror image of x1 through medium
    n2. Returns an array of OPDs, one per detector position.
    """
    x0 = find_x0(k_incident, x1, n1, n2)
    # Surface points above x0 and x1, plus x1's mirror image below the surface.
    p0 = np.column_stack((x0, shape_function(x0)))
    p1 = np.column_stack((x1, shape_function(x1)))
    p1_mirror = np.column_stack((x1, -shape_function(x1)))
    # Unit vector along the chord x0 -> x1 (zero-length chords left untouched).
    chord = p1 - p0
    chord_len = np.linalg.norm(chord, axis=1)
    chord_len[chord_len == 0] = 1
    unit_chord = chord / chord_len[:, np.newaxis]
    cos_in = np.dot(unit_chord, k_incident)
    dist_direct = np.linalg.norm(p0 - p1, axis=1)
    dist_mirror = np.linalg.norm(p0 - p1_mirror, axis=1)
    # Mirror path through n2 minus the projected direct path through n1.
    return dist_mirror * n2 - dist_direct * cos_in * n1
def pattern(opd):
    """Two-beam interference intensity (range 0..2) for an optical path difference."""
    phase = (2 * np.pi / 0.532) * opd
    return 1 + np.cos(phase)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
import sys
import progressbar
import os
import time
from colorama import Fore, Style
start = time.time()
print "starting..."
i = 0
framenumber = 50
pltnumber = 300
pltlength = 500
detecting_range = np.linspace(-pltlength,pltlength,pltnumber)
for angle in np.linspace(0,0.0625,framenumber):
fig = plt.figure()
ax = fig.add_subplot(111)
i += 1
opd = optical_path_diff(k_incident = np.array([np.sin(angle),-np.cos(angle)]),\
x1 = detecting_range,\
n1 = 1.5,\
n2 = 1)
intensity = pattern(opd)
#opd_expected = 2*shape_function(0)*np.cos(np.arcsin(np.sin(angle-0.00000001)*1.5)+0.00000001)
#print "error in OPD = " ,(opd-opd_expected)/0.532, "wavelength"
ax.plot(detecting_range, intensity)
plt.ylim((0,2.5))
ax.set_xlabel('$\mu m$')
ax.text(0, 2.2, r'$rotated : %.4f rad$'%angle, fontsize=15)
dirname = "./movie/"
if not os.path.exists(dirname):
os.makedirs(dirname)
plt.savefig(dirname+'{:4.0f}'.format(i)+'.tif')
plt.close()
progressbar.progressbar_tty(i, framenumber, 1)
print(Fore.CYAN)
print "Total running time:", time.time()-start, "seconds"
print(Style.RESET_ALL)
print "finished!"
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,822
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/red_amber_green/amber_green.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Plot thin-film interference fringes for the green and amber lines over
# 0-20 um of thickness; each wave carries a quarter-wavelength offset.
palette = plt.get_cmap('tab10')
thickness = np.arange(0, 20, 0.001)

def fringe(wavelength):
    # 1 + cos(4*pi*(t + lambda/4)/lambda): two-beam fringe with a
    # quarter-wave phase offset.
    return 1 + np.cos(4 * np.pi * (thickness + wavelength / 4) / wavelength)

red = fringe(0.630)
amber = fringe(0.590)
green = fringe(0.534)
plt.title('green and amber')
plt.plot(thickness, green, color=palette(2))
plt.plot(thickness, amber, color=palette(1))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,823
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/red_amber_green/red_amber_8bit.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Red fringe carries a quarter-wave phase offset; the amber one deliberately
# none (the original offset term was multiplied by zero).
thickness = np.arange(0, 20, 0.001)
red = 1 + np.cos(4 * np.pi * (thickness + 0.630 / 4) / 0.630)
amber = 1 + np.cos(4 * np.pi * thickness / 0.590)
plt.plot(thickness, red + amber)
plt.title('red and amber 8bit')
plt.plot(thickness, red, 'r')
plt.plot(thickness, amber, 'y')
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,824
|
hemengf/my_python_lib
|
refs/heads/master
|
/door_position/disks.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from boundaryv.brownian_gas import findnearest
class Particle:
    """A point particle with 2-D position, velocity and accumulated repulsion."""
    def __init__(self):
        # All state starts at the origin / at rest.
        self.position = np.array([0., 0.])
        self.velocity = np.array([0., 0.])
        self.repelforce = np.zeros(2)

    def accelerate(self, acceleration):
        """Accumulate a velocity increment (in place, so aliasing is preserved)."""
        self.velocity += acceleration

    def move(self, velocity):
        """Advance position by one step of the stored velocity.

        The `velocity` argument is ignored (kept for interface compatibility).
        The in-place add matters: `position` may be a view into a shared
        position array, and += writes through that view.
        """
        self.position += self.velocity
class Disk(Particle):
    """A Particle with a mass and a radius (a rigid disk)."""
    def __init__(self, mass, radius):
        # Initialize kinematic state from the base class, then add geometry.
        # (Explicit base-class call: Particle is an old-style py2 class.)
        Particle.__init__(self)
        self.mass = mass
        self.radius = radius
def touch(particle1pos, particle2pos, particle1size, particle2size):
    """Overlap depth of two disks; 0.0 when they do not intersect."""
    depth = (particle1size + particle2size) - np.linalg.norm(particle1pos - particle2pos)
    if depth > 0.:
        return depth
    return 0.
def tchbnd(particle_position, radius, boxsize):
    """Wall-contact flags [W, N, E, S] for a disk in a rectangular box.

    `boxsize` is a (width, height) tuple.
    """
    x = particle_position[0]
    y = particle_position[1]
    west = int(x <= radius)
    east = int(x >= boxsize[0] - radius)
    north = int(y >= boxsize[1] - radius)
    south = int(y <= radius)
    return [west, north, east, south]
class Environment:
    """Rectangular box of disks riding a conveyor belt, with a door gap.

    Disks feel kinetic friction toward `belt_velocity`, soft repulsion on
    overlap, and rigid walls -- except for the opening between
    `lower_doorbnd` and `upper_doorbnd` on the west wall, through which
    disks can escape (counted by measure_pass()).
    """
    def __init__(self, boxsize, lower_doorbnd, upper_doorbnd, totnum, dt, repel_coeff, friction_coeff, belt_velocity):
        # boxsize is a tuple: horizontal, vertical
        # lower_doorbnd is a np array coordinate
        self.boxsize = boxsize
        self.lower_doorbnd = lower_doorbnd
        self.upper_doorbnd = upper_doorbnd
        self.totnum = totnum
        # One row per disk; NaN marks "not placed yet".
        self.particle_position_array = np.empty((self.totnum,2))
        self.particle_position_array[:] = np.nan
        self.particle_list = [0]*self.totnum
        self.dt = dt
        self.repel_coeff = repel_coeff        # overlap spring constant
        self.friction_coeff = friction_coeff  # kinetic friction coefficient
        self.belt_velocity = belt_velocity    # belt velocity vector
    def create_disks(self, mass, radius):
        """Randomly place totnum non-overlapping disks fully inside the box.

        NOTE(review): `processbar` is resolved at runtime from the module
        namespace (imported in __main__) -- only works when run as a script.
        """
        print 'Creating particles...'
        for n in range(0,self.totnum):
            overlap = 1
            out_of_bnd = 1
            while overlap or out_of_bnd:
                disk = Disk(mass, radius)
                disk.position[0] = np.random.uniform(radius, self.boxsize[0]-radius)
                disk.position[1] = np.random.uniform(radius, self.boxsize[1]-radius)
                try:
                    nearest_idx = findnearest(disk.position, self.particle_position_array)
                    overlap = touch(disk.position, self.particle_position_array[nearest_idx], radius, radius)
                    tchbndlist = tchbnd(disk.position, disk.radius, self.boxsize)
                    out_of_bnd = sum(tchbndlist)
                except ValueError:
                    # just for the first particle creation, self.particle_position_array could be all nan, which would raise a ValueError when using findnearest
                    break
            self.particle_position_array[n,:] = disk.position
            self.particle_list[n] = disk
            processbar.processbar(n+1, self.totnum, 1)
    def read_positions(self, mass, radius):
        """Load the initial disk positions from a previously saved .npy file.

        Each disk's `position` becomes a view into the shared position array,
        so in-place moves keep the two in sync.
        """
        self.particle_position_array = np.load('initial_positions_real_try.npy')
        for n in range(0, self.totnum):
            disk = Disk(mass, radius)
            disk.position = self.particle_position_array[n,:]
            self.particle_list[n] = disk
    def visualize(self):
        """Draw every disk and the door-gap walls into a fresh figure."""
        fig = plt.figure(figsize=(8.0,5.0))
        for disk in self.particle_list:
            circle = plt.Circle(disk.position, disk.radius, fill = False, linewidth=0.3)
            fig.gca().add_artist(circle)
        # West wall segments below and above the door opening.
        plt.plot((0,0),(0,self.lower_doorbnd[1]), 'k', linewidth=0.3)
        plt.plot((0,0),(self.upper_doorbnd[1], self.boxsize[1]), 'k', linewidth=0.3)
        # Extra margin on the left so escaped disks stay visible.
        plt.axis([-0.3*self.boxsize[0],self.boxsize[0], 0,self.boxsize[1]])
        plt.axes().set_aspect('equal')
        #plt.show()
    def assign_repel(self):
        """Assign pairwise soft-repulsion forces; return diagnostics.

        Returns (repel_list, overlap_list): the flattened per-disk force
        components, and a running cumulative sum of overlap depths.
        """
        repel_list = []
        overlap_list = []
        overlapsum = 0.
        for particle in self.particle_list:
            particle.repelforce = np.zeros(2)
            # Clear assigned forces from the last iteration.
        for n, particle in enumerate(self.particle_list):
            for i, particle_position in enumerate(self.particle_position_array):
                if i != n: # Exclude itself
                    overlap = touch(particle.position, particle_position, particle.radius, particle.radius)
                    unit_vector = (particle.position-particle_position)/np.linalg.norm((particle.position-particle_position))
                    particle.repelforce += self.repel_coeff * unit_vector * overlap
                    overlapsum += overlap
            repel_list.append(particle.repelforce[0])
            repel_list.append(particle.repelforce[1])
            overlap_list.append(overlapsum)
        return repel_list, overlap_list
    def assign_beltfriction(self):
        """Assign kinetic belt friction toward the belt's relative velocity.

        NOTE(review): divides by |belt_velocity - velocity| -- a disk moving
        exactly with the belt would produce NaN forces. Confirm this cannot
        happen with the chosen parameters.
        """
        friction_list = []
        for n, particle in enumerate(self.particle_list):
            unit_vector = (self.belt_velocity-particle.velocity)/np.linalg.norm((self.belt_velocity-particle.velocity))
            particle.beltfriction = 9.8 * particle.mass * self.friction_coeff * unit_vector
            friction_list.append(particle.beltfriction[0])
            friction_list.append(particle.beltfriction[1])
        return friction_list
    def wall_interact(self):
        """Cancel inward-pointing velocity components at walls and door corners."""
        for particle in self.particle_list:
            if particle.position[0]<=particle.radius and particle.position[1]<=self.upper_doorbnd[1] and particle.position[1]>=self.lower_doorbnd[1]: # takes care of the situation when a particle hits the corners of the doorbnd
                if np.linalg.norm(particle.position-self.lower_doorbnd) <= particle.radius and particle.position[1]>=self.lower_doorbnd[1]:
                    unit_vector = -(particle.position-self.lower_doorbnd)/np.linalg.norm(particle.position-self.lower_doorbnd)
                    normal_velocity = np.dot(particle.velocity,unit_vector)
                    if normal_velocity > 0:
                        particle.velocity = particle.velocity - unit_vector * normal_velocity
                if np.linalg.norm(particle.position-self.upper_doorbnd) <= particle.radius and particle.position[1]<=self.upper_doorbnd[1]:
                    unit_vector = -(particle.position-self.upper_doorbnd)/np.linalg.norm(particle.position-self.upper_doorbnd)
                    normal_velocity = np.dot(particle.velocity,unit_vector)
                    if normal_velocity > 0:
                        particle.velocity = particle.velocity - unit_vector * normal_velocity
            elif particle.position[0] > 0.: # takes care of the situation when a particle hits other part of the wall
                tchbndlist = tchbnd(particle.position, particle.radius, self.boxsize)
                if tchbndlist[0] * particle.velocity[0] < 0.:
                    particle.velocity[0] = 0.
                if tchbndlist[2] * particle.velocity[0] > 0.:
                    particle.velocity[0] = 0.
                if tchbndlist[1] * particle.velocity[1] > 0.:
                    particle.velocity[1] = 0.
                if tchbndlist[3] * particle.velocity[1] < 0.:
                    particle.velocity[1] = 0.
    def accelerate(self):
        """Sum friction and repulsion, then integrate velocities (explicit Euler)."""
        for particle in self.particle_list:
            particle.force = particle.beltfriction + particle.repelforce
            particle.velocity += self.dt*particle.force/particle.mass
    def move(self):
        """Integrate positions one step and refresh the shared position array."""
        for n, particle in enumerate(self.particle_list):
            particle.position += self.dt*particle.velocity
            self.particle_position_array[n,:] = particle.position
    def update(self):
        """One full time step: forces, wall handling, integration."""
        repel_list, overlap_list = self.assign_repel()
        #f = open('./resultsfile.txt', 'a')
        #print >> f, ''.join('{:<+10.2f}'.format(e) for e in repel_list)
        friction_list = self.assign_beltfriction()
        #f = open('./resultsfile.txt', 'a')
        #print >> f, ''.join('{:<+10.2f}'.format(e) for e in friction_list)
        #result_list = overlap_list + repel_list+friction_list
        #f = open('./resultsfile.txt', 'a')
        #print >> f, ''.join('{:<+7.1f}'.format(e) for e in result_list)
        self.accelerate()
        self.wall_interact()
        self.move()
    def measure_pass(self):
        """Number of disks that have escaped through the door (x < 0)."""
        pass_number = sum(e<0 for e in self.particle_position_array[:,0])
        return pass_number
if __name__ == '__main__':
    # Driver script (Python 2): run the belt/door simulation, dump movie
    # frames, and record how many particles escape per frame.
    import matplotlib.pyplot as plt
    import processbar
    import os
    import subprocess
    import time
    start = time.time()
    # Truncate the results log left over from a previous run.
    open('resultsfile.txt', 'w').close()
    # 0.6 x 0.4 box with a door of height 0.06 in the left wall; belt
    # drags particles toward the door at 0.02 units/s.
    env = Environment(boxsize=(0.6,0.4), \
            lower_doorbnd=np.array([0,0]), \
            upper_doorbnd=np.array([0,0.06]), \
            totnum=500, \
            dt=0.005, \
            repel_coeff=100, \
            friction_coeff=0.5, \
            belt_velocity=np.array([-0.02,0]))
    #env.create_disks(mass = 0.005, radius = 0.010)
    # Restart from saved positions instead of generating fresh disks.
    env.read_positions(mass = 0.005, radius = 0.010)
    for disk in env.particle_list:
        print disk.position
    totframe = 1200
    passnumber_list = []
    for i in range(totframe):
        env.update()
        # Save every 3rd frame as a movie still.
        if i%3==0:
            env.visualize()
            plt.savefig('./movie_try/'+'{:4.0f}'.format(i)+'.tif', dpi = 200)
            plt.close()
        pass_number = env.measure_pass()
        passnumber_list.append(pass_number)
        #if i == 2000:
        #    np.save('initial_positions_real_try', env.particle_position_array)
        processbar.processbar(i+1, totframe, 1)
    #subprocess.call('less resultsfile.txt', shell=False)
    g = open('passnumber.txt', 'w')
    print >> g, passnumber_list
    np.save('passnumber_list_real', passnumber_list)
    end = time.time()
    print end-start
    #plt.plot(passnumber_list)
    #plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,825
|
hemengf/my_python_lib
|
refs/heads/master
|
/door_position/fluid/data_plot.py
|
# Scatter plot comparing door-position data measured at the channel
# center ('data_center.txt', red x) vs near the wall ('data_wall.txt',
# green +).  Each file holds comma-separated (x, y) columns.
import matplotlib.pyplot as plt
import numpy as np
fig, ax = plt.subplots()
x1,y1 = np.loadtxt('data_center.txt', delimiter=',', unpack = True)
ax.plot(x1, y1, 'x', color = 'r')
x2,y2 = np.loadtxt('data_wall.txt', delimiter=',', unpack=True)
ax.plot(x2, y2, '+', color = 'g')
# Fixed viewing window: x in [0, 4], y in [20, 70].
plt.axis([0,4, 20, 70])
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,826
|
hemengf/my_python_lib
|
refs/heads/master
|
/partial_derivative.py
|
from __future__ import division
import scipy.misc
import numpy as np
def partial_derivative_wrapper(func, var, point):
    """
    Return the partial derivative of ``func`` with respect to its
    ``var``-th argument, evaluated at ``point``.

    func: callable
    var: index of the variable to differentiate with respect to
    point: sequence of numbers (list, tuple, or numpy array)

    Usage:
        df(x,y)/dx at (3,2):
        partial_derivative_wrapper(f, 0, [3, 2])

    Notes
    -----
    Originally a thin wrapper around ``scipy.misc.derivative`` (from
    http://stackoverflow.com/questions/20708038/scipy-misc-derivative-for-mutiple-argument-function).
    That helper was deprecated in SciPy 1.10 and removed in 1.12, so the
    identical second-order central difference (the old ``order=3``
    default) is computed directly here.  Converting ``point`` with
    ``list()`` also lifts the old restriction that it had to be a plain
    Python list.
    """
    dx = 1e-6  # same step the old scipy call used
    args = list(point)  # copy, so the caller's sequence is untouched

    def _eval_at(x):
        """func with every argument fixed except the var-th, set to x."""
        args[var] = x
        return func(*args)

    x0 = point[var]
    # Central difference: f'(x0) ~= (f(x0+dx) - f(x0-dx)) / (2*dx)
    return (_eval_at(x0 + dx) - _eval_at(x0 - dx)) / (2 * dx)
def derivative(f, x, dx=1e-6):
    """Forward-difference estimate of f'(x) with step dx (vectorized if f is)."""
    ahead = f(x + dx)
    here = f(x)
    return (ahead - here) / dx
def partial_derivative(f, x, y, dx=1e-6, dy=1e-6):
    """
    Forward-difference gradient of f(x, y).

    Usage:
        for N points simultaneously:
            partial_derivative(f, *'Nx2 array of points'.T)
            -> np.array([[df/dx1, df/dy1],
                         ...,
                         [df/dxN, df/dyN]])
        for one point:
            partial_derivative(f, *np.array([3, 2]))
            -> np.array([df/dx, df/dy])
    """
    ddx = (f(x + dx, y) - f(x, y)) / dx
    ddy = (f(x, y + dy) - f(x, y)) / dy
    # Stack as rows then transpose: Nx2 for vectorized input, (2,) for
    # scalar input.
    return np.transpose(np.array((ddx, ddy)))
if __name__ == "__main__":
    # Quick manual check (Python 2): time the vectorized gradient against
    # analytically known derivatives.
    import time
    import numpy as np
    def g(x):
        return x**2
    def f(x,y):
        return x**2 + y**3
    # df/dx should be 2x
    # df/dy should be 3y^2
    start = time.time()
    result = partial_derivative(f,*np.array([[3,1], [3,1],[3,2],[1,2],[0,2]]).T)
    result2 = partial_derivative(f, *np.array([3,1]))
    result3 = derivative(g,np.array([1,2,3]))
    print time.time()-start
    print "vectorized:", result
    print "single argument:", result2, type(result2)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,827
|
hemengf/my_python_lib
|
refs/heads/master
|
/water_glycerol.py
|
from __future__ import division
import numpy as np
from scipy.optimize import fsolve
def mu(Cm, T):
    """Viscosity (cP) of a water-glycerol mixture.

    Cm is the glycerol mass fraction (0-1) and T the temperature in
    Celsius; the result interpolates between the pure-water and
    pure-glycerol viscosities with a composition-dependent exponent.
    """
    a = 0.705 - 0.0017 * T
    b = (4.9 + 0.036 * T) * np.power(a, 2.5)
    blend = (a * b * Cm * (1 - Cm)) / (a * Cm + b * (1 - Cm))
    alpha = 1 - Cm + blend
    water_visc = 1.790 * np.exp((-1230 - T) * T / (36100 + 360 * T))
    gly_visc = 12100 * np.exp((-1233 + T) * T / (9900 + 70 * T))
    return np.power(water_visc, alpha) * np.power(gly_visc, 1 - alpha)
def glycerol_mass(T, target_viscosity=200):
    """Solve for the glycerol mass fraction whose mixture viscosity at
    temperature T equals target_viscosity.

    Returns the fsolve result array (one element); initial guess is
    pure glycerol (Cm = 1).
    """
    def residual(Cm, T):
        # Root of this function is the sought mass fraction.
        return mu(Cm, T) - target_viscosity
    return fsolve(residual, 1, args=(T), xtol=1e-12)
# Example: mass fraction needed for a 100 cP mixture at 22.5 C.
Temperature = 22.5
Target_viscosity = 100
print 'glycerol mass fraction %0.3f%%'%(glycerol_mass(Temperature,Target_viscosity)[0]*100)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,828
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/ffttest.py
|
# Visualize the 2D FFT magnitude of an ideal interference pattern.
from scipy import fftpack
import cv2
import matplotlib.pyplot as plt
import numpy as np
img = cv2.imread('ideal.tif',0)
# Magnitude spectrum; trim a 2-pixel border on each side.
absfft2 = np.abs(fftpack.fft2(img))[2:-2,2:-2]
absfft2 /= absfft2.max()  # normalize to [0, 1]
print absfft2.max()
plt.imshow(absfft2)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,829
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/test_peak.py
|
# Locate peaks in a stored (negated) intensity profile with the
# continuous-wavelet-transform peak finder and overlay them on the curve.
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import cookb_signalsmooth
intensity = np.load("intensity.npy")
intensity = -intensity  # flip so intensity minima become peaks
coordinates = np.linspace(-500,500,300)
plt.plot(coordinates, intensity)
#intensity = cookb_signalsmooth.smooth(intensity, 10)
#plt.plot(coordinates, intensity)
# CWT widths 20..149 samples; tune if the peak spacing changes.
peakind = signal.find_peaks_cwt(intensity, np.arange(20,150))
plt.plot(coordinates[peakind], intensity[peakind],'+', color = 'r')
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,830
|
hemengf/my_python_lib
|
refs/heads/master
|
/cursor.py
|
# Windows-only UI automation demo (Python 2): double-click at two screen
# positions via the Win32 mouse_event API and time each step.
import ctypes
import time
start_time = time.time()
# see http://msdn.microsoft.com/en-us/library/ms646260(VS.85).aspx for details
# Double-click at (100, 40) -- presumably opens the first file icon.
ctypes.windll.user32.SetCursorPos(100, 40)
ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down
ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up
ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down
ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up
time_1 = time.time()
print '1st file opened'
# Double-click at (200, 40) for the second target.
ctypes.windll.user32.SetCursorPos(200, 40)
ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down
ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up
ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down
ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up
print '2nd file opened'
time_2 = time.time()
print start_time
print '%.5f' % time_1
print '%.5f' % time_2
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,831
|
hemengf/my_python_lib
|
refs/heads/master
|
/removeholes.py
|
# Label connected components in a binary image, drop small objects, and
# report the object count before and after cleaning.
from skimage import morphology
import mahotas as mh
import matplotlib.pyplot as plt
import numpy as np
#label original image, im=uint8(0 and 255), labeled=uint8
im = plt.imread('../../Downloads/image.tif')
# 3x3 structuring element -> 8-connectivity labeling.
labeled, nr_objects = mh.label(im,np.ones((3,3),bool))
print nr_objects
#an example of removing holes. Should use labeled image
im_clean = morphology.remove_small_objects(labeled)
labeled_clean, nr_objects_clean = mh.label(im_clean,np.ones((3,3),bool))
print nr_objects_clean
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,832
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/warptest.py
|
# Piecewise-affine warp of an interference image: estimate the transform
# from two matched point sets (measurement CSVs) and apply it.
import cv2
import numpy as np
from skimage import transform as tf
import matplotlib.pyplot as plt
img = cv2.imread('sample6.tif',0)
pointset1 = np.genfromtxt('pointset1.csv', delimiter=',', names=True)
pointset2 = np.genfromtxt('pointset2.csv', delimiter=',', names=True)
# BX/BY are the coordinate columns; stack into (N, 2) point arrays.
pointset1 = np.vstack((pointset1['BX'],pointset1['BY'])).T
pointset2 = np.vstack((pointset2['BX'],pointset2['BY'])).T
tform = tf.PiecewiseAffineTransform()
tform.estimate(pointset1, pointset2) # pointset2 will be warped
# tf.warp returns floats in [0, 1]; rescale to 8-bit for display.
warped = 255*tf.warp(img, tform)
warped = warped.astype(np.uint8)
plt.imshow(warped)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,833
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/plotheight.py
|
# Reconstruct and plot a film-height surface (Python 2): each image tile
# has a fitted 2nd-order polynomial (coefficients in xoptstore_bot.npy);
# the script draws the patchwork as a 3D wireframe and can regenerate the
# corresponding interference intensity.
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
from scipy.signal import savgol_filter as sg
data_img = cv2.imread('sample4.tif',0)
data_img = data_img.astype('float64')
fitimg_whole = np.copy(data_img)
# Dict keyed by (tile_row, tile_col) -> polynomial coefficients.
xstore = np.load('./xoptstore_bot.npy').item()
#xstore_badtiles=np.load('xoptstore_badtiles20180513_21_22_42.npy').item()
def surface_polynomial(size, coeff,(zoomfactory,zoomfactorx)):
    """Evaluate the fitted 2nd-order surface on a size[0] x size[1] grid.

    coeff = [cx2, cy2, cxy, cx, cy, c0]; grid coordinates are scaled by
    the zoom factors first.  (Python 2 tuple-parameter syntax.)
    """
    def poly(x, y):
        x*=zoomfactorx
        y*=zoomfactory
        poly = coeff[0]*x**2+coeff[1]*y**2+coeff[2]*x*y+coeff[3]*x+coeff[4]*y+coeff[5]#+coeff[6]*x**3+coeff[7]*y**3+coeff[8]*x*y**2+coeff[9]*y*x**2
        return poly
    x = np.linspace(0,size[1]-1, size[1])
    y = np.linspace(0,size[0]-1, size[0])
    # Broadcast row/column vectors to evaluate the whole grid at once.
    zz = poly(x[None,:],y[:,None])
    return zz
#dyy,dxx =int(41*np.tan(np.pi*52/180)),41
# Tile size in pixels.
dyy,dxx = 81,81
zoomfactory,zoomfactorx = 1,1
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111, projection='3d')
#ax.set_aspect('equal','box')
hslice=[]
for yy in range(0,data_img.shape[0]-dyy,dyy):
    for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
        if (int(yy/dyy),int(xx/dxx)) in xstore:
            xopt = xstore[(int(yy/dyy),int(xx/dxx))]
            # Note the y-axis flip so the plot is right-side up.
            X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
            height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
            # Keep one vertical slice (tile column 25) for later inspection.
            if int(xx/dxx) == 25:
                hslice.extend(height[:,0])
            ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1))
            # Regenerate the interference intensity implied by the height
            # (wavelength 0.532 um, reflection doubles the path).
            generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
            generated_intensity /= generated_intensity.max()
            generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
            #fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
        else:
            pass
            #xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#cv2.imwrite('fitimg_whole.tif', fitimg_whole.astype('uint8'))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,834
|
hemengf/my_python_lib
|
refs/heads/master
|
/convertcygpath.py
|
# Convert a Cygwin-style path to its Windows form via the `cygpath -w`
# command-line tool (Python 2).
import subprocess
filename = "/cygdrive/c/Lib/site-packages/matplotlib"
cmd = ['cygpath','-w',filename]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = proc.stdout.read()
#output = output.replace('\\','/')[0:-1] #strip \n and replace \\
print output
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,835
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/cannytest.py
|
# Canny edge detection on an equalized, smoothed interference image;
# shows the preprocessed image next to its edge map.
import cv2
from scipy import ndimage as ndi
from skimage import feature
import numpy as np
from matplotlib import pyplot as plt
from skimage import exposure
def equalize(img_array):
    """
    Histogram-equalize an image.

    returns array with float 0-1
    """
    equalized = exposure.equalize_hist(img_array)
    return equalized
img = cv2.imread('sample.tif',0)
img = equalize(img)
# Light Gaussian blur (sigma=1) to suppress pixel noise before Canny.
img = ndi.gaussian_filter(img,1)
edges = feature.canny(img,low_threshold=0.12,high_threshold=0.2)
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,836
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/whitespacetest.py
|
# Print the fraction of white pixels in a binary (0/255) image.
import numpy as np
import cv2
img = cv2.imread('test.tif',0)
img = img.astype('float')
img /= 255.  # map 255 -> 1.0 so the sum counts white pixels
#print img.sum()/(img.shape[0]*img.shape[1])
print img.sum()/len(img.flat)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,837
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/plotheight_interp_whole_1d.py
|
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib as mpl
from scipy.signal import savgol_filter as sg
from scipy import interpolate
import os
from progressbar import progressbar_tty as ptty
data_img = cv2.imread('sample4.tif',0)
data_img = data_img.astype('float64')
cl_img = cv2.imread('cl.tif',0)
cl2_img = cv2.imread('cl2_larger.tif',0)
cl3_img = cv2.imread('cl3.tif',0)
edge_img = cv2.imread('cl_edge.tif',0)
thin_img = cv2.imread('thin.tif',0)
cl_img = cl_img.astype('float64')
cl_img /= 255.
cl2_img = cl2_img.astype('float64')
cl2_img /= 255.
cl3_img = cl3_img.astype('float64')
cl3_img /= 255.
edge_img = edge_img.astype('float64')
edge_img /= 255.
thin_img = thin_img.astype('float64')
thin_img /= 255.
fitimg_whole = np.copy(data_img)
xstorebot = np.load('./xoptstore_bot.npy').item()
xstoreright = np.load('./xoptstore_right.npy').item()
xstoreleft = np.load('./xoptstore_left.npy').item()
xstoretopright= np.load('./xoptstore_top_right.npy').item()
xstoretopleft= np.load('./xoptstore_top_left.npy').item()
floor = -86
def surface_polynomial(size, coeff,(zoomfactory,zoomfactorx)):
    """Evaluate a fitted 2nd-order polynomial surface on a size[0] x size[1] grid.

    coeff = [cx2, cy2, cxy, cx, cy, c0]; grid coordinates are scaled by
    the zoom factors before evaluation.  (Python 2 tuple-parameter syntax.)
    """
    def poly(x, y):
        x*=zoomfactorx
        y*=zoomfactory
        poly = coeff[0]*x**2+coeff[1]*y**2+coeff[2]*x*y+coeff[3]*x+coeff[4]*y+coeff[5]#+coeff[6]*x**3+coeff[7]*y**3+coeff[8]*x*y**2+coeff[9]*y*x**2
        return poly
    x = np.linspace(0,size[1]-1, size[1])
    y = np.linspace(0,size[0]-1, size[0])
    # Broadcast row/column vectors to evaluate the whole grid at once.
    zz = poly(x[None,:],y[:,None])
    return zz
fig = plt.figure(figsize=(7.5,7.5))
ax = fig.add_subplot(111, projection='3d')
#ax = fig.add_subplot(111)
#ax.set_aspect(aspect='equal')
ax.set_zlim(1.5*floor,-0.5*floor)
ax.set_xlim(0,data_img.shape[1])
ax.set_ylim(0,data_img.shape[0])
width = 0.8
xxx = []
yyy = []
zzz = []
ddd=1
#bot
dyy,dxx = 81,81
dd=7
zoomfactory,zoomfactorx = 1,1
print 'Plotting patterned areas...'
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstorebot:
xopt = xstorebot[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
if ((int(yy/dyy)+1,int(xx/dxx)) not in xstorebot) or ((int(yy/dyy)-1,int(xx/dxx)) not in xstorebot):
pass
xxx+=list(X.flat[::dd])
yyy+=list(Y.flat[::dd])
zzz+=list(height.flat[::dd])
#height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
#height[height==0] = np.nan
#ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
#ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
#right
dyy,dxx =int(41*np.tan(np.pi*52/180)),41
zoomfactory,zoomfactorx = 1,1
dd =20
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if xx > 3850:
continue
if (int(yy/dyy),int(xx/dxx)) in xstoreright:
xopt = xstoreright[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
height = surface_polynomial((dyy,dxx), xopt,(zoomfactory,zoomfactorx))
height-=35
xxx+=list(X.flat[::dd])
yyy+=list(Y.flat[::dd])
zzz+=list(height.flat[::dd])
#height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
#height[height==0] = np.nan
#ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
#ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
#left
dyy,dxx =int(42*np.tan(np.pi*53/180)),42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if xx>1421 or xx<332:
continue
if (int(yy/dyy),int(xx/dxx)) in xstoreleft:
xopt = xstoreleft[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=44
xxx+=list(X.flat[::dd])
yyy+=list(Y.flat[::dd])
zzz+=list(height.flat[::dd])
#height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
#height[height==0] = np.nan
#ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
#ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#topright
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstoretopright:
xopt = xstoretopright[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=82
xxx+=list(X.flat[::dd])
yyy+=list(Y.flat[::dd])
zzz+=list(height.flat[::dd])
#height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
#height[height==0] = np.nan
#ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
#ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#topleft
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstoretopleft:
xopt = xstoretopleft[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(yy,yy+dyy,zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=80.3
#height*= 1-cl3_img[yy:yy+dyy,xx:xx+dxx]
#height[height==0] = np.nan
xxx+=list(X.flat[::dd])
yyy+=list(Y.flat[::dd])
zzz+=list(height.flat[::dd])
#ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
#ax.plot_surface(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=0,cmap = 'ocean',norm= mpl.colors.Normalize(vmin=-90,vmax=1))
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
xl = np.load('thin/xleft.npy')
yl = np.load('thin/yleft.npy')
zl = np.load('thin/zleft.npy')
xr = np.load('thin/xright.npy')
yr = np.load('thin/yright.npy')
zr = np.load('thin/zright.npy')
#thinpart
print 'Interpolating thin part...'
dxx=1
offsetl = -82-2.84+1.22
offsetr = -82-1.67
if os.path.exists('xxxthin.npy'):
xxxthin=np.load('xxxthin.npy')
yyythin=np.load('yyythin.npy')
zzzthin=np.load('zzzthin.npy')
print 'Thin part loaded from existing interpolation'
else:
xxxthin=[]
yyythin=[]
zzzthin=[]
for xx in range(505,1672,dxx):
slicing = xx
ylslice = [yl[i] for i in range(len(xl)) if xl[i] == slicing]
if len(ylslice)<2:
continue
zlslice = [zl[i]+offsetl for i in range(len(xl)) if xl[i] == slicing]
f = interpolate.interp1d(ylslice,zlslice,kind='linear')
ynew = np.arange(min(ylslice),max(ylslice),10)
znew = f(ynew)
xxxthin.extend([xx]*len(ynew))
yyythin.extend(ynew)
zzzthin.extend(znew)
#ax.plot_wireframe(X,Y,Z,rstride=int(dyy/1),cstride=int(dxx/1),colors='k',lw=0.4)
for xx in range(2579,3703,dxx):
slicing = xx
yrslice = [yr[i] for i in range(len(xr)) if xr[i] == slicing]
if len(yrslice)<2:
continue
zrslice = [zr[i]+offsetr for i in range(len(xr)) if xr[i] == slicing]
f = interpolate.interp1d(yrslice,zrslice,kind='linear')
ynew = np.arange(min(yrslice),max(yrslice),10)
znew = f(ynew)
xxxthin.extend([xx]*len(ynew))
yyythin.extend(ynew)
zzzthin.extend(znew)
#ax.plot_wireframe(X,Y,Z,rstride=int(dyy/1),cstride=int(dxx/1),colors='k',lw=0.4)
print 'Thin part interpolated and saved'
np.save('xxxthin.npy',xxxthin)
np.save('yyythin.npy',yyythin)
np.save('zzzthin.npy',zzzthin)
xxx.extend(xxxthin)
yyy.extend(yyythin)
zzz.extend(zzzthin)
#contact line
print 'Extracting contact line...'
x = []
y = []
xxxinterp=[]
yyyinterp=[]
zzzinterp=[]
for j in range(0,cl_img.shape[1],ddd):
#for j in range(0,2100,ddd):
for i in range(cl_img.shape[0]-1,0,-ddd):
if cl_img[i,j] == 1:
xxx.append(j)
yyy.append(i)
zzz.append(floor)
xxxinterp.append(j)
yyyinterp.append(i)
zzzinterp.append(floor)
x.append(j)
y.append(i)
break
#ptty(j,cl_img.shape[1]/ddd,1)
ax.plot(x,y, 'C1',zs=floor)
#x_edge=[]
#y_edge=[]
#z_edge=[]
#for i in range(0,edge_img.shape[0],2):
# for j in range(0,edge_img.shape[1],2):
# if edge_img[i,j] == 1:
# x_edge.append(j)
# y_edge.append(i)
# z_edge.append(znew[j,i])
#ax.scatter(x_edge,y_edge,z_edge,c='k',s=0.01)
print 'No.of points:', len(yyy)
print 'Longitudinal slicing...'
for slicing in range(0,4200,70):
#for slicing in (1500,1600,1700):
yyyslice = [yyy[i] for i in range(len(xxx)) if xxx[i]==slicing]
zzzslice = [zzz[i] for i in range(len(xxx)) if xxx[i]==slicing]
if len(yyyslice)<4:
continue
zzzslice = [s for _,s in sorted(zip(yyyslice, zzzslice))]#sort zzzslice according to yyyslice
yyyslice = sorted(yyyslice)
duplicates = dict((i,yyyslice.count(s)) for (i,s) in enumerate(np.unique(yyyslice)) if yyyslice.count(s)>1)
for i in duplicates:
zzzslice[i] = np.mean(zzzslice[i:i+duplicates[i]])
zzzslice[i+1:i+duplicates[i]] = [np.nan]*(duplicates[i]-1)
yyyslice = np.unique(yyyslice)
zzzslice = np.array(zzzslice)
zzzslice = zzzslice[~np.isnan(zzzslice)]
try:
f = interpolate.interp1d(yyyslice,zzzslice,kind='cubic')
except:
continue
#zzzslice_smooth = sg(zzzslice, window_length=5,polyorder=2)
#ax.scatter(yyyslice,zzzslice,s=8)
yyynew = np.arange(min(yyyslice),max(yyyslice))
ax.plot(ys=yyynew,zs=f(yyynew),xs=len(yyynew)*[slicing],zdir='z',color="C1")
#ax.plot(yyynew,f(yyynew))
yyyinterp.extend(yyynew)
zzzinterp.extend(f(yyynew))
xxxinterp.extend(len(yyynew)*[slicing])
ptty(slicing,3850,2)
print 'Re-processing contactline for transverse slicing...'
for i in range(0,cl_img.shape[0],ddd):
#for j in range(0,2100,ddd):
for j in range(cl_img.shape[1]-1,int(cl_img.shape[1]*0.3),-ddd):
if cl_img[i,j] == 1:
xxxinterp.append(j)
yyyinterp.append(i)
zzzinterp.append(floor)
x.append(j)
y.append(i)
break
for j in range(0,int(cl_img.shape[1]*0.7),ddd):
if cl_img[i,j] == 1:
xxxinterp.append(j)
yyyinterp.append(i)
zzzinterp.append(floor)
x.append(j)
y.append(i)
break
#ax.plot(x,y, 'C1',zs=floor)
print 'Transverse slicing...'
for slicing in range(300,2800,500):
xxxslice = [xxxinterp[i] for i in range(len(yyyinterp)) if yyyinterp[i]==slicing]
zzzslice = [zzzinterp[i] for i in range(len(yyyinterp)) if yyyinterp[i]==slicing]
if len(xxxslice)<4:
continue
zzzslice = [s for _,s in sorted(zip(xxxslice, zzzslice))]#sort zzzslice according to yyyslice
xxxslice = sorted(xxxslice)
duplicates = dict((i,xxxslice.count(s)) for (i,s) in enumerate(np.unique(xxxslice)) if xxxslice.count(s)>1)
for i in duplicates:
zzzslice[i] = np.mean(zzzslice[i:i+duplicates[i]])
zzzslice[i+1:i+duplicates[i]] = [np.nan]*(duplicates[i]-1)
xxxslice = list(np.unique(xxxslice))
zzzslice = np.array(zzzslice)
zzzslice = zzzslice[~np.isnan(zzzslice)]
zzzslice= list(zzzslice)
a = xxxslice[:-1:2]+[xxxslice[-1]]
b = zzzslice[:-1:2]+[zzzslice[-1]]
try:
f = interpolate.interp1d(a,b,kind='cubic')
except Exception as e:
print e
continue
ptty(slicing,max(range(300,2800,500)),1)
#zzzslice_smooth = sg(zzzslice, window_length=5,polyorder=2)
#
#ax.scatter(yyyslice,zzzslice,s=5)
xxxnew = np.arange(min(xxxslice[::]),max(xxxslice[::]))
ax.plot(xs=xxxnew,zs=f(xxxnew),ys=len(xxxnew)*[slicing],zdir='z',color="C0")
plt.tight_layout()
plt.axis('off')
#cv2.imwrite('fitimg_whole.tif', fitimg_whole.astype('uint8'))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,838
|
hemengf/my_python_lib
|
refs/heads/master
|
/plotwithsliders.py
|
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib.widgets import Button
def sliders_buttons(pararange, parainit, height=0.08, incremental=0.001):
    """Build a parameter slider plus '+'/'-' nudge buttons.

    The slider spans pararange=(lo, hi) starting at parainit; the two
    buttons step its value by +/-incremental.  `height` positions the
    widget row in figure coordinates.  Returns (slider, plus_button,
    minus_button) -- keep references to all three or matplotlib will
    garbage-collect the widgets.
    """
    slider_ax = plt.axes([0.25, height, 0.65, 0.03])
    slider = Slider(slider_ax, 'para', pararange[0], pararange[1],
                    valinit=parainit, valfmt='%1.3f')
    minus_ax = plt.axes([0.1, height, 0.02, 0.03])
    plus_ax = plt.axes([0.12, height, 0.02, 0.03])
    plus_button = Button(plus_ax, '+')
    minus_button = Button(minus_ax, '-')

    def _make_nudger(delta):
        # Factory so each button captures its own step size.
        def _on_click(event):
            slider.set_val(slider.val + delta)
        return _on_click

    plus_button.on_clicked(_make_nudger(incremental))
    minus_button.on_clicked(_make_nudger(-incremental))
    return slider, plus_button, minus_button
def plotwithsliders(slider,buttonplus,buttonminus,ax,x,y,mycolor,pararange,parainit):
    """Plot a parametric curve and wire the given sliders to update it.

    x and y are callables mapping the parameter values to plot data;
    slider is a sequence of Slider widgets, one per parameter.
    Returns the Line2D artist.

    NOTE(review): `para = parainit` aliases the caller's list, so slider
    moves mutate `parainit` in place -- confirm callers rely on this
    before changing it to a copy.  buttonplus/buttonminus/pararange are
    accepted but unused here (call-site symmetry with sliders_buttons).
    """
    para = parainit
    lines, = ax.plot(x(*para),y(*para),color=mycolor)
    def update(arbitrary_arg):
        # Pull the current value of every slider, then redraw the line.
        for i in range(len(slider)):
            para[i] = slider[i].val
        lines.set_xdata(x(*para))
        lines.set_ydata(y(*para))
        plt.draw()
        #fig.canvas.draw_idle()
    for i in range(len(slider)):
        slider[i].on_changed(update)
    return lines
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,839
|
hemengf/my_python_lib
|
refs/heads/master
|
/find_peaks.py
|
from __future__ import division
import numpy as np
import warnings
def exact_local_maxima1D(a):
    """Strict interior local maxima of a 1D sequence.

    Element i (1 <= i <= len(a)-2) is a maximum when it is strictly greater
    than both neighbours.  Returns a boolean array of length len(a)-2; the
    first and last elements are never reported.
    Adapted from http://stackoverflow.com/questions/4624970/finding-local-maxima-minima-with-numpy-in-a-1d-numpy-array
    """
    mid = a[1:-1]
    return np.greater(mid, a[:-2]) & np.greater(mid, a[2:])
def exact_local_minima1D(a):
    """Strict interior local minima; boolean counterpart of exact_local_maxima1D."""
    mid = a[1:-1]
    return np.less(mid, a[:-2]) & np.less(mid, a[2:])
def right_edge_local_maxima1D(a):
    """
    Plateau-tolerant local maxima of a 1D array.

    Returns a boolean array for the interior elements a[1:-1] (first and
    last elements are never reported).  Where equal values form a plateau,
    the RIGHT edge of the plateau is taken as the peak position.

    `a` should be a float array: every plateau member except its rightmost
    one is nudged down in place (on a copy) by half the smallest nonzero
    step in the data, and an integer array cannot absorb that fractional
    nudge.
    """
    warnings.filterwarnings("ignore")
    aa = np.copy(a) # make sure input itself won't be modified
    diff= np.diff(aa)
    smallest_diff = np.min(abs(diff[np.nonzero(diff)]))
    # diff has length len(a)-1 and diff[i] == 0 means a[i] == a[i+1], so the
    # mask must be applied to aa[:-1].  The original indexed aa directly with
    # this shorter boolean mask, which raises IndexError on modern NumPy
    # (older NumPy only emitted the deprecation warning silenced above).
    aa[:-1][diff==0.] -= smallest_diff/2
    true_values = np.greater(aa[1:-1], aa[:-2]) & np.greater(aa[1:-1], aa[2:])
    return true_values
def left_edge_local_maxima1D(a):
    """Plateau-tolerant local maxima; the LEFT edge of a plateau is the peak.

    Boolean result covers a[1:-1].  `a` should be a float array (an integer
    array cannot absorb the fractional in-place nudge on the copy).
    """
    aa = a.copy()
    # step[i] = a[i] - a[i-1]; a leading sentinel of 1 keeps the mask the
    # same length as aa and never marks index 0 as a plateau follower.
    step = np.insert(np.diff(aa), 0, 1)
    nudge = np.min(np.abs(step[step != 0])) / 2
    # Push every plateau member except the leftmost slightly down so a
    # strict comparison picks out the left edge.
    aa[step == 0.] -= nudge
    mid = aa[1:-1]
    return np.greater(mid, aa[:-2]) & np.greater(mid, aa[2:])
def right_edge_local_minima1D(a):
    """
    Plateau-tolerant local minima; the RIGHT edge of a plateau is the
    minimum position.  Mirror of right_edge_local_maxima1D.

    Boolean result covers a[1:-1].  `a` should be a float array (the
    fractional nudge below cannot be written into an integer copy).
    """
    warnings.filterwarnings("ignore")
    aa = np.copy(a) # make sure input itself won't be modified
    diff= np.diff(aa)
    smallest_diff = np.min(abs(diff[np.nonzero(diff)]))
    # diff is one element shorter than aa; apply the plateau mask to
    # aa[:-1].  Indexing aa directly with the short mask (as the original
    # did) raises IndexError on modern NumPy.
    aa[:-1][diff==0.] += smallest_diff/2
    true_values = np.less(aa[1:-1], aa[:-2]) & np.less(aa[1:-1], aa[2:])
    return true_values
def left_edge_local_minima1D(a):
    """Plateau-tolerant local minima; the LEFT edge of a plateau is the
    minimum position.  Mirror of left_edge_local_maxima1D.
    `a` should be a float array.
    """
    aa = a.copy()
    # Backward steps with a leading nonzero sentinel so the mask length
    # matches aa (index 0 is never treated as a plateau follower).
    step = np.insert(np.diff(aa), 0, 1)
    nudge = np.min(np.abs(step[step != 0])) / 2
    # Lift every plateau member except the leftmost so a strict comparison
    # picks out the left edge as the minimum.
    aa[step == 0.] += nudge
    mid = aa[1:-1]
    return np.less(mid, aa[:-2]) & np.less(mid, aa[2:])
def find_indices_max(a):
    """
    Indices into `a` of strict interior local maxima.
    Returns a np array of indices (offset by 1 because the flag array
    starts at element 1 of `a`).
    """
    flags = exact_local_maxima1D(a)
    hits = [pos for pos, flag in enumerate(flags) if flag]
    return np.array(hits) + 1
def find_indices_min(a):
    """Indices into `a` of strict interior local minima, as a np array."""
    flags = exact_local_minima1D(a)
    hits = [pos for pos, flag in enumerate(flags) if flag]
    return np.array(hits) + 1
def find_indices_all(a):
    """
    Indices into `a` of all strict interior local extrema (maxima and
    minima together).  Returns a np array of indices.
    """
    extrema = exact_local_maxima1D(a) | exact_local_minima1D(a)
    hits = [pos for pos, flag in enumerate(extrema) if flag]
    return np.array(hits) + 1
def left_find_indices_max(a):
    """Indices of plateau-tolerant maxima (left edge counts), as a np array."""
    flags = left_edge_local_maxima1D(a)
    hits = [pos for pos, flag in enumerate(flags) if flag]
    return np.array(hits) + 1
def left_find_indices_min(a):
    """Indices of plateau-tolerant minima (left edge counts), as a np array."""
    flags = left_edge_local_minima1D(a)
    hits = [pos for pos, flag in enumerate(flags) if flag]
    return np.array(hits) + 1
def right_find_indices_max(a):
    """Indices of plateau-tolerant maxima (right edge counts), as a np array."""
    flags = right_edge_local_maxima1D(a)
    hits = [pos for pos, flag in enumerate(flags) if flag]
    return np.array(hits) + 1
def right_find_indices_min(a):
    """Indices of plateau-tolerant minima (right edge counts), as a np array."""
    flags = right_edge_local_minima1D(a)
    hits = [pos for pos, flag in enumerate(flags) if flag]
    return np.array(hits) + 1
def left_find_indices_all(a):
    """Indices of all plateau-tolerant extrema (left edges count), as a np array."""
    extrema = left_edge_local_maxima1D(a) | left_edge_local_minima1D(a)
    hits = [pos for pos, flag in enumerate(extrema) if flag]
    return np.array(hits) + 1
def right_find_indices_all(a):
    """Indices of all plateau-tolerant extrema (right edges count), as a np array."""
    extrema = right_edge_local_maxima1D(a) | right_edge_local_minima1D(a)
    hits = [pos for pos, flag in enumerate(extrema) if flag]
    return np.array(hits) + 1
if __name__ == "__main__":
    # Smoke test / demo: exercise the exact-extrema helpers on a sawtooth,
    # then the plateau-tolerant routine on an array containing flat runs.
    a = np.array([2,3,1,2,3,2,1,2,3,2,1,2,3,2])
    s = exact_local_minima1D(a)
    s1 = find_indices_min(a)
    s2 = find_indices_max(a)
    s3 = find_indices_all(a)
    b = np.array([-1,4,4,2,3,3,3,3,2,6,1])
    # The edge-tolerant routines apply fractional in-place nudges, so the
    # input must be float (an int array would reject the subtraction).
    b = b.astype("float")
    print "if minima(not counting the first the last element)", s, type(s)
    print "min indices:", s1, type(s1)
    print "max indices:", s2, type(s2)
    print "all peaks:", s3, type(s3)
    print left_find_indices_all(b)
    print b
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,840
|
hemengf/my_python_lib
|
refs/heads/master
|
/oseen.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Graphical root solve: plot rhs(R) for a range of speeds U together with the
# line y = R; the intersections give the self-consistent radius.
# NOTE(review): units look like mm for R and m/s for U (R is scaled by 1e-3
# inside the log) -- confirm against the accompanying notes.
Rmin = 1
Rmax = 5
R = np.arange(Rmin,Rmax,0.01)
for U in np.arange(0.09,0.136,0.01):
    v = 438*1e-6  # kinematic viscosity; presumably m^2/s -- verify
    # Oseen-type drag balance rearranged for R; the log term diverges as its
    # argument approaches 1, so the curve is only meaningful where it is real.
    rhs = np.sqrt(1e6*v*U/9.8)*np.sqrt(2/np.log(7.4*v/(2*R*1e-3*U)))
    plt.plot(R, rhs)
plt.plot(R, R)  # reference line y = R
plt.ylim(Rmin,Rmax)
plt.ylim(Rmin,Rmax)
Re = 6*1e-3*0.1/v  # Reynolds number estimate (uses v from the last loop pass)
print 'Re = ', Re
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,841
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/failed_pattern_shift2D.py
|
from __future__ import division
import scipy.optimize
import scipy.spatial.distance
import partial_derivative
def shape_function(x,y):
    """Model film surface height at (x, y): a shallow paraboloid,
    5e-6 * (x^2 + y^2) + 68, in the same length units as x and y.
    Works elementwise on numpy arrays as well as on scalars."""
    curvature = 0.000005
    base_height = 68
    return curvature * (x * x + y * y) + base_height
    #return 0.00000001*x + 68
def find_k_refracting(k_incident, x1, n1,n2):
#x1 = [[xa,ya],
# [xb,yb],
# [xc,yc]]
gradient = np.array(partial_derivative.partial_derivative(shape_function, *x1.T))
#gradient= [[df/dxa,df/dya],
# [df/dxb,df/dyb],
# [df/dxc,df/dyc]]
n = np.ones((x1.shape[0], 3))
n[:,:-1] = gradient
norm = np.linalg.norm(n, axis = 1)
n = n/norm[:,np.newaxis] # n is the unit normal vector pointing 'upward'
c = -np.dot(n, k_incident)
r = n1/n2
if ((1-r**2*(1-c**2)) < 0).any():
print "Total internal reflection occurred."
print "1-r**2*(1-c**2) = \n", 1-r**2*(1-c**2)
sys.exit(0)
factor = (r*c- np.sqrt(1-r**2*(1-c**2)))
k_refracting = np.tile(r*k_incident,(x1.shape[0], 1)) + n*factor[:,np.newaxis]
#print "n = ", n
#print 'c =',c
#print "factor", factor
#print "tile", np.tile(r*k_incident,(x1.shape[0], 1))
#print "k_refracting = ", k_refracting
return k_refracting
#@profile
def find_x0(k_incident, x1, n1,n2):
def Fx(x):
k_refracting = find_k_refracting(k_incident, x, n1, n2)
return k_refracting[:,0]*(shape_function(*x1.T)+shape_function(*x.T))+k_refracting[:,2]*(x1-x)[:,0]
def Fy(x):
k_refracting = find_k_refracting(k_incident, x, n1, n2)
return k_refracting[:,1]*(shape_function(*x1.T)+shape_function(*x.T))+k_refracting[:,2]*(x1-x)[:,1]
def F(x):
return 1e5*(Fx(x)**2 + Fy(x)**2)
print "F = ", F(x1)
"""
A FAILED PROJECT.
Having F(x,y,x1,y1) = 0. Easy to root find
1 pair of x,y given 1 pair of x1,y1. Successful
in vectorizing F, making it accept a matrix of
x1,y1.
FAILED IN THE NEXT STEP OF ROOT FINDING.
SCIPY DOESN'T SEEM TO SUPPORT SIMULTANEOUS
ROOT FINDING (vectorization).
"""
x0 = scipy.optimize.root(F,x1)
return x0
def optical_path_diff(k_incident, x1, n1,n2):
x0 = find_x0(k_incident, x1, n1, n2)
p0 = np.concatenate((x0, shape_function(*x0.T)[:,np.newaxis]),axis=1)
p1 = np.concatenate((x1, shape_function(*x1.T)[:,np.newaxis]),axis=1)
p1_image_point = np.concatenate((x1, -shape_function(*x1.T)[:,np.newaxis]),axis=1)
vec_x0x1 = p1-p0
norm = np.linalg.norm(vec_x0x1, axis = 1)
norm[norm == 0] = 1
vec_x0x1 = vec_x0x1/norm[:,np.newaxis]
cos = np.dot(vec_x0x1, k_incident)
dist1 = scipy.spatial.distance.cdist(p0,p1,'euclidean')
dist2 = scipy.spatial.distance.cdist(p0,p1_image_point,'euclidean')
dist1 = np.diagonal(dist1)
dist2 = np.diagonal(dist2)
#print "vec_x0x1 = ", vec_x0x1
#print "cos = ", cos
#print "p0 = ", p0
#print "p1 = ", p1
#print "dist1 = ", dist1
#print "dist2 = ", dist2
OPD_part1 = dist1*cos*n1
OPD_part2 = dist2*n2
OPD = OPD_part2-OPD_part1
return OPD
def pattern(opd):
    """Two-beam interference intensity (range 0..2) for an optical path
    difference `opd` in microns, at a 0.532 um wavelength."""
    wavelength = 0.532
    phase = (2 * np.pi / wavelength) * opd
    return 1 + np.cos(phase)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
import sys
import processbar
import os
print "starting..."
i = 0
phi = 0
for theta in np.linspace(0.,0.1,1):
fig = plt.figure()
ax = fig.add_subplot(111)
i += 1
opd = optical_path_diff(k_incident = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi), -np.cos(theta)]),\
x1 = np.array([[0,10]]),\
n1 = 1.5,\
n2 = 1)
intensity = pattern(opd)
#opd_expected = 2*shape_function(0)*np.cos(np.arcsin(np.sin(angle-0.0000001)*1.5)+0.0000001)
print opd
#print "error in OPD = " ,(opd-opd_expected)/0.532, "wavelength"
#ax.plot(detecting_range, intensity)
#plt.ylim((0,2.5))
#ax.set_xlabel('$\mu m$')
#ax.text(0, 2.2, r'$rotated : %.4f rad$'%angle, fontsize=15)
#dirname = "./movie2D/"
#if not os.path.exists(dirname):
# os.makedirs(dirname)
#plt.savefig(dirname+'{:4.0f}'.format(i)+'.tif')
#plt.close()
#processbar.processbar_tty(i, 100, 1)
print "finished!"
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,842
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/plotheight_whole.py
|
from __future__ import division
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
data_img = cv2.imread('sample4.tif',0)
fitimg_whole = np.copy(data_img)
xstorebot = np.load('./xoptstore_bot.npy').item()
xstoreright = np.load('./xoptstore_right.npy').item()
xstoreleft = np.load('./xoptstore_left.npy').item()
xstoretopright= np.load('./xoptstore_top_right.npy').item()
xstoretopleft= np.load('./xoptstore_top_left.npy').item()
#xstore_badtiles=np.load('xoptstore_badtiles20180513_21_22_42.npy').item()
def surface_polynomial(size, coeff,(zoomfactory,zoomfactorx)):
    """Evaluate the fitted quadratic height surface on a size[0] x size[1]
    grid, with grid coordinates rescaled by the zoom factors so the
    polynomial is evaluated in original-image pixel units.

    NOTE(review): the tuple parameter `(zoomfactory, zoomfactorx)` is
    Python-2-only syntax (removed by PEP 3113); this file cannot run under
    Python 3 as written.
    """
    def poly(x, y):
        # In-place scaling is safe here: poly is called exactly once on
        # freshly created linspace arrays.
        x*=zoomfactorx
        y*=zoomfactory
        poly = coeff[0]*x**2+coeff[1]*y**2+coeff[2]*x*y+coeff[3]*x+coeff[4]*y+coeff[5]#+coeff[6]*x**3+coeff[7]*y**3+coeff[8]*x*y**2+coeff[9]*y*x**2
        return poly
    x = np.linspace(0,size[1]-1, size[1])
    y = np.linspace(0,size[0]-1, size[0])
    # Broadcast a row of x against a column of y to evaluate the full grid.
    zz = poly(x[None,:],y[:,None])
    return zz
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
#ax.set_aspect('equal','box')
#bot
width=0.8
dyy,dxx = 81,81
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstorebot:
xopt = xstorebot[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
ax.plot_wireframe(X,Y,height,rstride=int(dyy/2),cstride=int(dxx/2),lw=width)
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#right
dyy,dxx =int(41*np.tan(np.pi*52/180)),41
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstoreright:
xopt = xstoreright[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=35
ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#left
dyy,dxx =int(42*np.tan(np.pi*53/180)),42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
#if xx>1430:
#continue
if (int(yy/dyy),int(xx/dxx)) in xstoreleft:
xopt = xstoreleft[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=44
ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#topright
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstoretopright:
xopt = xstoretopright[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=84
ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#topleft
dyy, dxx = 35,42
zoomfactory,zoomfactorx = 1,1
for yy in range(0,data_img.shape[0]-dyy,dyy):
for xx in range(0,data_img.shape[1]-dxx,dxx):#xx,yy starting upper left corner of patch
if (int(yy/dyy),int(xx/dxx)) in xstoretopleft:
xopt = xstoretopleft[(int(yy/dyy),int(xx/dxx))]
X,Y =np.meshgrid(range(xx,xx+dxx,zoomfactorx),range(data_img.shape[0]-yy,data_img.shape[0]-yy-dyy,-zoomfactory))
height = surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx))
height-=82
ax.plot_wireframe(X,Y,height,rstride=int(dyy/1),cstride=int(dxx/1),lw=width)
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial((dyy/zoomfactory,dxx/zoomfactorx), xopt,(zoomfactory,zoomfactorx)))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
fitimg_whole[yy:yy+dyy,xx:xx+dxx] = 255*generated_intensity
else:
pass
#xopt = xstore_badtiles[(int(yy/dyy),int(xx/dxx))]
#cv2.imwrite('fitimg_whole.tif', fitimg_whole.astype('uint8'))
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,843
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/basinhopping_2steps_version0.py
|
#!/usr/bin/env python
from __future__ import division
import sys
from scipy import interpolate
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import exposure
from scipy.optimize import basinhopping
def equalize(img_array):
    """
    Histogram-equalize an image via skimage.
    Returns a float array scaled to 0-1 regardless of the input dtype.
    """
    equalized = exposure.equalize_hist(img_array)
    return equalized
def difference(data_img, generated_img):
    """Sum of squared pixel differences between two images.

    Both inputs are expected as float arrays scaled to [0, 1].
    """
    residual = data_img - generated_img
    return np.sum(residual * residual)
def surface_polynomial(size, coeff):
    """Evaluate a quadratic surface on an integer pixel grid.

    size  : (rows, cols) of the output array.
    coeff : [a, b, c, d, e, f] for a*x^2 + b*y^2 + c*x*y + d*x + e*y + f,
            where x runs along columns and y along rows.
    Returns a float array of shape (rows, cols).
    """
    ny, nx = size[0], size[1]
    x = np.linspace(0, nx - 1, nx)[None, :]
    y = np.linspace(0, ny - 1, ny)[:, None]
    a, b, c, d, e, f = coeff[0], coeff[1], coeff[2], coeff[3], coeff[4], coeff[5]
    return a*x**2 + b*y**2 + c*x*y + d*x + e*y + f
def nl(coeff, data_img):
    """
    Negative-likelihood-style mismatch between `data_img` and the fringe
    pattern generated from the surface described by `coeff`; the optimizer
    minimizes this.  data_img must be a float image scaled to 0-1.
    """
    height = surface_polynomial(data_img.shape, coeff)
    fringes = 1 + np.cos((4*np.pi/0.532) * height)
    fringes /= fringes.max()  # normalize to 0-1 like the data
    return difference(data_img, fringes)
def accept_test(f_new,x_new,f_old,x_old):
    """Basinhopping acceptance gate: accept a step only while the linear
    coefficients x_new[3] and x_new[4] stay within the plausible
    [-0.15, 0.15] band.  The function-value arguments are ignored."""
    return abs(x_new[3]) <= 0.15 and abs(x_new[4]) <= 0.15
def callback(x,f,accept):
    """Per-minimum basinhopping hook; intentionally a no-op, kept so debug
    printing of `x` can be re-enabled easily."""
    return None
if __name__ == "__main__":
from scipy.ndimage import gaussian_filter
import time
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
N = 30 #a,b value resolution; a, b linear term coeff
sample_size = 0.15#a, b value range
start = time.time()
data_img = cv2.imread('sample.tif', 0)
fitimg = np.copy(data_img)
xstore = {}
dyy,dxx = 100,100
for yy in range(0,1400,dyy):
for xx in range(0,700,dxx):#xx,yy starting upper left corner of patch
patchysize, patchxsize = 100,100
zoomfactory,zoomfactorx = 1,1
data_patch = data_img[yy:yy+patchysize,xx:xx+patchxsize]
data_patch= gaussian_filter(data_patch,sigma=0)
data_patch = data_patch[::zoomfactory,::zoomfactorx]
data_patch= equalize(data_patch)#float0-1
alist = np.linspace(0,sample_size,N) # x direction
blist = np.linspace(-sample_size, sample_size,2*N) # y direction
aa, bb = np.meshgrid(alist,blist)
nl_1storder = np.empty(aa.shape)
for i in np.arange(alist.size):
for j in np.arange(blist.size):
if (j-0.5*len(blist))**2+(i)**2<=(0.2*len(alist))**2:#remove central region to avoid 0,0 global min
nl_1storder[j,i] = np.nan
else:
nl_1storder[j,i] = nl([0,0,0,aa[j,i],bb[j,i],0],data_patch)
sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
sys.stdout.flush()
sys.stdout.write('\n')
elapsed = time.time() - start
print "took %.2f seconds to compute the negative likelihood" % elapsed
index = np.unravel_index(np.nanargmin(nl_1storder), nl_1storder.shape)
index = (alist[index[1]], blist[index[0]])
index = np.array(index)
initcoeff_linear= np.array([0,0,0,index[0],index[1],0])
#print initcoeff_linear
initcoeff_extendlist = []
if (int(yy/dyy)-1,int(xx/dxx)) in xstore:
up = xstore[(int(yy/dyy)-1,int(xx/dxx))]
initcoeff_extendlist.append(np.array([up[0],up[1],up[2],up[2]*dyy+up[3],2*up[1]*dyy+up[4],up[1]*dyy*dyy+up[4]*dyy+up[5]]))
if (int(yy/dyy),int(xx/dxx)-1) in xstore:
left = xstore[(int(yy/dyy),int(xx/dxx)-1)]
initcoeff_extendlist.append(np.array([left[0],left[1],left[2],2*left[0]*dxx+left[3],left[2]*dxx+left[4],left[0]*dxx*dxx+left[3]*dxx+left[5]]))
else:
print 'no calculated neighbours found...'
if len(initcoeff_extendlist) > 0:
initcoeff_extend = np.mean(initcoeff_extendlist,axis=0)
else:
initcoeff_extend = initcoeff_linear
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, initcoeff_linear))
generated_intensity /= generated_intensity.max()
plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
#plt.show()
#initcoeff_extend = np.array([0,0,0,0,0,0])
iternumber = 0
while 1:
print 'iternumber =', iternumber,'for',xx,yy
result = basinhopping(nl, initcoeff_extend, niter = 100, T=100, stepsize=0.0001, interval=20,accept_test=accept_test,minimizer_kwargs={'method': 'Nelder-Mead', 'args': (data_patch)}, disp=True, callback=callback)
if result.fun < 520:
break
else:
initcoeff_extend = result.x
iternumber+=1
if iternumber == 2:
initcoeff_extend = initcoeff_linear
print 'using linear coefficients'
if iternumber == 2:
break
xopt = result.x
xstore[(int(yy/100),int(xx/100))]=xopt
#print xopt
generated_intensity = 1+np.cos((4*np.pi/0.532)*surface_polynomial(data_patch.shape, xopt))
generated_intensity /= generated_intensity.max()
generated_intensity = zoom(generated_intensity,(zoomfactory,zoomfactorx))
#plt.imshow(np.concatenate((generated_intensity,data_patch,(generated_intensity-data_patch)**2),axis=1))
#plt.show()
fitimg[yy:yy+patchysize,xx:xx+patchxsize] = 255*generated_intensity
cv2.imwrite('fitimg.tif', fitimg.astype('uint8'))
print 'time used', time.time()-start, 's'
print 'finished'
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,844
|
hemengf/my_python_lib
|
refs/heads/master
|
/elephantfeet/elephantfeet_generation.py
|
from boundaryv.brownian_gas import touch, findnearest
from door_position.disks import tchbnd
import numpy as np
import matplotlib.pyplot as plt
import os
import progressbar
class Elephant_foot():
    """A growing disk ("elephant foot"): a 2D position plus a radius that
    expands at a constant radial velocity."""

    def __init__(self, radius, velocity):
        # Feet start at the origin; the environment repositions them later.
        self.position = np.array([0., 0.])
        self.radius = radius
        self.velocity = velocity

    def expand(self, dt):
        """Grow the radius by one time step of length dt."""
        self.radius = self.radius + self.velocity * dt
class Environment():
    """Box of non-overlapping, expanding disks ("elephant feet").

    Holds the feet, their positions as an (totnum, 2) array (nan until
    placed), and the simulation step size.
    """
    def __init__(self, boxsize, totnum, dt, initial_radius, velocity):
        self.boxsize = boxsize
        self.totnum = totnum
        self.foot_list = [0] * self.totnum
        self.foot_position_array = np.empty((self.totnum,2))
        self.foot_position_array[:] = np.nan
        self.dt = dt
        self.initial_radius = initial_radius
        self.velocity = velocity
    def create_feet(self):
        """Place feet at random, rejecting overlaps and boundary contact;
        caches the result to .npy files and reuses it on later runs."""
        print 'Creating elephant feet...'
        # NOTE(review): bitwise & on two bools happens to work here, but
        # `and` is what is conventionally meant.
        if os.path.exists('./initial_particles.npy') & os.path.exists('./initial_positions.npy'):
            print 'Reading saved initial conditions...'
            self.foot_list = np.load('initial_particles.npy')
            self.foot_position_array = np.load('initial_positions.npy')
        else:
            for n in range(0,self.totnum):
                # Rejection sampling: retry until the candidate neither
                # overlaps its nearest neighbour nor touches the boundary.
                out_of_bnd = 1
                overlap = 1
                while out_of_bnd or overlap:
                    foot = Elephant_foot(self.initial_radius, self.velocity)
                    foot.position[0] = np.random.uniform(foot.radius, self.boxsize[0]-foot.radius)
                    foot.position[1] = np.random.uniform(foot.radius, self.boxsize[1]-foot.radius)
                    try:
                        nearest_idx = findnearest(foot.position, self.foot_position_array)
                        nearest_foot = self.foot_list[nearest_idx]
                        overlap = touch(foot.position, self.foot_position_array[nearest_idx],foot.radius,nearest_foot.radius)
                        tchbndlist = tchbnd(foot.position, foot.radius, self.boxsize)
                        out_of_bnd = sum(tchbndlist)
                    except ValueError:
                        # No placed neighbour yet (all positions still nan):
                        # accept the first candidate as-is.
                        break
                self.foot_list[n] = foot
                self.foot_position_array[n,:] = foot.position
                progressbar.progressbar_tty(n+1, self.totnum, 1)
            np.save('initial_particles',self.foot_list)
            np.save('initial_positions',self.foot_position_array)
    def visualize(self):
        """Render all feet as filled circles and save a frame to ./movie/.

        NOTE(review): the frame index `i` is read from the enclosing
        module's globals (the __main__ loop) -- fragile; confirm intended.
        """
        fig = plt.figure(figsize=(8.0,5.0))
        for foot in self.foot_list:
            circle = plt.Circle(foot.position, foot.radius, fill = True, linewidth=0.3)
            fig.gca().add_artist(circle)
        plt.axis([0,self.boxsize[0], 0,self.boxsize[1]])
        plt.axes().set_aspect('equal')
        plt.savefig('./movie/'+'{:4.0f}'.format(i)+'.tif', dpi = 300)
    def expand(self):
        """Grow every foot by one time step.

        Overlap/boundary checks are computed but currently ignored (`if 1:`
        overrides the commented-out gate), so growth is unconditional.
        NOTE(review): the growth uses self.velocity, not footn.velocity --
        identical as constructed, but worth confirming.
        """
        for n, footn in enumerate(self.foot_list):
            overlap = 0
            for i , footi in enumerate(self.foot_list):
                if n != i:
                    overlap += touch(footn.position, footi.position,footn.radius,footi.radius)
            tchbndlist = tchbnd(footn.position, footn.radius, self.boxsize)
            out_of_bnd = sum(tchbndlist)
            #if overlap + out_of_bnd == 0:
            if 1:
                footn.radius += self.velocity * self.dt
    def update(self):
        """Advance the simulation one step (currently just expansion)."""
        self.expand()
if __name__ == "__main__":
import matplotlib.pyplot as plt
import progressbar
import os
import subprocess
import time
import os
if not os.path.exists('./movie/'):
os.makedirs('./movie/')
start = time.time()
env = Environment(boxsize=(30,30), \
totnum=200, \
dt=0.03, \
initial_radius=0.1, \
velocity=0.5)
env.create_feet()
#env.read_positions(mass = 10, radius = 5)
array = []
totframe = 200
for i in range(totframe):
env.update()
if i%3==0:
env.visualize()
plt.close()
#if i == 1000:
# np.save('initial_positions', env.particle_position_array)
progressbar.progressbar_tty(i+1, totframe, 1)
#subprocess.call('less resultsfilekjk.txt', shell=False)
for foot in env.foot_list:
#print foot.position
array.append(foot.radius)
plt.hist(array,13)
plt.show()
end = time.time()
print end-start
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,845
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/pattern_shift2D.py
|
from __future__ import division
import scipy.optimize
import scipy.spatial.distance
import partial_derivative
import math
#@profile
def shape_function(x,y):
    """Model film surface height at (x, y): a shallow paraboloid,
    5e-6 * (x^2 + y^2) + 68.1, in the same length units as x and y.
    Works elementwise on numpy arrays as well as on scalars."""
    curvature = 0.000005
    return curvature * (x * x + y * y) + 68.1
    #return 0.00000001*x + 68
#@profile
def find_k_refracting(k_incident, x1, n1,n2):
    """Refract the ray `k_incident` at the film surface above point x1.

    k_incident : 3-vector direction, presumed unit length -- confirm at
        the call site.
    x1 : [x, y]; the surface height there is shape_function(x, y).
    n1, n2 : refractive indices on the incoming / outgoing side.
    Returns the refracted direction (vector form of Snell's law).
    Exits the whole process on total internal reflection.
    NOTE(review): np, sys, math, Fore and Style are bound by the __main__
    block's imports; importing this module alone leaves them undefined.
    """
    # x1 in the form [x1,y1]
    gradient = partial_derivative.partial_derivative(shape_function, *x1)
    # gradient in the form [df/dx1,df/dy1]
    #n = np.r_[-gradient, 1] adding a column in memory is too slow
    n = np.empty((3,))
    n[:-1] = -gradient
    n[-1] = 1
    #print "n = ", n
    #print "x1 = ", x1
    norm =np.linalg.norm(n)
    n = n/norm # n is the unit normal vector pointing 'upward'
    c = -np.dot(n, k_incident)
    r = n1/n2
    sqrtterm = (1-r**2*(1-c**2))
    if sqrtterm < 0:
        # Negative radicand: no real refracted ray exists.
        print(Fore.RED)
        print "Total internal reflection occurred."
        print "1-r**2*(1-c**2) = \n", sqrtterm
        print(Style.RESET_ALL)
        sys.exit(0)
    factor = (r*c- math.sqrt(sqrtterm))
    k_refracting = r*k_incident + factor*n
    #print 'c =',c
    #print "factor", factor
    #print "k_refracting = ", k_refracting
    return k_refracting
#@profile
def find_x0(k_incident, x1, n1,n2):
    """Find the surface point x0 whose refracted ray reaches x1.

    Solves F(x0) = 0 with scipy's root finder, seeded at x1.  F states,
    componentwise for x and y, that the refracted direction at x0 is
    collinear with the path from x0 to the image of x1 -- NOTE(review):
    geometric interpretation inferred from the expressions; confirm.
    Returns the [x, y] solution array.
    """
# def Fx(x):
# k_refracting = find_k_refracting(k_incident, x, n1, n2)
# return k_refracting[0]*(shape_function(*x1)+shape_function(*x))+k_refracting[2]*(x1-x)[0]
# def Fy(x):
# k_refracting = find_k_refracting(k_incident, x, n1, n2)
# return k_refracting[1]*(shape_function(*x1)+shape_function(*x))+k_refracting[2]*(x1-x)[1]
# def F(x):
# return Fx(x), Fy(x)
    def F(x):
        # Single evaluation of the refracted ray serves both components.
        k_refracting = find_k_refracting(k_incident, x, n1, n2)
        return k_refracting[0]*(shape_function(*x1)+shape_function(*x))+k_refracting[2]*(x1-x)[0], k_refracting[1]*(shape_function(*x1)+shape_function(*x))+k_refracting[2]*(x1-x)[1]
    sol = scipy.optimize.root(F,x1)
    x0 = sol.x
    return x0
#@profile
def optical_path_diff(k_incident, x1, n1,n2):
    """Optical path difference for the interfering rays meeting above x1.

    Lifts x0 and x1 onto the surface, mirrors x1 across the z = 0 plane
    (image-point construction), and returns n2*|p0 - p1_image| minus the
    projected incident-side path n1*|p0 - p1|*cos.  Same length units as
    shape_function.  NOTE(review): thin-film interpretation inferred from
    the geometry -- confirm against the derivation notes.
    """
    x0 = find_x0(k_incident, x1, n1, n2)
    # p = (x, y, h(x, y)) for each 2D point; the image point has height -h.
    p0 = np.empty((3,))
    p1 = np.empty((3,))
    p1_image_point = np.empty((3,))
    p0[:-1] = x0
    p1[:-1] = x1
    p1_image_point[:-1] = x1
    p0[-1] = shape_function(*x0)
    p1[-1] = shape_function(*x1)
    p1_image_point[-1] = -shape_function(*x1)
    #p0 = np.r_[x0, shape_function(*x0)]
    #p1 = np.r_[x1, shape_function(*x1)]
    #p1_image_point = np.r_[x1, -shape_function(*x1)]
    vec_x0x1 = p1-p0
    norm = np.linalg.norm(vec_x0x1)
    if norm == 0:
        # x0 coincides with x1: keep the zero vector, avoid division by 0.
        norm = 1
    vec_x0x1 = vec_x0x1/norm
    cos = np.dot(vec_x0x1, k_incident)
    dist1 = np.linalg.norm(p0-p1)
    dist2 = np.linalg.norm(p0-p1_image_point)
    #print "vec_x0x1 = ", vec_x0x1
    #print "cos = ", cos
    #print "p0 = ", p0
    #print "p1 = ", p1
    #print "dist1 = ", dist1
    #print "dist2 = ", dist2
    OPD_part1 = dist1*cos*n1
    OPD_part2 = dist2*n2
    OPD = OPD_part2-OPD_part1
    return OPD
#@profile
def pattern(opd):
    """Two-beam interference intensity (range 0..2) for an optical path
    difference `opd` in microns, at a 0.532 um wavelength."""
    wavelength = 0.532
    phase = (2 * np.pi / wavelength) * opd
    return 1 + np.cos(phase)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.mlab import griddata
import numpy as np
import sys
import progressbar
import os
from itertools import product
import time
from colorama import Style, Fore
start = time.time()
print "starting..."
i = 0
phi = 0
framenumber = 50
for theta in np.linspace(0.,0.065,framenumber):
i += 1
pltnumber = 100
pltlength = 300
coordinates = np.array(list(product(np.linspace(-pltlength,pltlength,pltnumber), np.linspace(-pltlength, pltlength, pltnumber))))
q = 0
intensity = np.zeros((coordinates.shape[0], ))
for detecting_point in coordinates:
opd = optical_path_diff(k_incident = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi), -np.cos(theta)]),\
x1 = detecting_point,\
n1 = 1.5,\
n2 = 1)
intensity[q] = pattern(opd)
q+=1
#opd_expected = 2*shape_function(0)*np.cos(np.arcsin(np.sin(angle-0.0000001)*1.5)+0.0000001)
#print pattern(opd)
#print "error in OPD = " ,(opd-opd_expected)/0.532, "wavelength"
X = coordinates[:,0].reshape((pltnumber,pltnumber))
Y = coordinates[:,1].reshape((pltnumber,pltnumber))
Z = intensity.reshape((pltnumber, pltnumber))
fig = plt.figure(num=None, figsize=(6, 6), dpi=60, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111, projection='3d')
#ax.set_xlabel('$x,\mu m$')
#ax.set_ylabel('$y,\mu m$')
#ax.set_zlim(0,4)
#ax.set_zticks([0,2,4])
ax.plot_wireframe(X,Y,Z,linewidth=0.6, color='k',ccount=80,rcount=80)
ax.elev = 85
ax.azim = 0
#ax.text(0, 2.2, r'$rotated : %.4f rad$'%theta, fontsize=15)
dirname = "./movie2D2/"
if not os.path.exists(dirname):
os.makedirs(dirname)
plt.tight_layout()
plt.axis('off')
#plt.show()
plt.savefig(dirname+'{:4.0f}'.format(i)+'.tif',bbox_inches='tight',pad_inches=0)
plt.close()
progressbar.progressbar_tty(i, framenumber, 1)
print "finished!"
print(Fore.CYAN)
print "Total running time:", time.time()-start, 'seconds'
print(Style.RESET_ALL)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,846
|
hemengf/my_python_lib
|
refs/heads/master
|
/progressbar.py
|
from __future__ import division
from ctypes import windll, create_string_buffer
import time
import sys
import struct
import subprocess
def progressbar_win_console(cur_iter, tot_iter, deci_dig):
    """
    Presents the percentage and draws a progress bar.
    Import at the beginning of a file. Call at the end of a loop.
    cur_iter: current iteration number. Counted from 1.
    tot_iter: total iteration number.
    deci_dig: decimal digits for percentage number.
    Works for windows type console.
    Python 2 only (uses unichr below).
    """
    # Query the Win32 console screen-buffer info to learn the window width.
    csbi = create_string_buffer(22)
    h = windll.kernel32.GetStdHandle(-11)  # -11 == STD_OUTPUT_HANDLE
    res = windll.kernel32.GetConsoleScreenBufferInfo(h,csbi)  # NOTE(review): res (success flag) is never checked
    # Unpack 11 shorts; only the window's left/right columns are needed.
    (_,_,_,_,_,left,_,right,_,_,_) = struct.unpack('11h',csbi.raw)
    # Grab console window width.
    # Modified from http://stackoverflow.com/questions/17993814/why-the-irrelevant-code-made-a-difference
    console_width = right-left+1
    bar_width = int(console_width * 0.8)  # leave room for the percentage text
    tot_dig = deci_dig + 4 # to make sure 100.(4 digits) + deci_dig
    percentage = '{:{m}.{n}f}%'.format(cur_iter*100/tot_iter, m = tot_dig, n = deci_dig)
    numbar = bar_width*cur_iter/tot_iter
    numbar = int(numbar)
    sys.stdout.write(percentage)
    # U+2588 FULL BLOCK fills the completed part of the bar (unichr: Python 2 only).
    sys.stdout.write("[" + unichr(0x2588)*numbar + " "*(bar_width-numbar) + "]")
    sys.stdout.flush()
    # Carriage return so the next call redraws the bar in place.
    sys.stdout.write('\r')
    if cur_iter == tot_iter:
        sys.stdout.write('\n')
def progressbar_tty(cur_iter, tot_iter, deci_dig):
    """
    Presents the percentage and draws a progress bar.
    Import at the beginning of a file. Call at the end of a loop.
    cur_iter: current iteration number. Counted from 1.
    tot_iter: total iteration number.
    deci_dig: decimal digits for percentage number.
    Works for linux type terminal emulator.
    """
    #rows, columns = subprocess.check_output(['stty', 'size']).split()
    # Grab width of the current terminal.
    # Modified from http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    # won't work inside vim using "\r"
    # fix: the old code also spawned `tput lines` and bound it to an unused
    # local `rows`; only the column count is needed, so that spawn is removed.
    columns = int(subprocess.check_output(['tput','cols']))
    bar_width = int(columns* 0.8)  # leave room for the percentage text
    tot_dig = deci_dig + 4 # to make sure 100.(4 digits) + deci_dig
    percentage = '{:{m}.{n}f}%'.format(cur_iter*100/tot_iter, m = tot_dig, n = deci_dig)
    numbar = int(bar_width*cur_iter/tot_iter)
    sys.stdout.write(percentage)
    # u'\u2588' is FULL BLOCK; encoded so the Python-2 byte stream stays valid UTF-8.
    sys.stdout.write("[" + u'\u2588'.encode('utf-8')*numbar + " "*(bar_width-numbar) + "]")
    sys.stdout.flush()
    # Carriage return so the next call redraws the bar in place.
    sys.stdout.write('\r')
    if cur_iter == tot_iter:
        sys.stdout.write('\n')
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,847
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/simulated_annealing_bak.py
|
#!/usr/bin/env python
from __future__ import division, print_function
import sys
from scipy import interpolate
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import exposure
def normalize(img_array, normrange):
    """Histogram-equalize *img_array* into [0, 1].

    NOTE(review): *normrange* is currently unused -- an earlier min/max
    rescaling that used it was replaced by histogram equalization; the
    parameter is kept so existing callers keep working.
    """
    return exposure.equalize_hist(img_array)
def difference(reference_img, generated_img, normrange):
    """Sum-of-squared-differences between the two images after each has been
    passed through normalize() (histogram equalization)."""
    ref = normalize(reference_img, normrange)
    gen = normalize(generated_img, normrange)
    return ((ref - gen) ** 2).sum()
def vary_surface_polynomial(size, max_variation, coeff):
    """Build a tilted-plane height map of shape *size*.

    The height at integer pixel (x, y) is
    max_variation * (coeff[0]*x + coeff[1]*y), with x running over
    0..size[0]-1 and y over 0..size[1]-1.
    """
    xs = np.arange(size[0], dtype=float)
    ys = np.arange(size[1], dtype=float)
    return max_variation * (coeff[0] * xs[:, None] + coeff[1] * ys[None, :])
def nll(ab, max_variation, data, normrange):
    """Fit objective: mismatch between *data* and the two-beam interference
    pattern generated from a plane with tilt coefficients *ab*.

    The 0.532 constant is the (green) wavelength used to convert height
    into phase.
    """
    surface = vary_surface_polynomial(data.shape, max_variation, ab)
    fringe = 1 + np.cos((2 / 0.532) * surface)
    fringe = fringe / fringe.max()  # scale to [0, 1]
    return difference(data, fringe, normrange)
if __name__ == "__main__":
    # Brute-force grid scan of the plane-tilt objective nll() over the
    # coefficient plane (a, b), followed by a Nelder-Mead refinement
    # starting from the best grid point.
    from scipy.optimize import fmin
    import time
    normrange=1
    N = 50  # grid resolution along a (2*N along b)
    sample_size = 15  # grid extent for the tilt coefficients
    t0 = time.time()
    max_variation = 0.012
    reference_intensity = cv2.imread('crop.tif', 0)
    reference_intensity = normalize(reference_intensity,1)
    #cv2.imwrite('normalized_crop.tif',255*reference_intensity)
    alist = np.linspace(0,sample_size,N) # x direction
    blist = np.linspace(-sample_size, sample_size,2*N) # y direction
    aa, bb = np.meshgrid(alist,blist)
    diff = np.empty(aa.shape)
    for i in np.arange(alist.size):
        for j in np.arange(blist.size):
            # exclusion radius is 0.*len(alist) == 0, so this mask only hits
            # the single grid point where both squared terms vanish
            if (j-0.5*len(blist))**2+(i)**2<=(0.*len(alist))**2:
                diff[j,i] = np.nan
            else:
                diff[j,i] = nll((aa[j,i],bb[j,i]),max_variation,reference_intensity,1.0)
            sys.stdout.write('\r%i/%i ' % (i*blist.size+j+1,alist.size*blist.size))
            sys.stdout.flush()
    sys.stdout.write('\n')
    elapsed = time.time() - t0
    print("took %.2f seconds to compute the likelihood" % elapsed)
    # best grid point (NaN-masked entries ignored)
    index = np.unravel_index(np.nanargmin(diff), diff.shape)
    print(diff[index])
    index = (alist[index[1]], blist[index[0]])
    index = np.array(index)
    print(index)
    # refine with Nelder-Mead, seeding a tiny simplex around the grid optimum
    xopt= fmin(nll, index, args = (max_variation, reference_intensity, normrange), initial_simplex=[index, index+(0,0.01), index+(0.01,0)])
    print(xopt)
    fig = plt.figure()
    #plt.contour(aa, bb, diff, 100)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_wireframe(aa,bb,diff)
    plt.ylabel("coefficient a")
    plt.xlabel("coefficient b")
    #plt.gca().set_aspect('equal', adjustable = 'box')
    #plt.colorbar()
    plt.show()
    generated_intensity = normalize(1+np.cos((2/0.532)*vary_surface_polynomial(reference_intensity.shape, max_variation, index)), 1.0)#works for n=1 pocket
    #cv2.imwrite('ideal_pattern.tif', 255*generated_intensity)
    cv2.imshow('', np.concatenate((generated_intensity, reference_intensity), axis = 1))
    cv2.waitKey(0)
    #ax = fig.add_subplot(111, projection = '3d')
    #ax.plot_surface(xx[::10,::10], yy[::10,::10], zz[::10,::10])
    #plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,848
|
hemengf/my_python_lib
|
refs/heads/master
|
/envelope.py
|
from __future__ import division
import numpy as np
from scipy.signal import savgol_filter as sg
from scipy.interpolate import interp1d
from skimage.measure import profile_line as pl
from find_peaks import left_find_indices_min as minindices
from find_peaks import left_find_indices_max as maxindices
import sys
import time
import os
def meandata(img, center=(2042, 1674), R=1067, a=167, da=20, dda=1, savename="mdatatemp"):
    """
    Average intensity profiles taken along a fan of rays starting at *center*.

    img      2-D image array to profile
    center   (x, y) starting point in pixel/imagej coordinates.
             (Was a Python-2-only tuple parameter ``(startx, starty)``, which
             PEP 3113 removed; callers still pass a 2-tuple positionally, so
             the call interface is unchanged.)
    R        profile length
    a        angle (degrees)
    da       averaging angle span (degrees)
    dda      averaging stepping size (degrees)
    savename cached profile stack; if the file exists it is loaded instead of
             re-profiling (NOTE: the np.save that would create it is
             currently commented out below)

    Returns (mean_profile, std_profile), both 1-D arrays.
    """
    startx, starty = center
    if os.path.exists(savename):
        data = np.load(savename)
    else:
        for i, angle in enumerate(np.arange(a, a + da, dda)):
            endx = startx + np.cos(angle * np.pi / 180) * R
            endy = starty - np.sin(angle * np.pi / 180) * R
            # endx,endy pixel/imagej coord, need to reverse for scipy/numpy use
            if i == 0:
                data = pl(img, (starty, startx), (endy, endx), order=0)
                length = len(data)
            else:
                data = np.vstack((data, pl(img, (starty, startx), (endy, endx), order=3)[:length]))
        #np.save(savename,data)
    # bug fix: stddata used to be computed only in the else-branch, so the
    # cached (savename exists) path raised NameError on return.  Compute both
    # statistics once, after either branch has produced `data`.
    mdata = np.mean(data, axis=0)
    stddata = np.std(data, axis=0)
    return mdata, stddata
def symmetricmeandata(img, center=(2042, 1674), R=1067, a=167, da=20, dda=1, savename="mdatatemp", compare='off', ref=2000):
    """
    Symmetric version of meandata(): each profile runs from the mirror point
    of the ray end, through *center*, to the ray end, then the profiles are
    averaged over the angular fan.

    center   (x, y) in pixel/imagej coordinates.  (Was a Python-2-only tuple
             parameter; callers still pass a 2-tuple positionally.)
    compare  when 'on', abort early and return (-1, -1) once the running std
             exceeds *ref* (scaled by sqrt(N) to account for std shrinking as
             1/sqrt(N)) -- useful when scanning many angles to test whether
             the rings are concentric.

    Returns (mean_profile, std_profile), or (-1, -1) on early abort.
    """
    startx, starty = center
    if os.path.exists(savename):
        data = np.load(savename)
    else:
        for i, angle in enumerate(np.arange(a, a + da, dda)):
            endx = startx + np.cos(angle * np.pi / 180) * R
            endy = starty - np.sin(angle * np.pi / 180) * R
            # actually starting from not the center but from the symmetric end point
            sstartx = 2 * startx - endx
            sstarty = 2 * starty - endy
            if i == 0:
                data = pl(img, (sstarty, sstartx), (endy, endx), order=0)
                length = len(data)
            else:
                data = np.vstack((data, pl(img, (sstarty, sstartx), (endy, endx), order=3)[:length]))
            if compare == 'on' and i < int(da / dda):
                stddata = np.std(data, axis=0)
                if np.sqrt(i + 1) * stddata.sum() > ref:
                    # stop stacking more angles if std is already larger than the
                    # criterion -- no point profiling the rest of the fan
                    return -1, -1
        #np.save(savename,data)
    # bug fix: stddata used to be computed only in the else-branch, so the
    # cached (savename exists) path raised NameError on return.
    mdata = np.mean(data, axis=0)
    stddata = np.std(data, axis=0)
    return mdata, stddata
def normalize_envelope(mdata,smoothwindow=19,splineorder=2,envelopeinterp='quadratic'):
    """
    Rescale an oscillating 1-D profile so its lower envelope maps to 0 and
    its upper envelope maps to 1.

    mdata          1-D intensity profile
    smoothwindow   Savitzky-Golay window used to locate extrema robustly
    splineorder    Savitzky-Golay polynomial order
    envelopeinterp interpolation kind for the two envelopes

    Returns (x, newy): x is the maximum index range where envelope fitting is
    possible (both envelopes defined), newy the rescaled profile over x.
    """
    s = sg(mdata,smoothwindow,splineorder)
    # extrema are located on the smoothed curve, but the envelope values are
    # sampled from the raw data at those locations
    upperx = maxindices(s)
    #uppery = np.maximum(mdata[upperx],s[upperx])
    uppery = mdata[upperx]
    lowerx = minindices(s)
    #lowery = np.minimum(mdata[lowerx],s[lowerx])
    lowery = mdata[lowerx]
    fupper = interp1d(upperx, uppery, kind=envelopeinterp)
    flower = interp1d(lowerx, lowery, kind=envelopeinterp)
    # restrict to the overlap of the two envelopes' domains so neither
    # interpolant is extrapolated
    x = np.arange(max(min(upperx),min(lowerx)),min(max(upperx),max(lowerx)))
    y = mdata[x]
    newy = (y-flower(x))/(fupper(x)-flower(x))
    return x,newy
if __name__=="__main__":
import numpy as np
import cv2
import matplotlib.pyplot as plt
(startx,starty)=(2042,1674)
R = 1067
a = 167
da = 20
dda = 1
imgred = cv2.imread('warpedred.tif',0)
imggreen = cv2.imread('warpedgreen.tif',0)
imgamber = cv2.imread('DSC_3878.jpg',0)
cmap = plt.get_cmap('tab10')
am = cmap(1)
gr = cmap(2)
rd = cmap(3)
print '\nprocessing red'
mdatared = meandata(imgred,(startx,starty),R,a,da,dda,savename='datared.npy')
xred,newyred = normalize_envelope(mdatared[170:]) #170 is to cut off the flat noisy first dark spot; otherwise envelope fitting won't work (it assumes a nice wavy shape without too many local extrema)
xred+=170 #not necessary; just to make sure xred=0 is center of the rings;wanna make sure all the coordinates throughout the script is consistent so its easier to check for bugs
print '\nprocessing amber'
mdataamber = meandata(imgamber,(startx,starty),R,a,da,dda,savename='dataamber.npy')
xamber,newyamber = normalize_envelope(mdataamber[170:])
xamber+=170
print'\nprocess green'
mdatagreen= meandata(imggreen, (startx,starty),R,a,da,dda,savename='datagreen.npy')
xgreen,newygreen= normalize_envelope(mdatagreen[170:])
xgreen+=170
np.save('xgreen',xgreen)
np.save('newygreen',newygreen)
#plt.plot(mdatared,color=cmap(3))
#plt.plot(mdatagreen,color=cmap(2))
#plt.plot(mdataamber,color=cmap(1))
plt.plot(xred,newyred,color=rd)
plt.plot(xamber,newyamber,color=am)
plt.plot(xgreen,newygreen,color=gr)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,849
|
hemengf/my_python_lib
|
refs/heads/master
|
/boundaryv/draft.py
|
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import random
boxsize = 1000
class Particle:
    """A disk-shaped particle at a fixed position with a random orientation."""
    def __init__(self, particle_pos, size):
        # Keep the original attribute order -- x, y, then the orientation
        # draw -- so the module-level RNG sequence is unchanged.
        self.x, self.y = particle_pos[0], particle_pos[1]
        self.orientation = random.uniform(0, 2 * np.pi)
        self.size = size
def touch(particle1pos, particle2pos, particle1size, particle2size):
    """Return 1 when the two disks overlap or just touch, else 0."""
    gap = np.linalg.norm(particle1pos - particle2pos)
    return 1 if gap <= particle1size + particle2size else 0
def findnearest(particle, particle_array):
dist_array = np.sum((particle - particle_array)**2, axis=1)
return np.nanargmin(dist_array)
def create_multi_particles(totnum):
    """Place *totnum* non-overlapping disks of radius 10 uniformly at random
    inside a 1000x1000 box, by rejection sampling against the nearest
    already-placed particle.

    Returns (particle_array, failcount): particle_array is (totnum, 2);
    failcount is the number of rejected draws for the *last* particle placed.
    NOTE(review): failures of earlier particles are not accumulated -- confirm
    whether a total count was intended.
    """
    boxsize = 1000
    particle_array = np.empty((totnum,2))
    # np.nan: the np.NAN alias was removed in NumPy 2.0
    particle_array[:] = np.nan
    particlesize = 10
    # bug fix: initialize failcount so totnum == 1 no longer raises NameError
    # at the return (the for-loop body never runs in that case).
    failcount = 0
    # the first particle can never collide, so place it directly
    x= random.uniform(particlesize, boxsize-particlesize)
    y = random.uniform(particlesize, boxsize-particlesize)
    particle_array[0,:] = np.asarray((x,y))
    for n in range(1,totnum):
        touchflg = 1
        particlesize = 10
        failcount = -1
        while touchflg == 1:
            failcount+=1
            x = random.uniform(particlesize, boxsize-particlesize)
            y = random.uniform(particlesize, boxsize-particlesize)
            particle = np.asarray((x,y))
            # only the nearest existing particle can possibly overlap
            nearest_idx = findnearest(particle,particle_array)
            touchflg = touch(particle_array[nearest_idx], particle, particlesize, particlesize)
        particle_array[n,:] = np.asarray((x,y))
    return particle_array, failcount
if __name__ == '__main__':
    # Demo: scatter 100 non-overlapping disks and draw them to scale.
    totnum = 100
    particle_array, failcount = create_multi_particles(totnum)
    fig = plt.figure()
    for n in range(totnum):
        # radius 10 matches the particlesize used inside create_multi_particles
        circle = plt.Circle((particle_array[n,0], particle_array[n,1]), 10, fill=False)
        fig.gca().add_artist(circle)
    plt.axis([0,1000,0,1000])
    plt.axes().set_aspect('equal')
    plt.show()
    # Python 2 print statement; failcount is the rejection count of the last particle only
    print failcount
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,850
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/shape_fitting/whole/piecewise/checkconnectivity.py
|
from scipy.ndimage import label as lb
import cv2
import matplotlib.pyplot as plt
# Label connected regions of a binary image and display the label map.
img = cv2.imread('cl.tif',0)
# 3x3 structure of ones = 8-connectivity (diagonal neighbors count as connected)
labeled_array,num =lb(img,structure=[[1,1,1],[1,1,1],[1,1,1]])
plt.imshow(labeled_array)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,851
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/red_amber_green/red_amber_green_button632.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import CheckButtons
from find_peaks import find_indices_max as fimax
from find_peaks import find_indices_min as fimin
# Reference cosine fringe patterns for red/amber/green wavelengths, with
# checkbuttons to toggle each curve.  The plain curves include a quarter-wave
# phase shift; the "...8" variants omit it.
cmap = plt.get_cmap('tab10')
am = cmap(1)
gr = cmap(2)
rd = cmap(3)
# wavelengths in micrometers
amwvlg = 0.590
rdwvlg = 0.6328
grwvlg = 0.532
x = np.arange(0,30, 0.0009)
red = 1+np.cos(4*np.pi*(x+rdwvlg/4)/rdwvlg)
amber = 1+ np.cos(4*np.pi*(x+amwvlg/4)/amwvlg)
green = 1+ np.cos(4*np.pi*(x+grwvlg/4)/grwvlg)
red8 = 1+np.cos(4*np.pi*x/rdwvlg)
amber8 = 1+ np.cos(4*np.pi*x/amwvlg)
green8 = 1+ np.cos(4*np.pi*x/grwvlg)
fig,ax= plt.subplots()
#for i,ind in enumerate(fimin(amber)):
#    ax.annotate('%d'%(i+1),xy=(x[ind],0),xytext=(x[ind],-0.1),color=am)
# label each red minimum with its order number and x position, and each
# red maximum with its x position above the curve
for i,ind in enumerate(fimin(red)):
    ax.annotate('%d'%(i+1),xy=(x[ind],0),xytext=(x[ind],-0.2),color=rd)
    ax.annotate('%.3f'%(x[ind]),xy=(x[ind],0),xytext=(x[ind],-0.3),color=rd)
for i,ind in enumerate(fimax(red)):
    ax.annotate('%.3f'%(x[ind]),xy=(x[ind],0),xytext=(x[ind],2+0.2),color=rd)
plt.subplots_adjust(bottom=0.2)
# all six curves start hidden; the checkbuttons below toggle visibility
lred, = ax.plot(x, red,color=rd,visible=False)
lamber, = ax.plot(x, amber, color=am,visible=False)
lgreen, = ax.plot(x, green, color=gr,visible=False)
lred8, = ax.plot(x, red8,color=rd,visible=False)
lamber8, = ax.plot(x, amber8, color=am,visible=False)
lgreen8, = ax.plot(x, green8, color=gr,visible=False)
#ax.plot(x,amber+green+red)
rax = plt.axes([0.01, 0.4, 0.1, 0.15])
check = CheckButtons(rax, ('red', 'amber', 'green','red8','amber8','green8'), (False, False, False, False, False, False))
def func(label):
    # CheckButtons callback: flip the visibility of the clicked curve
    if label == 'red':
        lred.set_visible(not lred.get_visible())
    elif label == 'amber':
        lamber.set_visible(not lamber.get_visible())
    elif label == 'green':
        lgreen.set_visible(not lgreen.get_visible())
    if label == 'red8':
        lred8.set_visible(not lred8.get_visible())
    elif label == 'amber8':
        lamber8.set_visible(not lamber8.get_visible())
    elif label == 'green8':
        lgreen8.set_visible(not lgreen8.get_visible())
    plt.draw()
check.on_clicked(func)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,852
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/red_amber_green/green_slider_8bit.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from plotwithsliders import plotwithsliders as ps
from plotwithsliders import sliders_buttons as sb
from find_peaks import find_indices_max as fimax
from find_peaks import find_indices_min as fimin
# Red and amber fringe curves with their minima numbered, plus a
# slider-driven green curve whose wavelength can be tuned interactively.
cmap = plt.get_cmap('tab10')
am = cmap(1)
gr = cmap(2)
rd = cmap(3)
x = np.arange(0,20, 0.001)
red = 1+np.cos(4*np.pi*(x+0.630/4)/0.630)
# the 0* factor disables the quarter-wave shift for amber ("amber reversed")
amber = 1+ np.cos(4*np.pi*(x+0*0.59/4)/0.590)
fig,ax = plt.subplots()
plt.subplots_adjust(bottom=0.2)
ax.set_ylim(-1,3)
# number the minima of both fixed curves along the x axis
for i,ind in enumerate(fimin(amber)):
    ax.annotate('%d'%(i+1),xy=(x[ind],0),xytext=(x[ind],-0.1),color=am)
for i,ind in enumerate(fimin(red)):
    ax.annotate('%d'%(i+1),xy=(x[ind],0),xytext=(x[ind],-0.2),color=rd)
pararange = [0.5,0.6]  # slider range for the green wavelength (micrometers)
parainit = 0.532
slider,buttonplus,buttonminus = sb(pararange,parainit)
ax.plot(x, red, color=rd)
ax.plot(x, amber, color=am)
def xgreen(wvlg):
    # the x data does not depend on the slider value
    return x
def ygreen(wvlg):
    return 1+ np.cos(4*np.pi*(xgreen(wvlg)+wvlg/4)/wvlg)
ps([slider],[buttonplus],[buttonminus],ax,xgreen,ygreen,gr,[pararange],[parainit])
plt.title('amber reversed')
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,853
|
hemengf/my_python_lib
|
refs/heads/master
|
/error_boxes.py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
def make_error_boxes(ax, xdata, ydata, xerror, yerror, facecolor='r',
                     edgecolor='#1f77b4', errorcolor='k',alpha=1):
    """
    Draw shaded error boxes plus conventional errorbars on *ax*.

    xerror/yerror are 2xN arrays of (lower, upper) extents per point.
    Call function to create error boxes
    _ = make_error_boxes(ax, x, y, xerr, yerr)
    plt.show()
    """
    # One rectangle per data point, spanning the asymmetric error extents.
    boxes = [
        Rectangle((x - xe[0], y - ye[0]), xe.sum(), ye.sum())
        for x, y, xe, ye in zip(xdata, ydata, xerror.T, yerror.T)
    ]
    # Single patch collection carries the shared colour/alpha styling.
    ax.add_collection(PatchCollection(boxes, facecolor=facecolor, alpha=alpha,
                                      edgecolor=edgecolor, lw=0.5))
    # Conventional errorbars drawn on top; their artists are returned.
    return ax.errorbar(xdata, ydata, xerr=xerror, yerr=yerror,
                       fmt='None', ecolor=errorcolor)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,854
|
hemengf/my_python_lib
|
refs/heads/master
|
/webscraping/t66y.py
|
# -*- encoding: utf-8 -*-
import urllib
import cfscrape
from bs4 import BeautifulSoup
import re
n = 1
f = open('result.html','w+')
f.write('<!DOCTYPE html>')
f.write('<html>')
f.write('<body>')
for page in range(1,50):
site15 ="http://t66y.com/thread0806.php?fid=15&search=&page=%d"%page
site2 ="http://t66y.com/thread0806.php?fid=2&search=&page=%d"%page
site4 ="http://t66y.com/thread0806.php?fid=4&search=&page=%d"%page
site8 ="http://t66y.com/thread0806.php?fid=8&search=&page=%d"%page
site7 ="http://t66y.com/thread0806.php?fid=7&search=&page=%d"%page
for site in [site15, site2,site4,site8,site7]:
#for site in [site7]:
scraper = cfscrape.create_scraper() # returns a CloudflareScraper instance
# Or: scraper = cfscrape.CloudflareScraper() # CloudflareScraper inherits from requests.Session
html = scraper.get(site)
soup = BeautifulSoup(html.content,'html.parser')
trs = soup.findAll('tr',{'class','tr3 t_one tac'},limit=None)
for tr in trs[3:]:
url = 'http://t66y.com/'+tr.find('td',{'class','tal'}).find('a').get('href')
s = tr.find('td',{'class','tal'}).get_text().encode('utf8')
keywords = ['Beginningofkeywords',\
'Shizuka',\
'管野',\
'菅野',\
'佐々木',\
'佐佐木',\
'sasaki',\
'Sasaki',\
'Rina',\
'Ishihara',\
'石原莉奈',\
#'白木',\
'松岡 ちな',\
'春原未来',\
'Chanel',\
'Karla Kush',\
'Karter',\
'Carter',\
'Sophie Dee',\
'Madison Ivy',\
#'pantyhose',\
#'Pantyhose',\
'nylon',\
#'1080',\
#'Stockings',\
#'絲襪',\
#'丝袜',\
#'黑丝',\
#'襪',\
'小島',\
'神纳花',\
'篠田',\
'Ayumi',\
'trans',\
#'ts',\
'妖',\
'变性',\
#'FHD',\
'Butt3rflyforu',\
'EndofKeywords'\
]
for keyword in keywords:
if keyword in s:
linktext = '<a href="{x}">{y}</a>'.format(x=url,y=s)
print linktext
f.write('<p>'+linktext+'</p>')
#print(s),url,'page =',page,'fid =',site[site.index('=')+1:site.index('&')]
#print n
n+=1
f.write('</body>')
f.write('</html>')
f.close()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,855
|
hemengf/my_python_lib
|
refs/heads/master
|
/boundaryv/trycircle.py
|
import matplotlib.pyplot as plt
# Minimal demo of drawing matplotlib Circle artists directly onto the axes.
circle1=plt.Circle((0,0),.2,color='r')
circle2=plt.Circle((.5,.5),.2,color='b')
# clip_on=False lets this circle draw past the axes boundary
circle3=plt.Circle((1,1),.2,color='g',clip_on=False)
fig = plt.gcf()
fig.gca().add_artist(circle1)
fig.gca().add_artist(circle2)
fig.gca().add_artist(circle3)
plt.axis([0,2,0,2])
# NOTE(review): plt.axes() here relies on old matplotlib behavior of reusing
# the current axes; newer versions prefer plt.gca().set_aspect('equal')
plt.axes().set_aspect('equal')
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,856
|
hemengf/my_python_lib
|
refs/heads/master
|
/trans_circulation/plot_lambda1vsanglefunction.py
|
import matplotlib.pyplot as plt
import numpy as np
# Scatter-plot lambda1 against the angle function sin(phi)/cos(phi)^(1/3).
data = np.loadtxt('data_lambda1vsangle')
# average the two lambda measurement columns
lambda1 = 0.5*(data[:,2]+data[:,3])
# combine the two angle columns and convert degrees -> radians
angle = 0.5*(180-data[:,0]+data[:,1])*np.pi/180.
cosangle = np.cos(angle)
sinangle = np.sin(angle)
# NOTE(review): exponent 0.33 approximates the 1/3 shown in the axis label
anglefunction = sinangle/np.power(cosangle,0.33)
plt.scatter(anglefunction, lambda1, s=30, facecolors='none',edgecolors='k')
plt.axis([0,1.5,0,160])
plt.xlabel(r'$\frac{\sin\phi}{\cos^\frac{1}{3}\phi}$',fontsize=20)
plt.ylabel(r'$\lambda_1$',fontsize=20)
plt.gcf().subplots_adjust(bottom = 0.15)
plt.savefig('lambdavsangle.png')
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,857
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/find_center.py
|
from __future__ import division
import find_peaks
import numpy as np
def center_position(intensity, x, center):
    """Refine the pattern-center estimate: average of the left-scan and
    right-scan peak positions closest to the *center* guess.

    intensity : 1-D intensity profile
    x         : coordinate array matching *intensity*
    center    : initial guess for the center's x position
    """
    def _closest_peak(indices):
        # peak coordinate nearest to the initial guess
        positions = x[indices]
        return positions[np.abs(positions - center).argmin()]

    left = _closest_peak(find_peaks.left_find_indices_all(intensity))
    right = _closest_peak(find_peaks.right_find_indices_all(intensity))
    return (left + right) / 2
if __name__ == "__main__":
    # Demo: locate the center of a saved intensity profile and mark it.
    from scipy import signal  # NOTE(review): unused import -- confirm before removing
    import matplotlib.pyplot as plt
    intensity = np.load('intensity.npy')
    coordinates = np.linspace(-500,500,300)
    # 0 is the initial guess for the center position
    peak = center_position(intensity,coordinates, 0)
    plt.plot(coordinates, intensity)
    plt.axvline(x = peak)
    plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,858
|
hemengf/my_python_lib
|
refs/heads/master
|
/trystatus.py
|
import time
import progressbar
for i in range(400):
    # work
    time.sleep(0.01)
    # NOTE(review): progressbar_tty documents cur_iter as counted from 1, so
    # this probably wants (i+1, 400, 3); as written the final (399, 399) call
    # is what triggers the closing newline -- confirm intent.
    progressbar.progressbar_tty(i,399,3)
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,859
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/red_amber_green/green_slider.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import CheckButtons
from plotwithsliders import plotwithsliders as ps
from plotwithsliders import sliders_buttons as sb
from find_peaks import find_indices_max as fimax
from find_peaks import find_indices_min as fimin
# Amber curve plus slider-tunable red and green fringe curves, with
# checkbuttons to toggle each line's visibility.
cmap = plt.get_cmap('tab10')
am = cmap(1)
gr = cmap(2)
rd = cmap(3)
x = np.arange(0,20, 0.001)
red = 1+np.cos(4*np.pi*(x+0.630/4)/0.630)
amber = 1+ np.cos(4*np.pi*(x+0.59/4)/0.590)
fig,ax = plt.subplots()
plt.subplots_adjust(bottom=0.2)
ax.set_ylim(-1,3)
#lred,= ax.plot(x, red, color=rd, visible=False)
lamber, = ax.plot(x, amber, color=am,visible=False)
# number the minima of the fixed reference curves along the x axis
for i,ind in enumerate(fimin(amber)):
    ax.annotate('%d'%(i+1),xy=(x[ind],0),xytext=(x[ind],-0.1),color=am)
for i,ind in enumerate(fimin(red)):
    ax.annotate('%d'%(i+1),xy=(x[ind],0),xytext=(x[ind],-0.2),color=rd)
pararange = [0.5,0.6]  # green wavelength slider range (micrometers)
parainit = 0.532
slider,buttonplus,buttonminus = sb(pararange,parainit)
def xgreen(wvlg):
    # x data is independent of the slider value
    return x
def ygreen(wvlg):
    return 1+ np.cos(4*np.pi*(xgreen(wvlg)+wvlg/4)/wvlg)
lgreen = ps([slider],[buttonplus],[buttonminus],ax,xgreen,ygreen,gr,[pararange],[parainit])
parainitred = 0.630
pararangered = [0.6,0.7]  # red wavelength slider range (micrometers)
sliderred,buttonplusred,buttonminusred = sb(pararangered,parainitred, height=0.12)
def xred(wvlg):
    return x
def yred(wvlg):
    return 1+ np.cos(4*np.pi*(xred(wvlg)+wvlg/4)/wvlg)
lred = ps([sliderred],[buttonplusred],[buttonminusred],ax,xred,yred,rd,[pararangered],[parainitred])
rax = plt.axes([0.01, 0.4, 0.1, 0.15])
check = CheckButtons(rax, ('red', 'amber', 'green'), (True, False, True))
def func(label):
    # CheckButtons callback: toggle the clicked curve's visibility
    if label == 'red':
        lred.set_visible(not lred.get_visible())
    elif label == 'amber':
        lamber.set_visible(not lamber.get_visible())
    elif label == 'green':
        lgreen.set_visible(not lgreen.get_visible())
    plt.draw()
check.on_clicked(func)
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,860
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/dataplot.py
|
import matplotlib.pyplot as plt
import numpy as np
# Plot center-shift curves versus rotation angle, one curve per center
# height, loaded from previously saved simulation outputs.
framenumber = 50
fig = plt.figure()
ax = fig.add_subplot(111)
d = {}
height_range = range(0,2000,100)
for i in height_range:
    d["data%d"%i] = np.load("./output_test/center_array_%d.npy"%i)
    # [::1] keeps every sample; change the stride here to thin the data
    d["data%d"%i] = d["data%d"%i][::1]
    angles = np.linspace(0,0.06, framenumber)
    angles = angles[::1]
    # marker size encodes the center height so curves are distinguishable
    plt.plot(angles, d["data%d"%i], 'o-', markersize =i/200)
ax.set_xlabel("rotated angle, $rad$")
ax.set_ylabel("center shift $\mu m$")
#plt.plot([q for q in height_range], [d["data%d"%k][-1] for k in height_range])
#ax.set_xlabel("center height, $\mu m$")
#ax.set_ylabel("center shift, $\mu m$")
plt.show()
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,861
|
hemengf/my_python_lib
|
refs/heads/master
|
/interference_pattern/callable_test.py
|
def perform(args):
    """Return a pair: the first element of *args*, and shape_function
    evaluated on the whole *args* sequence."""
    first = args[0]
    return first, shape_function(args)
def shape_function(x):
    """Model surface height: sin of the first component plus the second."""
    a, b = x[0], x[1]
    return np.sin(a) + b
if __name__ == "__main__":
    # Quick sanity check: expect (1, sin(1) + 0); extra elements are ignored.
    import numpy as np
    # Python 2 print statement
    print perform((1,0,3))
|
{"/interference_pattern/red_amber_green/red_amber_green_button632.py": ["/find_peaks.py"], "/interference_pattern/red_amber_green/green_slider_8bit.py": ["/plotwithsliders.py", "/find_peaks.py"], "/interference_pattern/find_center.py": ["/find_peaks.py"], "/trystatus.py": ["/progressbar.py"], "/interference_pattern/red_amber_green/green_slider.py": ["/plotwithsliders.py", "/find_peaks.py"]}
|
15,915
|
prudhvikumar22/proper_modern_webui_automation
|
refs/heads/master
|
/webui/webui.py
|
try:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import os
import time
except Exception as e:
print("Module might be missing, See the message----> ", e)
TIME_TO_WAIT = 90
def create_driver(value):
    """Build a fresh Selenium driver for 'CHROME' or 'FIREFOX'.

    Any other *value* returns a usage-hint string instead of a driver.
    """
    if value == 'CHROME':
        opts = webdriver.ChromeOptions()
        opts.add_argument('--ignore-certificate-errors')
        chrome = webdriver.Chrome(options=opts)
        chrome.delete_all_cookies()
        #chrome.maximize_window()
        return chrome
    if value == 'FIREFOX':
        firefox = webdriver.Firefox()
        firefox.delete_all_cookies()
        firefox.maximize_window()
        return firefox
    return "Create with values as either CHROME or FIREFOX to initiate the driver"
class WebUI:
    """Small convenience facade over a Selenium WebDriver.

    Every *locator* argument is a (By, value) pair, e.g. (By.ID, "login").
    Waiting helpers use the module-level TIME_TO_WAIT timeout.
    """
    def __init__(self, driver):
        # the shared Selenium WebDriver instance used by all helpers
        self.driver = driver
    def find_element(self, locator):
        """Return the first element matching *locator* (no explicit wait)."""
        return self.driver.find_element(*locator)
    def open(self, link):
        """Navigate the browser to *link*."""
        self.driver.get(link)
    def enter(self, locator, data):
        """Wait until the element is visible, then type *data* into it."""
        #print(*locator, data)
        WebDriverWait(self.driver, TIME_TO_WAIT).until(EC.visibility_of_element_located((locator)))
        self.driver.find_element(*locator).send_keys(data)
    def click(self, locator):
        """Wait until the element is visible, then click it."""
        #print(locator)
        WebDriverWait(self.driver, TIME_TO_WAIT).until(EC.visibility_of_element_located((locator)))
        self.driver.find_element(*locator).click()
    def go_to(self, locator):
        """Wait for presence, then click via JavaScript.

        Useful when a native click is blocked by overlays or the element is
        present but not considered visible.
        """
        WebDriverWait(self.driver, TIME_TO_WAIT).until(EC.presence_of_element_located((locator)))
        element = self.driver.find_element(*locator)
        self.driver.execute_script("arguments[0].click();", element)
    def right_click(self, locator):
        """Context-click (right-click) the element."""
        element = self.driver.find_element(*locator)
        action_right_click = ActionChains(self.driver)
        action_right_click.context_click(element).perform()
    def hover(self, locator):
        """Move the mouse pointer over the element."""
        element = self.driver.find_element(*locator)
        action_hover = ActionChains(self.driver)
        action_hover.move_to_element(element).perform()
    def performance(self, option, locator):
        """Block until the element becomes 'visible' or 'invisible'.

        NOTE(review): any other *option* value silently does nothing --
        confirm whether that is intended.
        """
        if option == 'visible':
            WebDriverWait(self.driver, TIME_TO_WAIT).until(EC.visibility_of_all_elements_located((locator)))
        if option == 'invisible':
            WebDriverWait(self.driver, TIME_TO_WAIT).until(EC.invisibility_of_element_located((locator)))
|
{"/tests/test_youtube_site.py": ["/tests/conftest.py"], "/tests/conftest.py": ["/webui/webui.py"]}
|
15,916
|
prudhvikumar22/proper_modern_webui_automation
|
refs/heads/master
|
/tests/test_youtube_site.py
|
from tests.conftest import browser
def test_youtube_site_load(driver, browser):
    # `driver` and `browser` are pytest fixtures (see tests/conftest.py).
    # NOTE(review): `browser` is also imported at module level above, which is
    # unusual -- pytest discovers fixtures without an import; confirm intent.
    browser.open("http://www.youtube.com")
    assert driver.title == "YouTube"
|
{"/tests/test_youtube_site.py": ["/tests/conftest.py"], "/tests/conftest.py": ["/webui/webui.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.