From 56c64e0fbbbd97d01f022d8d6b32607f9d52caa9 Mon Sep 17 00:00:00 2001 From: Andrei Fluerasu Date: Mon, 29 Apr 2024 15:18:36 -0400 Subject: [PATCH 1/6] Update synced with CHX_Software/pyCHX by MR --- pyCHX/Create_Report.py | 3044 ++++++------- pyCHX/chx_compress.py | 2040 ++++----- pyCHX/chx_correlationc.py | 1789 ++++---- pyCHX/chx_generic_functions.py | 6598 ++++++++++++++--------------- pyCHX/chx_xpcs_xsvs_jupyter_V1.py | 3280 ++++++-------- pyCHX/xpcs_timepixel.py | 1431 +++---- 6 files changed, 8093 insertions(+), 10089 deletions(-) diff --git a/pyCHX/Create_Report.py b/pyCHX/Create_Report.py index 5dbe4a1..f434328 100644 --- a/pyCHX/Create_Report.py +++ b/pyCHX/Create_Report.py @@ -1,9 +1,9 @@ -""" +''' Yugang Created at Aug 08, 2016, CHX-NSLS-II Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline -How to use: +How to use: python Create_Report.py full_file_path uid output_dir (option) An exmplae to use: @@ -11,202 +11,198 @@ python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/ -""" +''' - -def check_dict_keys(dicts, key): +def check_dict_keys( dicts, key): if key not in list(dicts.keys()): - dicts[key] = "unknown" + dicts[key] = 'unknown' + + +import h5py + +from reportlab.pdfgen import canvas +from reportlab.lib.units import inch, cm , mm +from reportlab.lib.colors import pink, green, brown, white, black, red, blue -import os -import sys -from datetime import datetime -from time import time -import h5py -import numpy as np -import pandas as pds -from PIL import Image -from reportlab.lib.colors import black, blue, brown, green, pink, red, white -from reportlab.lib.pagesizes import A4, letter from reportlab.lib.styles import getSampleStyleSheet -from reportlab.lib.units import cm, inch, mm -from reportlab.pdfgen import canvas +#from reportlab.platypus import Image, Paragraph, Table -from pyCHX.chx_generic_functions import pload_obj +from reportlab.lib.pagesizes import letter, A4 +from pyCHX.chx_generic_functions import (pload_obj ) -# from reportlab.platypus import Image, Paragraph, Table +from PIL import Image +from time import time +from datetime import datetime -def add_one_line_string(c, s, top, left=30, fontsize=11): - if (fontsize * len(s)) > 1000: - fontsize = 1000.0 / (len(s)) - c.setFont("Helvetica", fontsize) - c.drawString(left, top, s) +import sys,os +import pandas as pds +import numpy as np -def add_image_string( - c, imgf, data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top, return_=False -): - image = data_dir + imgf +def add_one_line_string( c, s, top, left=30, fontsize = 11 ): + if (fontsize*len(s )) >1000: + fontsize = 1000./(len(s)) + c.setFont("Helvetica", fontsize ) + c.drawString(left, top, s) + + + +def add_image_string( c, imgf, data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_ = False ): + + image = data_dir + imgf if os.path.exists(image): - im = Image.open(image) - ratio = float(im.size[1]) / im.size[0] - height = img_height - width = height / ratio - # if width>400: + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= img_height + width = height/ratio + #if width>400: # width = 350 # height = width*ratio - c.drawImage(image, img_left, img_top, width=width, height=height, mask=None) + c.drawImage( image, img_left, img_top, width= width,height=height,mask=None) c.setFont("Helvetica", 16) - c.setFillColor(blue) - 
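        # A minimal sketch of the scaling rule this helper applies, assuming
        # PIL is installed and 'demo.png' is a hypothetical image file: the
        # drawn width is derived from the requested height so the source
        # aspect ratio (height/width) is preserved on the canvas.
        def _scaled_width(path, img_height):
            from PIL import Image  # local import keeps the sketch self-contained
            im = Image.open(path)
            ratio = float(im.size[1]) / im.size[0]  # height / width of the source
            return img_height / ratio               # width that preserves the ratio
        # e.g. _scaled_width('demo.png', 180) -> drawn width for a 180 pt tall image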
c.drawString(str1_left, str1_top, str1) + c.setFillColor( blue ) + c.drawString(str1_left, str1_top,str1 ) c.setFont("Helvetica", 12) - c.setFillColor(red) - c.drawString(str2_left, str2_top, "filename: %s" % imgf) + c.setFillColor(red) + c.drawString(str2_left, str2_top, 'filename: %s'%imgf ) if return_: - return height / ratio - + return height/ratio + else: - c.setFillColor(blue) - c.drawString(str1_left, str1_top, str1) - c.setFillColor(red) - c.drawString(str1_left, str1_top - 40, "-->Not Calculated!") - - -class create_pdf_report(object): - - """Aug 16, YG@CHX-NSLS-II - Create a pdf report by giving data_dir, uid, out_dir - data_dir: the input data directory, including all necessary images - the images names should be: - meta_file = 'uid=%s-md'%uid - avg_img_file = 'uid=%s--img-avg-.png'%uid - ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid - qiq_file = 'uid=%s--Circular-Average-.png'%uid - ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid - - Iq_t_file = 'uid=%s--Iq-t-.png'%uid - img_sum_t_file = 'uid=%s--img-sum-t.png'%uid - wat_file= 'uid=%s--Waterfall-.png'%uid - Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid - - g2_file = 'uid=%s--g2-.png'%uid - g2_fit_file = 'uid=%s--g2--fit-.png'%uid - q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid - - two_time_file = 'uid=%s--Two-time-.png'%uid - two_g2_file = 'uid=%s--g2--two-g2-.png'%uid - - uid: the unique id - out_dir: the output directory - report_type: - 'saxs': report saxs results - 'gisaxs': report gisaxs results - - - Output: - A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder - """ - - def __init__( - self, - data_dir, - uid, - out_dir=None, - filename=None, - load=True, - user=None, - report_type="saxs", - md=None, - res_h5_filename=None, - ): + c.setFillColor( blue ) + c.drawString( str1_left, str1_top, str1) + c.setFillColor(red) + c.drawString( str1_left, str1_top -40, '-->Not Calculated!' 
) + + + +class create_pdf_report( object ): + + '''Aug 16, YG@CHX-NSLS-II + Create a pdf report by giving data_dir, uid, out_dir + data_dir: the input data directory, including all necessary images + the images names should be: + meta_file = 'uid=%s-md'%uid + avg_img_file = 'uid=%s--img-avg-.png'%uid + ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid + qiq_file = 'uid=%s--Circular-Average-.png'%uid + ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid + + Iq_t_file = 'uid=%s--Iq-t-.png'%uid + img_sum_t_file = 'uid=%s--img-sum-t.png'%uid + wat_file= 'uid=%s--Waterfall-.png'%uid + Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid + + g2_file = 'uid=%s--g2-.png'%uid + g2_fit_file = 'uid=%s--g2--fit-.png'%uid + q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid + + two_time_file = 'uid=%s--Two-time-.png'%uid + two_g2_file = 'uid=%s--g2--two-g2-.png'%uid + + uid: the unique id + out_dir: the output directory + report_type: + 'saxs': report saxs results + 'gisaxs': report gisaxs results + + + Output: + A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder + ''' + + def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None, + report_type='saxs',md=None, res_h5_filename=None ): from datetime import datetime - self.data_dir = data_dir self.uid = uid - self.md = md - # print(md) + self.md = md + #print(md) if user is None: - user = "chx" + user = 'chx' self.user = user if out_dir is None: - out_dir = data_dir + out_dir = data_dir if not os.path.exists(out_dir): os.makedirs(out_dir) - self.out_dir = out_dir - + self.out_dir=out_dir + self.styles = getSampleStyleSheet() self.width, self.height = letter - - self.report_type = report_type - dt = datetime.now() - CurTime = "%02d/%02d/%s/-%02d/%02d/" % (dt.month, dt.day, dt.year, dt.hour, dt.minute) + + self.report_type = report_type + dt =datetime.now() + CurTime = '%02d/%02d/%s/-%02d/%02d/' % ( dt.month, dt.day, dt.year,dt.hour,dt.minute) self.CurTime = CurTime if filename is None: - filename = "XPCS_Analysis_Report_for_uid=%s.pdf" % uid - filename = out_dir + filename - c = canvas.Canvas(filename, pagesize=letter) - self.filename = filename + filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid + filename=out_dir + filename + c = canvas.Canvas( filename, pagesize=letter) + self.filename= filename self.res_h5_filename = res_h5_filename - # c.setTitle("XPCS Analysis Report for uid=%s"%uid) + #c.setTitle("XPCS Analysis Report for uid=%s"%uid) c.setTitle(filename) self.c = c if load: self.load_metadata() - + def load_metadata(self): - uid = self.uid + uid=self.uid data_dir = self.data_dir - # load metadata - meta_file = "uid=%s_md" % uid - self.metafile = data_dir + meta_file - if self.md is None: - md = pload_obj(data_dir + meta_file) + #load metadata + meta_file = 'uid=%s_md'%uid + self.metafile = data_dir + meta_file + if self.md is None: + md = pload_obj( data_dir + meta_file ) self.md = md - else: + else: md = self.md - # print('Get md from giving md') - # print(md) - self.sub_title_num = 0 + #print('Get md from giving md') + #print(md) + self.sub_title_num = 0 uid_g2 = None uid_c12 = None - if "uid_g2" in list(md.keys()): - uid_g2 = md["uid_g2"] - if "uid_c12" in list(md.keys()): - uid_c12 = md["uid_c12"] - - """global definition""" - - if "beg_OneTime" in list(md.keys()): - beg_OneTime = md["beg_OneTime"] - end_OneTime = md["end_OneTime"] + if 'uid_g2' in list(md.keys()): + uid_g2 = md['uid_g2'] + if 'uid_c12' in list(md.keys()): + uid_c12 = md['uid_c12'] + + '''global definition''' + + if 
'beg_OneTime' in list( md.keys()): + beg_OneTime = md['beg_OneTime'] + end_OneTime = md['end_OneTime'] else: beg_OneTime = None end_OneTime = None - - if "beg_TwoTime" in list(md.keys()): - beg_TwoTime = md["beg_TwoTime"] - end_TwoTime = md["end_TwoTime"] + + if 'beg_TwoTime' in list( md.keys()): + beg_TwoTime = md['beg_TwoTime'] + end_TwoTime = md['end_TwoTime'] else: beg_TwoTime = None - end_TwoTime = None - + end_TwoTime = None + + try: - beg = md["beg"] - end = md["end"] - uid_ = uid + "_fra_%s_%s" % (beg, end) + beg = md['beg'] + end= md['end'] + uid_ = uid + '_fra_%s_%s'%(beg, end) if beg_OneTime is None: - uid_OneTime = uid + "_fra_%s_%s" % (beg, end) + uid_OneTime = uid + '_fra_%s_%s'%(beg, end) else: - uid_OneTime = uid + "_fra_%s_%s" % (beg_OneTime, end_OneTime) + uid_OneTime = uid + '_fra_%s_%s'%(beg_OneTime, end_OneTime) if beg_TwoTime is None: - uid_TwoTime = uid + "_fra_%s_%s" % (beg, end) + uid_TwoTime = uid + '_fra_%s_%s'%(beg, end) else: - uid_TwoTime = uid + "_fra_%s_%s" % (beg_TwoTime, end_TwoTime) - + uid_TwoTime = uid + '_fra_%s_%s'%(beg_TwoTime, end_TwoTime) + except: uid_ = uid uid_OneTime = uid @@ -214,226 +210,223 @@ def load_metadata(self): uid_ = uid uid_OneTime = uid - self.avg_img_file = "uid=%s_img_avg.png" % uid - self.ROI_on_img_file = "uid=%s_ROI_on_Image.png" % uid - - self.qiq_file = "uid=%s_q_Iq.png" % uid - self.qiq_fit_file = "uid=%s_form_factor_fit.png" % uid - # self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid - if self.report_type == "saxs" or self.report_type == "ang_saxs": - self.ROI_on_Iq_file = "uid=%s_ROI_on_Iq.png" % uid - - elif self.report_type == "gi_saxs": - self.ROI_on_Iq_file = "uid=%s_Qr_ROI.png" % uid - - self.Iq_t_file = "uid=%s_q_Iqt.png" % uid - self.img_sum_t_file = "uid=%s_img_sum_t.png" % uid - self.wat_file = "uid=%s_waterfall.png" % uid - self.Mean_inten_t_file = "uid=%s_t_ROIs.png" % uid - self.oavs_file = "uid=%s_OAVS.png" % uid - - if uid_g2 is None: - uid_g2 = uid_OneTime - self.g2_file = "uid=%s_g2.png" % uid_g2 - self.g2_fit_file = "uid=%s_g2_fit.png" % uid_g2 - # print( self.g2_fit_file ) + self.avg_img_file = 'uid=%s_img_avg.png'%uid + self.ROI_on_img_file = 'uid=%s_ROI_on_Image.png'%uid + + self.qiq_file = 'uid=%s_q_Iq.png'%uid + self.qiq_fit_file = 'uid=%s_form_factor_fit.png'%uid + #self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid + if self.report_type =='saxs' or self.report_type =='ang_saxs': + self.ROI_on_Iq_file = 'uid=%s_ROI_on_Iq.png'%uid + + elif self.report_type =='gi_saxs': + self.ROI_on_Iq_file = 'uid=%s_Qr_ROI.png'%uid + + self.Iq_t_file = 'uid=%s_q_Iqt.png'%uid + self.img_sum_t_file = 'uid=%s_img_sum_t.png'%uid + self.wat_file= 'uid=%s_waterfall.png'%uid + self.Mean_inten_t_file= 'uid=%s_t_ROIs.png'%uid + self.oavs_file = 'uid=%s_OAVS.png'%uid + + if uid_g2 is None: + uid_g2 = uid_OneTime + self.g2_file = 'uid=%s_g2.png'%uid_g2 + self.g2_fit_file = 'uid=%s_g2_fit.png'%uid_g2 + #print( self.g2_fit_file ) self.g2_new_page = False self.g2_fit_new_page = False - if self.report_type == "saxs": - jfn = "uid=%s_g2.png" % uid_g2 - if os.path.exists(data_dir + jfn): + if self.report_type =='saxs': + jfn = 'uid=%s_g2.png'%uid_g2 + if os.path.exists( data_dir + jfn): self.g2_file = jfn else: - jfn = "uid=%s_g2__joint.png" % uid_g2 - if os.path.exists(data_dir + jfn): + jfn = 'uid=%s_g2__joint.png'%uid_g2 + if os.path.exists( data_dir + jfn): self.g2_file = jfn - self.g2_new_page = True - # self.g2_new_page = True - jfn = "uid=%s_g2_fit.png" % uid_g2 - if os.path.exists(data_dir + jfn): + self.g2_new_page = True + 
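            # The branches above and below just pick the first candidate png that
            # exists on disk ('..._g2.png', falling back to '..._g2__joint.png',
            # and likewise for the fit plot). A compact sketch of that probing
            # idea (hypothetical helper, not used elsewhere in this module):
            def _first_existing(data_dir, candidates, default):
                import os  # local import keeps the sketch self-contained
                for fn in candidates:
                    if os.path.exists(data_dir + fn):
                        return fn
                return default
            # e.g. _first_existing(data_dir, ['uid=%s_g2.png' % uid_g2,
            #      'uid=%s_g2__joint.png' % uid_g2], 'uid=%s_g2.png' % uid_g2)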
#self.g2_new_page = True + jfn = 'uid=%s_g2_fit.png'%uid_g2 + if os.path.exists(data_dir + jfn ): self.g2_fit_file = jfn - # self.g2_fit_new_page = True + #self.g2_fit_new_page = True else: - jfn = "uid=%s_g2_fit__joint.png" % uid_g2 - if os.path.exists(data_dir + jfn): + jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 + if os.path.exists(data_dir + jfn ): self.g2_fit_file = jfn - self.g2_fit_new_page = True - - else: - jfn = "uid=%s_g2__joint.png" % uid_g2 - if os.path.exists(data_dir + jfn): + self.g2_fit_new_page = True + + else: + jfn = 'uid=%s_g2__joint.png'%uid_g2 + if os.path.exists( data_dir + jfn): self.g2_file = jfn - self.g2_new_page = True - jfn = "uid=%s_g2_fit__joint.png" % uid_g2 - if os.path.exists(data_dir + jfn): + self.g2_new_page = True + jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 + if os.path.exists(data_dir + jfn ): self.g2_fit_file = jfn - self.g2_fit_new_page = True - - self.q_rate_file = "uid=%s_Q_Rate_fit.png" % uid_g2 - self.q_rate_loglog_file = "uid=%s_Q_Rate_loglog.png" % uid_g2 - self.g2_q_fitpara_file = "uid=%s_g2_q_fitpara_plot.png" % uid_g2 - - # print( self.q_rate_file ) + self.g2_fit_new_page = True + + self.q_rate_file = 'uid=%s_Q_Rate_fit.png'%uid_g2 + self.q_rate_loglog_file = 'uid=%s_Q_Rate_loglog.png'%uid_g2 + self.g2_q_fitpara_file = 'uid=%s_g2_q_fitpara_plot.png'%uid_g2 + + + #print( self.q_rate_file ) if uid_c12 is None: - uid_c12 = uid_ - self.q_rate_two_time_fit_file = "uid=%s_two_time_Q_Rate_fit.png" % uid_c12 - # print( self.q_rate_two_time_fit_file ) - - self.two_time_file = "uid=%s_Two_time.png" % uid_c12 - self.two_g2_file = "uid=%s_g2_two_g2.png" % uid_c12 - - if self.report_type == "saxs": - jfn = "uid=%s_g2_two_g2.png" % uid_c12 + uid_c12 = uid_ + self.q_rate_two_time_fit_file = 'uid=%s_two_time_Q_Rate_fit.png'%uid_c12 + #print( self.q_rate_two_time_fit_file ) + + self.two_time_file = 'uid=%s_Two_time.png'%uid_c12 + self.two_g2_file = 'uid=%s_g2_two_g2.png'%uid_c12 + + if self.report_type =='saxs': + + jfn = 'uid=%s_g2_two_g2.png'%uid_c12 self.two_g2_new_page = False - if os.path.exists(data_dir + jfn): - # print( 'Here we go') + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') self.two_g2_file = jfn - # self.two_g2_new_page = True - else: - jfn = "uid=%s_g2_two_g2__joint.png" % uid_c12 + #self.two_g2_new_page = True + else: + jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 self.two_g2_new_page = False - if os.path.exists(data_dir + jfn): - # print( 'Here we go') + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') self.two_g2_file = jfn - self.two_g2_new_page = True - else: - jfn = "uid=%s_g2_two_g2__joint.png" % uid_c12 + self.two_g2_new_page = True + else: + jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 self.two_g2_new_page = False - if os.path.exists(data_dir + jfn): - # print( 'Here we go') + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') self.two_g2_file = jfn - self.two_g2_new_page = True - - self.four_time_file = "uid=%s_g4.png" % uid_ - jfn = "uid=%s_g4__joint.png" % uid_ + self.two_g2_new_page = True + + + self.four_time_file = 'uid=%s_g4.png'%uid_ + jfn = 'uid=%s_g4__joint.png'%uid_ self.g4_new_page = False - if os.path.exists(data_dir + jfn): + if os.path.exists( data_dir + jfn ): self.four_time_file = jfn - self.g4_new_page = True - - self.xsvs_fit_file = "uid=%s_xsvs_fit.png" % uid_ - self.contrast_file = "uid=%s_contrast.png" % uid_ - self.dose_file = "uid=%s_dose_analysis.png" % uid_ - - jfn = "uid=%s_dose_analysis__joint.png" % uid_ + self.g4_new_page = True + + self.xsvs_fit_file = 
'uid=%s_xsvs_fit.png'%uid_ + self.contrast_file = 'uid=%s_contrast.png'%uid_ + self.dose_file = 'uid=%s_dose_analysis.png'%uid_ + + jfn = 'uid=%s_dose_analysis__joint.png'%uid_ self.dose_file_new_page = False - if os.path.exists(data_dir + jfn): - self.dose_file = jfn + if os.path.exists( data_dir + jfn ): + self.dose_file = jfn self.dose_file_new_page = True - - # print( self.dose_file ) + + #print( self.dose_file ) if False: - self.flow_g2v = "uid=%s_1a_mqv_g2_v_fit.png" % uid_ - self.flow_g2p = "uid=%s_1a_mqp_g2_p_fit.png" % uid_ - self.flow_g2v_rate_fit = "uid=%s_v_fit_rate_Q_Rate_fit.png" % uid_ - self.flow_g2p_rate_fit = "uid=%s_p_fit_rate_Q_Rate_fit.png" % uid_ - - if True: - self.two_time = "uid=%s_pv_two_time.png" % uid_ - # self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ - - # self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_ - # self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ - self.flow_g2_g2b_p = "uid=%s_g2_two_g2_p.png" % uid_ - self.flow_g2_g2b_v = "uid=%s_g2_two_g2_v.png" % uid_ - - self.flow_g2bv_rate_fit = "uid=%s_vertb_Q_Rate_fit.png" % uid_ - self.flow_g2bp_rate_fit = "uid=%s_parab_Q_Rate_fit.png" % uid_ - - self.flow_g2v = "uid=%s_g2_v_fit.png" % uid_ - self.flow_g2p = "uid=%s_g2_p_fit.png" % uid_ - self.flow_g2v_rate_fit = "uid=%s_vert_Q_Rate_fit.png" % uid_ - self.flow_g2p_rate_fit = "uid=%s_para_Q_Rate_fit.png" % uid_ - - # self.report_header(page=1, top=730, new_page=False) - # self.report_meta(new_page=False) - - self.q2Iq_file = "uid=%s_q2_iq.png" % uid - self.iq_invariant_file = "uid=%s_iq_invariant.png" % uid - - def report_invariant(self, top=300, new_page=False): - """create the invariant analysis report - two images: - ROI on average intensity image - ROI on circular average - """ - uid = self.uid - c = self.c - # add sub-title, static images + self.flow_g2v = 'uid=%s_1a_mqv_g2_v_fit.png'%uid_ + self.flow_g2p = 'uid=%s_1a_mqp_g2_p_fit.png'%uid_ + self.flow_g2v_rate_fit = 'uid=%s_v_fit_rate_Q_Rate_fit.png'%uid_ + self.flow_g2p_rate_fit = 'uid=%s_p_fit_rate_Q_Rate_fit.png'%uid_ + + if True: + self.two_time = 'uid=%s_pv_two_time.png'%uid_ + #self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ + + #self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_ + #self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ + self.flow_g2_g2b_p = 'uid=%s_g2_two_g2_p.png'%uid_ + self.flow_g2_g2b_v = 'uid=%s_g2_two_g2_v.png'%uid_ + + self.flow_g2bv_rate_fit = 'uid=%s_vertb_Q_Rate_fit.png'%uid_ + self.flow_g2bp_rate_fit = 'uid=%s_parab_Q_Rate_fit.png'%uid_ + + self.flow_g2v = 'uid=%s_g2_v_fit.png'%uid_ + self.flow_g2p = 'uid=%s_g2_p_fit.png'%uid_ + self.flow_g2v_rate_fit = 'uid=%s_vert_Q_Rate_fit.png'%uid_ + self.flow_g2p_rate_fit = 'uid=%s_para_Q_Rate_fit.png'%uid_ + + #self.report_header(page=1, top=730, new_page=False) + #self.report_meta(new_page=False) + + self.q2Iq_file = 'uid=%s_q2_iq.png'%uid + self.iq_invariant_file = 'uid=%s_iq_invariant.png'%uid + + def report_invariant( self, top= 300, new_page=False): + '''create the invariant analysis report + two images: + ROI on average intensity image + ROI on circular average + ''' + uid=self.uid + c= self.c + #add sub-title, static images c.setFillColor(black) - c.setFont("Helvetica", 20) + c.setFont("Helvetica", 20) ds = 230 - self.sub_title_num += 1 - c.drawString(10, top, "%s. 
I(q) Invariant Analysis" % self.sub_title_num) # add title - # add q2Iq - c.setFont("Helvetica", 14) - imgf = self.q2Iq_file - # print( imgf ) - label = "q^2*I(q)" - add_image_string( - c, - imgf, - self.data_dir, - img_left=60, - img_top=top - ds * 1.15, - img_height=180, - str1_left=110, - str1_top=top - 35, - str1=label, - str2_left=60, - str2_top=top - 320, - ) - - # add iq_invariant + self.sub_title_num +=1 + c.drawString(10, top, "%s. I(q) Invariant Analysis"%self.sub_title_num ) #add title + #add q2Iq + c.setFont("Helvetica", 14) + imgf = self.q2Iq_file + #print( imgf ) + label = 'q^2*I(q)' + add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=180, + str1_left=110, str1_top = top-35,str1=label, + str2_left = 60, str2_top = top -320 ) + + #add iq_invariant imgf = self.iq_invariant_file - img_height = 180 - img_left, img_top = 320, top - ds * 1.15 - str1_left, str1_top, str1 = 420, top - 35, "I(q) Invariant" - str2_left, str2_top = 350, top - 320 + img_height= 180 + img_left,img_top =320, top - ds*1.15 + str1_left, str1_top,str1= 420, top- 35, 'I(q) Invariant' + str2_left, str2_top = 350, top- 320 - # print ( imgf ) + #print ( imgf ) - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + if new_page: c.showPage() - c.save() - + c.save() + + + def report_header(self, page=1, new_page=False): - """create headers, including title/page number""" - c = self.c + '''create headers, including title/page number''' + c= self.c CurTime = self.CurTime - uid = self.uid - user = self.user + uid=self.uid + user=self.user c.setFillColor(black) c.setFont("Helvetica", 14) - # add page number - c.drawString(250, 10, "Page--%s--" % (page)) - # add time stamp - - # c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) - s_ = "Created at %s@CHX-By-%s" % (CurTime, user) - add_one_line_string(c, s_, 10, left=350, fontsize=11) - - # add title - # c.setFont("Helvetica", 22) - title = "XPCS Analysis Report for uid=%s" % uid - c.setFont("Helvetica", 1000 / (len(title))) - # c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title - c.drawString(50, 760, "XPCS Analysis Report for uid=%s" % uid) # add title - # add a line under title - c.setStrokeColor(red) - c.setLineWidth(width=1.5) - c.line(50, 750, 550, 750) + #add page number + c.drawString(250, 10, "Page--%s--"%( page ) ) + #add time stamp + + #c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) + s_ = "Created at %s@CHX-By-%s"%( CurTime,user ) + add_one_line_string( c, s_, 10, left=350,fontsize = 11 ) + + #add title + #c.setFont("Helvetica", 22) + title = "XPCS Analysis Report for uid=%s"%uid + c.setFont("Helvetica", 1000/( len(title) ) ) + #c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title + c.drawString(50,760, "XPCS Analysis Report for uid=%s"%uid ) #add title + #add a line under title + c.setStrokeColor( red ) + c.setLineWidth(width=1.5) + c.line( 50, 750, 550, 750 ) if new_page: c.showPage() c.save() + def report_meta(self, top=740, new_page=False): - """create the meta data report, - the meta data include: + '''create the meta data report, + the meta data include: uid Sample: Measurement @@ -442,1719 +435,1506 @@ def report_meta(self, top=740, new_page=False): Beam Center Mask file Data dir - Pipeline notebook - """ + Pipeline notebook + ''' - c = self.c - # load 
metadata + c=self.c + #load metadata md = self.md try: - uid = md["uid"] + uid = md['uid'] except: - uid = self.uid - # add sub-title, metadata - c.setFont("Helvetica", 20) + uid=self.uid + #add sub-title, metadata + c.setFont("Helvetica", 20) ds = 15 self.sub_title_num += 1 - c.drawString(10, top, "%s. Metadata" % self.sub_title_num) # add title - top = top - 5 + c.drawString(10, top, "%s. Metadata"%self.sub_title_num ) #add title + top = top - 5 fontsize = 11 - c.setFont("Helvetica", fontsize) - - nec_keys = [ - "sample", - "start_time", - "stop_time", - "Measurement", - "exposure time", - "incident_wavelength", - "cam_acquire_t", - "frame_time", - "detector_distance", - "feedback_x", - "feedback_y", - "shutter mode", - "beam_center_x", - "beam_center_y", - "beam_refl_center_x", - "beam_refl_center_y", - "mask_file", - "bad_frame_list", - "transmission", - "roi_mask_file", - ] + c.setFont("Helvetica", fontsize) + + nec_keys = [ 'sample', 'start_time', 'stop_time','Measurement' ,'exposure time' ,'incident_wavelength', 'cam_acquire_t', + 'frame_time','detector_distance', 'feedback_x', 'feedback_y', 'shutter mode', + 'beam_center_x', 'beam_center_y', 'beam_refl_center_x', 'beam_refl_center_y','mask_file','bad_frame_list', 'transmission', 'roi_mask_file'] for key in nec_keys: check_dict_keys(md, key) - - try: # try exp time from detector - exposuretime = md["count_time"] # exposure time in sec - except: - exposuretime = md["cam_acquire_time"] # exposure time in sec - - try: # try acq time from detector - acquisition_period = md["frame_time"] + + try:#try exp time from detector + exposuretime= md['count_time'] #exposure time in sec + except: + exposuretime= md['cam_acquire_time'] #exposure time in sec + + try:#try acq time from detector + acquisition_period = md['frame_time'] except: try: - acquisition_period = md["acquire period"] - except: - uid = md["uid"] - acquisition_period = float(db[uid]["start"]["acquire period"]) - + acquisition_period = md['acquire period'] + except: + uid = md['uid'] + acquisition_period = float( db[uid]['start']['acquire period'] ) + + s = [] - s.append("UID: %s" % uid) ###line 1, for uid - s.append("Sample: %s" % md["sample"]) ####line 2 sample - s.append( - "Data Acquisition From: %s To: %s" % (md["start_time"], md["stop_time"]) - ) ####line 3 Data Acquisition time - s.append("Measurement: %s" % md["Measurement"]) ####line 4 'Measurement - - # print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) - # print(acquisition_period) - s.append( - "Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms" - % ( - md["incident_wavelength"], - int(md["number of images"]), - round(float(exposuretime) * 1000, 4), - round(float(acquisition_period) * 1000, 4), - ) - ) ####line 5 'lamda... - - s.append( - "Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s" - % (md["detector_distance"], md["feedback_x"], md["feedback_y"], md["shutter mode"]) - ) ####line 6 'Detector-Sample Distance.. 
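        # Each string collected in s is drawn later in this method with
        # add_one_line_string(), which shrinks the font whenever a line would
        # overflow the page: the size is capped so fontsize * len(s) stays at
        # or below 1000, the hard-coded budget in add_one_line_string above.
        # A minimal sketch of that rule:
        def _fit_fontsize(s_, fontsize=11):
            if fontsize * len(s_) > 1000:
                fontsize = 1000.0 / len(s_)
            return fontsize
        # e.g. _fit_fontsize('UID: af8f66') -> 11 (short lines keep the default size)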
- if self.report_type == "saxs": - s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) - elif self.report_type == "gi_saxs": - s7 = ( - "Incident Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) - + " || " - + "Reflect Center: [%s, %s] (pixel)" % (md["beam_refl_center_x"], md["beam_refl_center_y"]) - ) - elif self.report_type == "ang_saxs" or self.report_type == "gi_waxs": - s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) + s.append( 'UID: %s'%uid ) ###line 1, for uid + s.append('Sample: %s'%md['sample'] ) ####line 2 sample + s.append('Data Acquisition From: %s To: %s'%(md['start_time'], md['stop_time']))####line 3 Data Acquisition time + s.append( 'Measurement: %s'%md['Measurement'] ) ####line 4 'Measurement + + #print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) + #print(acquisition_period) + s.append( 'Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms'%( md['incident_wavelength'], int(md['number of images']),round(float(exposuretime)*1000,4), round(float( acquisition_period )*1000,4) ) ) ####line 5 'lamda... + + s.append( 'Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s'%( + md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) ) ####line 6 'Detector-Sample Distance.. + if self.report_type == 'saxs': + s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + elif self.report_type == 'gi_saxs': + s7= ('Incident Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + + ' || ' + + 'Reflect Center: [%s, %s] (pixel)'%(md['beam_refl_center_x'], md['beam_refl_center_y']) ) + elif self.report_type == 'ang_saxs' or self.report_type == 'gi_waxs' : + s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) else: - s7 = "" - - s7 += " || " + "BadLen: %s" % len(md["bad_frame_list"]) - s7 += " || " + "Transmission: %s" % md["transmission"] - s.append(s7) ####line 7 'Beam center... - m = "Mask file: %s" % md["mask_file"] + " || " + "ROI mask file: %s" % md["roi_mask_file"] - # s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename - # s.append( ) ####line 8 mask filename + s7 = '' + + s7 += ' || ' + 'BadLen: %s'%len(md['bad_frame_list']) + s7 += ' || ' + 'Transmission: %s'%md['transmission'] + s.append( s7 ) ####line 7 'Beam center... 
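        # The metadata keys consumed above (transmission, bad_frame_list, the
        # beam centers, ...) were pre-filled by check_dict_keys() from the top
        # of this module, which inserts the placeholder 'unknown' for any
        # missing key so the %-formatting never raises a KeyError. An
        # equivalent per-key sketch using dict.get():
        _transmission = md.get('transmission', 'unknown')  # same default as check_dict_keys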
+ m = 'Mask file: %s'%md['mask_file'] + ' || ' + 'ROI mask file: %s'%md['roi_mask_file'] + #s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename + #s.append( ) ####line 8 mask filename s.append(m) - + if self.res_h5_filename is not None: self.data_dir_ = self.data_dir + self.res_h5_filename else: - self.data_dir_ = self.data_dir - s.append("Analysis Results Dir: %s" % self.data_dir_) ####line 9 results folder - - s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) ####line 10 metadata folder + self.data_dir_ = self.data_dir + s.append( 'Analysis Results Dir: %s'%self.data_dir_ ) ####line 9 results folder + + + s.append( 'Metadata Dir: %s.csv-&.pkl'%self.metafile ) ####line 10 metadata folder try: - s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) ####line 11 notebook folder + s.append( 'Pipeline notebook: %s'%md['NOTEBOOK_FULL_PATH'] ) ####line 11 notebook folder except: pass - # print( 'here' ) - line = 1 - for s_ in s: - add_one_line_string(c, s_, top - ds * line, left=30, fontsize=fontsize) - line += 1 - + #print( 'here' ) + line =1 + for s_ in s: + add_one_line_string( c, s_, top -ds*line , left=30,fontsize = fontsize ) + line += 1 + if new_page: c.showPage() c.save() - - def report_static(self, top=560, new_page=False, iq_fit=False): - """create the static analysis report - two images: - average intensity image - circular average - - """ - # add sub-title, static images - - c = self.c + + def report_static( self, top=560, new_page=False, iq_fit=False): + '''create the static analysis report + two images: + average intensity image + circular average + + ''' + #add sub-title, static images + + c= self.c c.setFont("Helvetica", 20) - uid = self.uid - - ds = 220 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Static Analysis" % self.sub_title_num) # add title + uid=self.uid + + ds = 220 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Static Analysis"%self.sub_title_num ) #add title - # add average image + #add average image c.setFont("Helvetica", 14) - - imgf = self.avg_img_file - - if self.report_type == "saxs": + + imgf = self.avg_img_file + + if self.report_type == 'saxs': ipos = 60 - dshift = 0 - elif self.report_type == "gi_saxs": + dshift=0 + elif self.report_type == 'gi_saxs': ipos = 200 - dshift = 140 - elif self.report_type == "ang_saxs": + dshift= 140 + elif self.report_type == 'ang_saxs': ipos = 200 - dshift = 140 + dshift= 140 else: ipos = 200 - dshift = 140 - - add_image_string( - c, - imgf, - self.data_dir, - img_left=ipos, - img_top=top - ds, - img_height=180, - str1_left=90 + dshift, - str1_top=top - 35, - str1="Average Intensity Image", - str2_left=80 + dshift, - str2_top=top - 230, - ) - - # add q_Iq - if self.report_type == "saxs": - imgf = self.qiq_file - # print(imgf) + dshift= 140 + + + add_image_string( c, imgf, self.data_dir, img_left= ipos, img_top=top-ds, img_height=180, + str1_left=90 + dshift, str1_top = top-35,str1='Average Intensity Image', + str2_left = 80 + dshift, str2_top = top -230 ) + + #add q_Iq + if self.report_type == 'saxs': + imgf = self.qiq_file + #print(imgf) if iq_fit: - imgf = self.qiq_fit_file - label = "Circular Average" + imgf = self.qiq_fit_file + label = 'Circular Average' lab_pos = 390 fn_pos = 320 - add_image_string( - c, - imgf, - self.data_dir, - img_left=320, - img_top=top - ds, - img_height=180, - str1_left=lab_pos, - str1_top=top - 35, - str1=label, - str2_left=fn_pos, - str2_top=top - 230, - ) + add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, + str1_left=lab_pos, str1_top = top-35,str1=label, + str2_left = fn_pos, str2_top = top -230 ) else: if False: - imgf = self.ROI_on_Iq_file # self.qr_1d_file - label = "Qr-1D" + imgf = self.ROI_on_Iq_file #self.qr_1d_file + label = 'Qr-1D' lab_pos = 420 - fn_pos = 350 - - add_image_string( - c, - imgf, - self.data_dir, - img_left=320, - img_top=top - ds, - img_height=180, - str1_left=lab_pos, - str1_top=top - 35, - str1=label, - str2_left=fn_pos, - str2_top=top - 230, - ) + fn_pos = 350 + + add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, + str1_left=lab_pos, str1_top = top-35,str1=label, + str2_left = fn_pos, str2_top = top -230 ) if new_page: c.showPage() - c.save() - - def report_ROI(self, top=300, new_page=False): - """create the static analysis report - two images: - ROI on average intensity image - ROI on circular average - """ - uid = self.uid - c = self.c - # add sub-title, static images + c.save() + + def report_ROI( self, top= 300, new_page=False): + '''create the static analysis report + two images: + ROI on average intensity image + ROI on circular average + ''' + uid=self.uid + c= self.c + #add sub-title, static images c.setFillColor(black) - c.setFont("Helvetica", 20) + c.setFont("Helvetica", 20) ds = 230 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Define of ROI" % self.sub_title_num) # add title - # add ROI on image - c.setFont("Helvetica", 14) + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Define of ROI"%self.sub_title_num ) #add title + #add ROI on image + c.setFont("Helvetica", 14) imgf = self.ROI_on_img_file - label = "ROI on Image" - add_image_string( - c, - imgf, - self.data_dir, - img_left=60, - img_top=top - ds * 1.15, - img_height=240, - str1_left=110, - str1_top=top - 35, - str1=label, - str2_left=60, - str2_top=top - 260, - ) - - # add q_Iq - if self.report_type == "saxs" or self.report_type == "gi_saxs" or self.report_type == "ang_saxs": - imgf = self.ROI_on_Iq_file - img_height = 180 - img_left, img_top = 320, top - ds - str1_left, str1_top, str1 = 420, top - 35, "ROI on Iq" - str2_left, str2_top = 350, top - 260 - - # print ( imgf ) - - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - + label = 'ROI on Image' + add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=240, + str1_left=110, str1_top = top-35,str1=label, + str2_left = 60, str2_top = top -260 ) + + #add q_Iq + if self.report_type == 'saxs' or self.report_type == 'gi_saxs' or self.report_type == 'ang_saxs': + imgf = self.ROI_on_Iq_file + img_height=180 + img_left,img_top =320, top - ds + str1_left, str1_top,str1= 420, top- 35, 'ROI on Iq' + str2_left, str2_top = 350, top- 260 + + #print ( imgf ) + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + if new_page: c.showPage() - c.save() - - def report_time_analysis(self, top=720, new_page=False): - """create the time dependent analysis report - four images: - each image total intensity as a function of time - iq~t - waterfall - mean intensity of each ROI as a function of time - """ - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot + c.save() + + + def report_time_analysis( self, top= 720,new_page=False): + '''create the time dependent analysis report + four images: + each image total intensity as a function of time + iq~t + waterfall + mean intensity of each ROI as a function of time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - top1 = top + top1=top ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Time Dependent Plot" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Time Dependent Plot"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - + + top = top1 - 160 - - # add img_sum_t - if self.report_type == "saxs": + + #add img_sum_t + if self.report_type == 'saxs': ipos = 80 - elif self.report_type == "gi_saxs": - ipos = 200 - elif self.report_type == "ang_saxs": + elif self.report_type == 'gi_saxs': ipos = 200 + elif self.report_type == 'ang_saxs': + ipos = 200 else: - ipos = 200 - - imgf = self.img_sum_t_file - img_height = 140 - img_left, img_top = ipos, top - str1_left, str1_top, str1 = ipos + 60, top1 - 20, "img sum ~ t" - str2_left, str2_top = ipos, top - 5 - - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - - # plot iq~t - if self.report_type == "saxs": + ipos = 200 + + imgf = self.img_sum_t_file + img_height=140 + img_left,img_top = ipos, top + str1_left, str1_top,str1= ipos + 60, top1 - 20 , 'img sum ~ t' + str2_left, str2_top = ipos, top- 5 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + #plot iq~t + if self.report_type == 'saxs': imgf = self.Iq_t_file image = self.data_dir + imgf - - img_height = 140 - img_left, img_top = 350, top - str1_left, str1_top, str1 = 420, top1 - 20, "iq ~ t" - str2_left, str2_top = 360, top - 5 - - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - elif self.report_type == "gi_saxs": + + + img_height=140 + img_left,img_top = 350, top + str1_left, str1_top,str1= 420, top1-20 , 'iq ~ t' + str2_left, str2_top = 360, top- 5 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + elif self.report_type == 'gi_saxs': pass - + top = top1 - 340 - # add waterfall plot + #add waterfall plot imgf = self.wat_file - - img_height = 160 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 140, top + img_height, "waterfall plot" - str2_left, str2_top = 80, top - 5 - - if self.report_type != "ang_saxs": - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) + + img_height=160 + img_left,img_top = 80, top + str1_left, str1_top,str1= 140, top + img_height, 'waterfall plot' + str2_left, str2_top = 80, top- 5 + + if self.report_type != 'ang_saxs': + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) else: pass - # add mean-intensity of each roi + #add mean-intensity of each roi imgf = self.Mean_inten_t_file - - img_height = 160 - img_left, img_top = 360, top - str1_left, str1_top, str1 = 330, top + img_height, "Mean-intensity-of-each-ROI" - str2_left, str2_top = 310, top - 5 - if self.report_type != "ang_saxs": - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) + + img_height=160 + img_left,img_top = 360, top + str1_left, str1_top,str1= 330, top + img_height, 'Mean-intensity-of-each-ROI' + str2_left, str2_top = 310, top- 5 + if self.report_type != 'ang_saxs': + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) else: pass - + if new_page: c.showPage() c.save() - - def report_oavs(self, top=350, oavs_file=None, new_page=False): - """create the oavs images 
report""" - - c = self.c - uid = self.uid - # add sub-title, One Time Correlation Function + + def report_oavs( self, top= 350, oavs_file=None, new_page=False): + '''create the oavs images report + + ''' + + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. OAVS Images" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. OAVS Images"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - # add g2 plot + #add g2 plot if oavs_file is None: imgf = self.oavs_file else: - imgf = oavs_file - # print(self.data_dir + imgf) + imgf = oavs_file + #print(self.data_dir + imgf) if os.path.exists(self.data_dir + imgf): - im = Image.open(self.data_dir + imgf) - ratio = float(im.size[1]) / im.size[0] - img_width = 600 - img_height = img_width * ratio # img_height - # width = height/ratio - + im = Image.open( self.data_dir+imgf ) + ratio = float(im.size[1])/im.size[0] + img_width = 600 + img_height= img_width * ratio #img_height + #width = height/ratio + if not new_page: - # img_height= 550 + #img_height= 550 top = top - 600 - str2_left, str2_top = 80, top - 400 - img_left, img_top = 1, top - - if new_page: - # img_height= 150 + str2_left, str2_top = 80, top - 400 + img_left,img_top = 1, top + + if new_page: + #img_height= 150 top = top - img_height - 50 - str2_left, str2_top = 80, top - 50 - img_left, img_top = 10, top - - str1_left, str1_top, str1 = 150, top + img_height, "OAVS images" - img_width = add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - return_=True, - ) - # print( imgf,self.data_dir ) + str2_left, str2_top = 80, top - 50 + img_left,img_top = 10, top + + str1_left, str1_top, str1= 150, top + img_height, 'OAVS images' + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + #print( imgf,self.data_dir ) print(img_width, img_height) - - def report_one_time(self, top=350, g2_fit_file=None, q_rate_file=None, new_page=False): - """create the one time correlation function report - Two images: - One Time Correlation Function with fit - q-rate fit - """ - - c = self.c - uid = self.uid - # add sub-title, One Time Correlation Function + + + + def report_one_time( self, top= 350, g2_fit_file=None, q_rate_file=None, new_page=False): + '''create the one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + ''' + + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. One Time Correlation Function" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
One Time Correlation Function"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - # add g2 plot + #add g2 plot if g2_fit_file is None: imgf = self.g2_fit_file else: - imgf = g2_fit_file - - if self.report_type != "ang_saxs": - img_height = 300 - top = top - 320 - str2_left, str2_top = 80, top - 0 - + imgf = g2_fit_file + + if self.report_type != 'ang_saxs': + img_height= 300 + top = top - 320 + str2_left, str2_top = 80, top- 0 + else: - img_height = 550 + img_height= 550 top = top - 600 - str2_left, str2_top = 80, top - 400 - # add one_time caculation - img_left, img_top = 1, top + str2_left, str2_top = 80, top - 400 + #add one_time caculation + img_left,img_top = 1, top if self.g2_fit_new_page or self.g2_new_page: - img_height = 550 + + img_height= 550 top = top - 250 - str2_left, str2_top = 80, top - 0 - img_left, img_top = 60, top - - str1_left, str1_top, str1 = 150, top + img_height, "g2 fit plot" - img_width = add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - return_=True, - ) - # print( imgf,self.data_dir ) - # add g2 plot fit - # print(self.q_rate_file ) - if os.path.isfile(self.data_dir + self.q_rate_file): - # print('here') - # print(self.q_rate_file ) - top = top + 70 # + str2_left, str2_top = 80, top - 0 + img_left,img_top = 60, top + + str1_left, str1_top,str1= 150, top + img_height, 'g2 fit plot' + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + #print( imgf,self.data_dir ) + #add g2 plot fit + #print(self.q_rate_file ) + if os.path.isfile( self.data_dir + self.q_rate_file ): + #print('here') + #print(self.q_rate_file ) + top = top + 70 # if q_rate_file is None: imgf = self.q_rate_file else: - imgf = q_rate_file - if self.report_type != "ang_saxs": - # print(img_width) + imgf = q_rate_file + if self.report_type != 'ang_saxs': + #print(img_width) if img_width > 400: - img_height = 90 + img_height = 90 else: - img_height = 180 - img_left, img_top = img_width - 10, top # 350, top - str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 - str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" + img_height= 180 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' else: - img_height = 300 - img_left, img_top = 350, top - 150 - str2_left, str2_top = 380, top - 5 - str1_left, str1_top, str1 = 450, top + 180, "q-rate fit plot" + img_height= 300 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'q-rate fit plot' if self.g2_fit_new_page or self.g2_new_page: - top = top - 200 - img_height = 180 - img_left, img_top = 350, top + top = top - 200 + img_height= 180 + img_left,img_top = 350, top str2_left, str2_top = 380, top - 5 - str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + else: top = top + 320 # if q_rate_file is None: imgf = self.q_rate_loglog_file else: - imgf = q_rate_file - # print(imgf) - if self.report_type != "ang_saxs": - # 
print(img_width) + imgf = q_rate_file + #print(imgf) + if self.report_type != 'ang_saxs': + #print(img_width) if img_width > 400: - img_height = 90 / 2 + img_height = 90/2 else: - img_height = 180 / 2 - img_left, img_top = img_width - 10, top # 350, top - str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 - str1_left, str1_top, str1 = 450, top + 230, "q-rate loglog plot" + img_height= 180 /2 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate loglog plot' else: - img_height = 300 / 2 - img_left, img_top = 350, top - 150 - str2_left, str2_top = 380, top - 5 - str1_left, str1_top, str1 = 450, top + 180, "q-rate loglog plot" + img_height= 300/2 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'q-rate loglog plot' if self.g2_fit_new_page or self.g2_new_page: - top = top - 200 + 50 - img_height = 180 / 1.5 - img_left, img_top = 350, top + top = top - 200 + 50 + img_height= 180 / 1.5 + img_left,img_top = 350, top str2_left, str2_top = 380, top - 5 - str1_left, str1_top, str1 = 450, top + 120, "q-rate loglog plot" - - # print('here') - - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - - top = top - 100 # + str1_left, str1_top,str1= 450, top + 120, 'q-rate loglog plot' + + #print('here') + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + top = top - 100 # if q_rate_file is None: imgf = self.g2_q_fitpara_file else: - imgf = q_rate_file - if self.report_type != "ang_saxs": - # print(img_width) + imgf = q_rate_file + if self.report_type != 'ang_saxs': + #print(img_width) if img_width > 400: - img_height = 90 + img_height = 90 else: - img_height = 180 - img_left, img_top = img_width - 10, top # 350, top - str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 - str1_left, str1_top, str1 = 450, top + 230, "g2 fit para" + img_height= 180 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'g2 fit para' else: - img_height = 300 - img_left, img_top = 350, top - 150 - str2_left, str2_top = 380, top - 5 - str1_left, str1_top, str1 = 450, top + 180, "g2 fit para" + img_height= 300 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'g2 fit para' if self.g2_fit_new_page or self.g2_new_page: - top = top - 200 - img_height = 180 * 1.5 - img_left, img_top = 350, top + top = top - 200 + img_height= 180 * 1.5 + img_left,img_top = 350, top str2_left, str2_top = 380, top - 5 - str1_left, str1_top, str1 = 450, top + 280, "g2 fit para" - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - + str1_left, str1_top,str1= 450, top + 280, 'g2 fit para' + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + if new_page: c.showPage() c.save() - def report_mulit_one_time(self, top=720, new_page=False): - """create the mulit one time correlation function report - Two images: - One Time Correlation Function with fit - q-rate fit - """ - c = self.c - uid = self.uid - # add sub-title, One Time Correlation Function + + + def 
report_mulit_one_time( self, top= 720,new_page=False): + '''create the mulit one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + ''' + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. One Time Correlation Function" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - # add g2 plot + #add g2 plot top = top - 320 imgf = self.g2_fit_file image = self.data_dir + imgf if not os.path.exists(image): image = self.data_dir + self.g2_file - im = Image.open(image) - ratio = float(im.size[1]) / im.size[0] - height = 300 - c.drawImage(image, 1, top, width=height / ratio, height=height, mask="auto") - # c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None ) + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 1, top, width= height/ratio,height=height, mask= 'auto') + #c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None ) c.setFont("Helvetica", 16) - c.setFillColor(blue) - c.drawString(150, top + height, "g2 fit plot") + c.setFillColor( blue) + c.drawString( 150, top + height , 'g2 fit plot' ) c.setFont("Helvetica", 12) - c.setFillColor(red) - c.drawString(80, top - 0, "filename: %s" % imgf) + c.setFillColor(red) + c.drawString( 80, top- 0, 'filename: %s'%imgf ) - # add g2 plot fit - top = top + 70 # + #add g2 plot fit + top = top + 70 # imgf = self.q_rate_file image = self.data_dir + imgf - if os.path.exists(image): - im = Image.open(image) - ratio = float(im.size[1]) / im.size[0] - height = 180 - c.drawImage(image, 350, top, width=height / ratio, height=height, mask="auto") + if os.path.exists(image): + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 180 + c.drawImage( image, 350, top, width= height/ratio,height=height,mask= 'auto') c.setFont("Helvetica", 16) - c.setFillColor(blue) - c.drawString(450, top + 230, "q-rate fit plot") + c.setFillColor( blue) + c.drawString( 450, top + 230, 'q-rate fit plot' ) c.setFont("Helvetica", 12) - c.setFillColor(red) - c.drawString(380, top - 5, "filename: %s" % imgf) - + c.setFillColor(red) + c.drawString( 380, top- 5, 'filename: %s'%imgf ) + if new_page: c.showPage() c.save() - def report_two_time(self, top=720, new_page=False): - """create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - """ - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot + + + def report_two_time( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Two Time Correlation Function" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Two Time Correlation Function"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - - top1 = top + + top1=top top = top1 - 330 - # add q_Iq_t + #add q_Iq_t imgf = self.two_time_file - - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 180, top + 300, "two time correlation function" + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300, 'two time correlation function' str2_left, str2_top = 180, top - 10 - img_width = add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - return_=True, - ) + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + + top = top - 340 - # add q_Iq_t + #add q_Iq_t imgf = self.two_g2_file - - if True: # not self.two_g2_new_page: - img_height = 300 - img_left, img_top = 100 - 70, top - str1_left, str1_top, str1 = 210 - 70, top + 310, "compared g2" - str2_left, str2_top = 180 - 70, top - 10 - + + if True:#not self.two_g2_new_page: + + img_height= 300 + img_left,img_top = 100 -70, top + str1_left, str1_top,str1= 210-70, top + 310, 'compared g2' + str2_left, str2_top = 180-70, top - 10 + if self.two_g2_new_page: - img_left, img_top = 100, top - print(imgf) - img_width = add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - return_=True, - ) - # print(imgf) - top = top + 50 + img_left,img_top = 100, top + print(imgf ) + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top,return_=True ) + #print(imgf) + top = top + 50 imgf = self.q_rate_two_time_fit_file - # print(imgf, img_width, top) + #print(imgf, img_width, top) if img_width < 400: - img_height = 140 - img_left, img_top = 350, top + 30 + img_height= 140 + img_left,img_top = 350, top + 30 str2_left, str2_top = 380 - 80, top - 5 - str1_left, str1_top, str1 = 450 - 80, top + 230, "q-rate fit from two-time" + str1_left, str1_top,str1= 450 -80 , top + 230, 'q-rate fit from two-time' else: - img_height = 90 - img_left, img_top = img_width - 10, top # 350, top - str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 - str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" - - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - + img_height = 90 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + + if new_page: c.showPage() - c.save() - - def report_four_time(self, top=720, new_page=False): - """create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - """ - - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot + c.save() + + def report_four_time( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + + c= self.c + uid=self.uid + #add 
sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Four Time Correlation Function" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. Four Time Correlation Function"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - - top1 = top + + top1=top top = top1 - 330 - # add q_Iq_t + #add q_Iq_t imgf = self.four_time_file - + if not self.g4_new_page: - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 180, top + 300, "four time correlation function" - str2_left, str2_top = 180, top - 10 + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300, 'four time correlation function' + str2_left, str2_top = 180, top - 10 else: - img_height = 600 + img_height= 600 top -= 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 180, top + 300 - 250, "four time correlation function" - str2_left, str2_top = 180, top - 10 - - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300-250, 'four time correlation function' + str2_left, str2_top = 180, top - 10 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + if new_page: c.showPage() - c.save() - - def report_dose(self, top=720, new_page=False): - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot - c.setFont("Helvetica", 20) + c.save() + + def report_dose( self, top= 720, new_page=False): + + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Dose Analysis" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Dose Analysis"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - - top1 = top + + top1=top top = top1 - 530 - # add q_Iq_t + #add q_Iq_t imgf = self.dose_file - - img_height = 500 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 180, top + 500, "dose analysis" + + img_height= 500 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 500, 'dose analysis' str2_left, str2_top = 180, top - 10 - - # print( self.data_dir + self.dose_file) - if os.path.exists(self.data_dir + imgf): - # print( self.dose_file) - im = Image.open(self.data_dir + imgf) - ratio = float(im.size[1]) / im.size[0] - width = img_height / ratio - # print(width) - if width > 450: - img_height = 450 * ratio - + + #print( self.data_dir + self.dose_file) + if os.path.exists( self.data_dir + imgf): + #print( self.dose_file) + im = Image.open( self.data_dir + imgf ) + ratio = float(im.size[1])/im.size[0] + width = img_height/ratio + #print(width) + if width >450: + img_height = 450*ratio + if self.dose_file_new_page: - # img_left,img_top = 180, top - img_left, img_top = 100, top - - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) + #img_left,img_top = 180, top + img_left,img_top = 100, top + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + if new_page: c.showPage() - c.save() - - def report_flow_pv_g2(self, top=720, new_page=False): - """create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - """ - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot + c.save() + + + + def report_flow_pv_g2( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Flow One Time Analysis" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Flow One Time Analysis"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - - top1 = top + + top1=top top = top1 - 330 - # add xsvs fit - + #add xsvs fit + imgf = self.flow_g2v image = self.data_dir + imgf - - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 210, top + 300, "XPCS Vertical Flow" + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow' str2_left, str2_top = 180, top - 10 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - - imgf = self.flow_g2v_rate_fit - img_height = 200 - img_left, img_top = 350, top + 50 - str1_left, str1_top, str1 = 210, top + 300, "" + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2v_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' str2_left, str2_top = 350, top - 10 + 50 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + top = top - 340 - # add contrast fit - imgf = self.flow_g2p - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 210, top + 300, "XPCS Parallel Flow" + #add contrast fit + imgf = self.flow_g2p + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow' str2_left, str2_top = 180, top - 10 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + imgf = self.flow_g2p_rate_fit - img_height = 200 - img_left, img_top = 350, top + 50 - str1_left, str1_top, str1 = 210, top + 300, "" + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' str2_left, str2_top = 350, top - 10 + 50 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) if new_page: c.showPage() - c.save() - - def report_flow_pv_two_time(self, top=720, new_page=False): - """create the two time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - """ - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot + c.save() + + + def report_flow_pv_two_time( self, top= 720, new_page=False): + '''create the two time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Flow One &Two Time Comparison" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Flow One &Two Time Comparison"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) - - top1 = top + + top1=top top = top1 - 330 - # add xsvs fit + #add xsvs fit + if False: imgf = self.two_time image = self.data_dir + imgf - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 210, top + 300, "Two_time" + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'Two_time' str2_left, str2_top = 180, top - 10 - add_image_string( - c, - imgf, - self.data_dir, - img_left, - img_top, - img_height, - str1_left, - str1_top, - str1, - str2_left, - str2_top, - ) - - imgf = self.flow_g2_g2b_p - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 210, top + 300, "XPCS Vertical Flow by two-time" + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + imgf = self.flow_g2_g2b_p + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow by two-time' str2_left, str2_top = 180, top - 10 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - - imgf = self.flow_g2bp_rate_fit - img_height = 200 - img_left, img_top = 350, top + 50 - str1_left, str1_top, str1 = 210, top + 300, "" + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2bp_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' str2_left, str2_top = 350, top - 10 + 50 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + top = top - 340 - # add contrast fit - imgf = self.flow_g2_g2b_v - - img_height = 300 - img_left, img_top = 80, top - str1_left, str1_top, str1 = 210, top + 300, "XPCS Parallel Flow by two-time" + #add contrast fit + imgf = self.flow_g2_g2b_v + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow by two-time' str2_left, str2_top = 180, top - 10 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) - - imgf = self.flow_g2bv_rate_fit - img_height = 200 - img_left, img_top = 350, top + 50 - str1_left, str1_top, str1 = 210, top + 300, "" + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2bv_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' str2_left, str2_top = 350, top - 10 + 50 - add_image_string( - c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top - ) + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) if new_page: c.showPage() - c.save() - - def report_xsvs(self, top=720, new_page=False): - """create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - """ - c = self.c - uid = self.uid - # add sub-title, Time-dependent plot + c.save() + + def report_xsvs( self, top= 720, 
new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num += 1 - c.drawString(10, top, "%s. Visibility Analysis" % self.sub_title_num) # add title + self.sub_title_num +=1 + c.drawString(10, top, "%s. Visibility Analysis"%self.sub_title_num ) #add title c.setFont("Helvetica", 14) top = top - 330 - # add xsvs fit - imgf = self.xsvs_fit_file - add_image_string( - c, - imgf, - self.data_dir, - img_left=100, - img_top=top, - img_height=300, - str1_left=210, - str1_top=top + 300, - str1="XSVS_Fit_by_Negtive_Binomal Function", - str2_left=180, - str2_top=top - 10, - ) - - # add contrast fit - top = top - 340 + #add xsvs fit + imgf = self.xsvs_fit_file + add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300, + + str1_left=210, str1_top = top +300,str1='XSVS_Fit_by_Negtive_Binomal Function', + str2_left = 180, str2_top = top -10 ) + + #add contrast fit + top = top -340 imgf = self.contrast_file - add_image_string( - c, - imgf, - self.data_dir, - img_left=100, - img_top=top, - img_height=300, - str1_left=210, - str1_top=top + 310, - str1="contrast get from xsvs and xpcs", - str2_left=180, - str2_top=top - 10, - ) - + add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300, + + str1_left=210, str1_top = top + 310,str1='contrast get from xsvs and xpcs', + str2_left = 180, str2_top = top -10 ) + if False: - top1 = top + top1=top top = top1 - 330 - # add xsvs fit + #add xsvs fit imgf = self.xsvs_fit_file image = self.data_dir + imgf - im = Image.open(image) - ratio = float(im.size[1]) / im.size[0] - height = 300 - c.drawImage(image, 100, top, width=height / ratio, height=height, mask=None) + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None) c.setFont("Helvetica", 16) - c.setFillColor(blue) - c.drawString(210, top + 300, "XSVS_Fit_by_Negtive_Binomal Function") + c.setFillColor( blue) + c.drawString( 210, top + 300 , 'XSVS_Fit_by_Negtive_Binomal Function' ) c.setFont("Helvetica", 12) - c.setFillColor(red) - c.drawString(180, top - 10, "filename: %s" % imgf) + c.setFillColor(red) + c.drawString( 180, top- 10, 'filename: %s'%imgf ) top = top - 340 - # add contrast fit + #add contrast fit imgf = self.contrast_file image = self.data_dir + imgf - im = Image.open(image) - ratio = float(im.size[1]) / im.size[0] - height = 300 - c.drawImage(image, 100, top, width=height / ratio, height=height, mask=None) + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None) c.setFont("Helvetica", 16) - c.setFillColor(blue) - c.drawString(210, top + 310, "contrast get from xsvs and xpcs") + c.setFillColor( blue) + c.drawString( 210, top + 310, 'contrast get from xsvs and xpcs' ) c.setFont("Helvetica", 12) - c.setFillColor(red) - c.drawString(180, top - 10, "filename: %s" % imgf) + c.setFillColor(red) + c.drawString( 180, top- 10, 'filename: %s'%imgf ) + if new_page: c.showPage() c.save() + + + def new_page(self): - c = self.c + c=self.c c.showPage() - + def save_page(self): - c = self.c + c=self.c c.save() - + def done(self): out_dir = self.out_dir - uid = self.uid - + uid=self.uid + print() - 
print("*" * 40) - print("The pdf report is created with filename as: %s" % (self.filename)) - print("*" * 40) - - -def create_multi_pdf_reports_for_uids(uids, g2, data_dir, report_type="saxs", append_name=""): - """Aug 16, YG@CHX-NSLS-II - Create multi pdf reports for each uid in uids - uids: a list of uids to be reported - g2: a dictionary, {run_num: sub_num: g2_of_each_uid} - data_dir: - Save pdf report in data dir - """ - for key in list(g2.keys()): - i = 1 - for sub_key in list(g2[key].keys()): + print('*'*40) + print ('The pdf report is created with filename as: %s'%(self.filename )) + print('*'*40) + + + + +def create_multi_pdf_reports_for_uids( uids, g2, data_dir, report_type='saxs', append_name='' ): + ''' Aug 16, YG@CHX-NSLS-II + Create multi pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + ''' + for key in list( g2.keys()): + i=1 + for sub_key in list( g2[key].keys() ): uid_i = uids[key][sub_key] - data_dir_ = os.path.join(data_dir, "%s/" % uid_i) - if append_name != "": + data_dir_ = os.path.join( data_dir, '%s/'%uid_i ) + if append_name!='': uid_name = uid_i + append_name else: uid_name = uid_i - c = create_pdf_report( - data_dir_, - uid_i, - data_dir, - report_type=report_type, - filename="XPCS_Analysis_Report_for_uid=%s.pdf" % uid_name, - ) - # Page one: Meta-data/Iq-Q/ROI + c= create_pdf_report( data_dir_, uid_i,data_dir, + report_type=report_type, filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid_name ) + #Page one: Meta-data/Iq-Q/ROI c.report_header(page=1) - c.report_meta(top=730) - # c.report_one_time( top= 500 ) - # c.new_page() - if report_type == "flow": - c.report_flow_pv_g2(top=720) + c.report_meta( top=730) + #c.report_one_time( top= 500 ) + #c.new_page() + if report_type =='flow': + c.report_flow_pv_g2( top= 720) c.save_page() - c.done() - - -def create_one_pdf_reports_for_uids(uids, g2, data_dir, filename="all_in_one", report_type="saxs"): - """Aug 16, YG@CHX-NSLS-II - Create one pdf reports for each uid in uids - uids: a list of uids to be reported - g2: a dictionary, {run_num: sub_num: g2_of_each_uid} - data_dir: - Save pdf report in data dir - """ - c = create_pdf_report(data_dir, uid=filename, out_dir=data_dir, load=False, report_type=report_type) - page = 1 - - for key in list(g2.keys()): - i = 1 - for sub_key in list(g2[key].keys()): + c.done() + + + + + +def create_one_pdf_reports_for_uids( uids, g2, data_dir, filename='all_in_one', report_type='saxs' ): + ''' Aug 16, YG@CHX-NSLS-II + Create one pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + ''' + c= create_pdf_report( data_dir, uid=filename, out_dir=data_dir, load=False, report_type= report_type) + page=1 + + for key in list( g2.keys()): + i=1 + for sub_key in list( g2[key].keys() ): uid_i = uids[key][sub_key] - data_dir_ = os.path.join(data_dir, "%s/" % uid_i) - + data_dir_ = os.path.join( data_dir, '%s/'%uid_i) + c.uid = uid_i c.data_dir = data_dir_ - c.load_metadata() - - # Page one: Meta-data/Iq-Q/ROI + c.load_metadata() + + #Page one: Meta-data/Iq-Q/ROI c.report_header(page=page) - c.report_meta(top=730) - c.report_one_time(top=500) + c.report_meta( top=730) + c.report_one_time( top= 500 ) c.new_page() page += 1 - c.uid = filename + c.uid = filename c.save_page() - c.done() - - -def save_res_h5(full_uid, data_dir, save_two_time=False): - """ - YG. 
Nov 10, 2016 - save the results to a h5 file - will save meta data/avg_img/mask/roi (ring_mask or box_mask)/ - will aslo save multi-tau calculated one-time correlation function g2/taus - will also save two-time derived one-time correlation function /g2b/taus2 - if save_two_time if True, will save two-time correaltion function - """ - with h5py.File(data_dir + "%s.h5" % full_uid, "w") as hf: - # write meta data - meta_data = hf.create_dataset("meta_data", (1,), dtype="i") - for key in md.keys(): + c.done() + + +def save_res_h5( full_uid, data_dir, save_two_time=False ): + ''' + YG. Nov 10, 2016 + save the results to a h5 file + will save meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo save multi-tau calculated one-time correlation function g2/taus + will also save two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will save two-time correaltion function + ''' + with h5py.File(data_dir + '%s.h5'%full_uid, 'w') as hf: + #write meta data + meta_data = hf.create_dataset("meta_data", (1,), dtype='i') + for key in md.keys(): try: meta_data.attrs[key] = md[key] except: pass - shapes = md["avg_img"].shape - avg_h5 = hf.create_dataset("avg_img", data=md["avg_img"]) - mask_h5 = hf.create_dataset("mask", data=md["mask"]) - roi_h5 = hf.create_dataset("roi", data=md["ring_mask"]) + shapes = md['avg_img'].shape + avg_h5 = hf.create_dataset("avg_img", data = md['avg_img'] ) + mask_h5 = hf.create_dataset("mask", data = md['mask'] ) + roi_h5 = hf.create_dataset("roi", data = md['ring_mask'] ) - g2_h5 = hf.create_dataset("g2", data=g2) - taus_h5 = hf.create_dataset("taus", data=taus) + g2_h5 = hf.create_dataset("g2", data = g2 ) + taus_h5 = hf.create_dataset("taus", data = taus ) if save_two_time: - g12b_h5 = hf.create_dataset("g12b", data=g12b) - g2b_h5 = hf.create_dataset("g2b", data=g2b) - taus2_h5 = hf.create_dataset("taus2", data=taus2) - + g12b_h5 = hf.create_dataset("g12b", data = g12b ) + g2b_h5 = hf.create_dataset("g2b", data = g2b ) + taus2_h5 = hf.create_dataset("taus2", data = taus2 ) def printname(name): - print(name) - - -# f.visit(printname) -def load_res_h5(full_uid, data_dir): - """YG. Nov 10, 2016 - load results from a h5 file - will load meta data/avg_img/mask/roi (ring_mask or box_mask)/ - will aslo load multi-tau calculated one-time correlation function g2/taus - will also load two-time derived one-time correlation function /g2b/taus2 - if save_two_time if True, will load two-time correaltion function - - """ - with h5py.File(data_dir + "%s.h5" % full_uid, "r") as hf: - meta_data_h5 = hf.get("meta_data") + print (name) +#f.visit(printname) +def load_res_h5( full_uid, data_dir ): + '''YG. 
Nov 10, 2016 + load results from a h5 file + will load meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo load multi-tau calculated one-time correlation function g2/taus + will also load two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will load two-time correaltion function + + ''' + with h5py.File(data_dir + '%s.h5'%full_uid, 'r') as hf: + meta_data_h5 = hf.get( "meta_data" ) meta_data = {} - for att in meta_data_h5.attrs: - meta_data[att] = meta_data_h5.attrs[att] - avg_h5 = np.array(hf.get("avg_img")) - mask_h5 = np.array(hf.get("mask")) - roi_h5 = np.array(hf.get("roi")) - g2_h5 = np.array(hf.get("g2")) - taus_h5 = np.array(hf.get("taus")) - g2b_h5 = np.array(hf.get("g2b")) - taus2_h5 = np.array(hf.get("taus2")) - if "g12b" in hf: - g12b_h5 = np.array(hf.get("g12b")) - - if "g12b" in hf: - return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b - else: - return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5 - - -def make_pdf_report( - data_dir, - uid, - pdf_out_dir, - pdf_filename, - username, - run_fit_form, - run_one_time, - run_two_time, - run_four_time, - run_xsvs, - run_dose=None, - oavs_report=False, - report_type="saxs", - md=None, - report_invariant=False, - return_class=False, - res_h5_filename=None, -): + for att in meta_data_h5.attrs: + meta_data[att] = meta_data_h5.attrs[att] + avg_h5 = np.array( hf.get("avg_img" ) ) + mask_h5 = np.array(hf.get("mask" )) + roi_h5 =np.array( hf.get("roi" )) + g2_h5 = np.array( hf.get("g2" )) + taus_h5 = np.array( hf.get("taus" )) + g2b_h5 = np.array( hf.get("g2b")) + taus2_h5 = np.array( hf.get("taus2")) + if 'g12b' in hf: + g12b_h5 = np.array( hf.get("g12b")) + + if 'g12b' in hf: + return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b + else: + return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5 + + + + +def make_pdf_report( data_dir, uid, pdf_out_dir, pdf_filename, username, + run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=None, + oavs_report = False,report_type='saxs', md=None,report_invariant=False, return_class=False, res_h5_filename=None + ): + if uid.startswith("uid=") or uid.startswith("Uid="): uid = uid[4:] - c = create_pdf_report( - data_dir, - uid, - pdf_out_dir, - filename=pdf_filename, - user=username, - report_type=report_type, - md=md, - res_h5_filename=res_h5_filename, - ) - # print( c.md) - # Page one: Meta-data/Iq-Q/ROI + c= create_pdf_report( data_dir, uid, pdf_out_dir, filename= pdf_filename, user= username, report_type=report_type, md = md, res_h5_filename=res_h5_filename ) + #print( c.md) + #Page one: Meta-data/Iq-Q/ROI c.report_header(page=1) - c.report_meta(top=730) - c.report_static(top=540, iq_fit=run_fit_form) - c.report_ROI(top=290) - page = 1 + c.report_meta( top=730) + c.report_static( top=540, iq_fit = run_fit_form ) + c.report_ROI( top= 290) + page = 1 ##Page Two for plot OVAS images if oavs_report is True if oavs_report: c.new_page() - c.report_header(page=2) - c.report_oavs(top=720, oavs_file=None, new_page=True) - page += 1 - - # Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q + c.report_header(page=2) + c.report_oavs( top= 720, oavs_file=None, new_page=True) + page +=1 + + #Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q c.new_page() - page += 1 + page +=1 c.report_header(page=page) - - if c.report_type != "ang_saxs": - c.report_time_analysis(top=720) - if run_one_time: - if c.report_type != "ang_saxs": + + if c.report_type != 
'ang_saxs': + c.report_time_analysis( top= 720) + if run_one_time: + if c.report_type != 'ang_saxs': top = 350 - else: + else: top = 500 if c.g2_fit_new_page: c.new_page() - page += 1 + page +=1 top = 720 - c.report_one_time(top=top) - - # self.two_g2_new_page = True - # self.g2_fit_new_page = True - - # Page Three: two-time/two g2 - + c.report_one_time( top= top ) + + + #self.two_g2_new_page = True + #self.g2_fit_new_page = True + + #Page Three: two-time/two g2 + if run_two_time: c.new_page() - page += 1 - c.report_header(page=page) - c.report_two_time(top=720) + page +=1 + c.report_header(page= page) + c.report_two_time( top= 720 ) if run_four_time: c.new_page() - page += 1 - c.report_header(page=page) - c.report_four_time(top=720) + page +=1 + c.report_header(page= page) + c.report_four_time( top= 720 ) if run_xsvs: c.new_page() - page += 1 - c.report_header(page=page) - c.report_xsvs(top=720) + page +=1 + c.report_header(page= page) + c.report_xsvs( top= 720 ) if run_dose: c.new_page() - page += 1 - c.report_header(page=page) - c.report_dose(top=702) + page +=1 + c.report_header(page= page) + c.report_dose( top = 702) if report_invariant: c.new_page() - page += 1 - c.report_header(page=page) - c.report_invariant(top=702) - + page +=1 + c.report_header(page= page) + c.report_invariant( top = 702) + else: - c.report_flow_pv_g2(top=720, new_page=True) - c.report_flow_pv_two_time(top=720, new_page=True) + c.report_flow_pv_g2( top= 720, new_page= True) + c.report_flow_pv_two_time( top= 720, new_page= True ) c.save_page() - c.done() + c.done() if return_class: return c - - + + ###################################### ###Deal with saving dict to hdf5 file def save_dict_to_hdf5(dic, filename): """ .... """ - with h5py.File(filename, "w") as h5file: - recursively_save_dict_contents_to_group(h5file, "/", dic) - + with h5py.File(filename, 'w') as h5file: + recursively_save_dict_contents_to_group(h5file, '/', dic) def load_dict_from_hdf5(filename): """ .... 
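+    A minimal round-trip sketch (hypothetical path; assumes the dict holds
+    only types recursively_save_dict_contents_to_group below can write:
+    scalars, strings, numpy arrays, and nested dicts):
+
+        d = {'md': {'uid': 'abc', 'exposure': 0.1}, 'g2': np.ones(4)}
+        save_dict_to_hdf5(d, '/tmp/test.h5')
+        d2 = load_dict_from_hdf5('/tmp/test.h5')  # nested dicts come back from groups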
""" - with h5py.File(filename, "r") as h5file: - return recursively_load_dict_contents_from_group(h5file, "/") - - -def recursively_save_dict_contents_to_group(h5file, path, dic): + with h5py.File(filename, 'r') as h5file: + return recursively_load_dict_contents_from_group(h5file, '/') + +def recursively_save_dict_contents_to_group( h5file, path, dic): """...""" # argument type checking if not isinstance(dic, dict): - raise ValueError("must provide a dictionary") - + raise ValueError("must provide a dictionary") + if not isinstance(path, str): raise ValueError("path must be a string") if not isinstance(h5file, h5py._hl.files.File): raise ValueError("must be an open h5py file") # save items to the hdf5 file for key, item in dic.items(): - # print(key,item) + #print(key,item) key = str(key) if isinstance(item, list): item = np.array(item) - # print(item) + #print(item) if not isinstance(key, str): raise ValueError("dict keys must be strings to save to hdf5") # save strings, numpy.int64, and numpy.float64 types - if isinstance( - item, (np.int64, np.float64, str, float, np.float32, int) - ): # removed depreciated np.float LW @06/11/2023 - # print( 'here' ) + if isinstance(item, (np.int64, np.float64, str, float, np.float32,int)): # removed depreciated np.float LW @06/11/2023 + #print( 'here' ) h5file[path + key] = item if not h5file[path + key].value == item: - raise ValueError("The data representation in the HDF5 file does not match the original dict.") + raise ValueError('The data representation in the HDF5 file does not match the original dict.') # save numpy arrays - elif isinstance(item, np.ndarray): + elif isinstance(item, np.ndarray): try: h5file[path + key] = item except: - item = np.array(item).astype("|S9") + item = np.array(item).astype('|S9') h5file[path + key] = item if not np.array_equal(h5file[path + key].value, item): - raise ValueError("The data representation in the HDF5 file does not match the original dict.") + raise ValueError('The data representation in the HDF5 file does not match the original dict.') # save dictionaries elif isinstance(item, dict): - recursively_save_dict_contents_to_group(h5file, path + key + "/", item) + recursively_save_dict_contents_to_group(h5file, path + key + '/', item) # other types cannot be saved and will result in an error else: - # print(item) - raise ValueError("Cannot save %s type." % type(item)) - - -def recursively_load_dict_contents_from_group(h5file, path): + #print(item) + raise ValueError('Cannot save %s type.' % type(item)) + + +def recursively_load_dict_contents_from_group( h5file, path): """...""" ans = {} for key, item in h5file[path].items(): if isinstance(item, h5py._hl.dataset.Dataset): ans[key] = item.value elif isinstance(item, h5py._hl.group.Group): - ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + "/") - return ans - - -def export_xpcs_results_to_h5(filename, export_dir, export_dict): - """ - YG. May 10, 2017 - save the results to a h5 file - - YG. Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas - - filename: the h5 file name - export_dir: the exported file folder - export_dict: dict, with keys as md, g2, g4 et.al. - """ - + ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/') + return ans + + +def export_xpcs_results_to_h5( filename, export_dir, export_dict ): + ''' + YG. May 10, 2017 + save the results to a h5 file + + YG. 
Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. + ''' + fout = export_dir + filename - dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p"] - dict_nest = ["taus_uids", "g2_uids"] - - with h5py.File(fout, "w") as hf: - flag = False - for key in list(export_dict.keys()): - # print( key ) - if key in dicts: # =='md' or key == 'qval_dict': - md = export_dict[key] - meta_data = hf.create_dataset(key, (1,), dtype="i") - for key_ in md.keys(): + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] + dict_nest=['taus_uids', 'g2_uids' ] + + with h5py.File(fout, 'w') as hf: + flag=False + for key in list(export_dict.keys()): + #print( key ) + if key in dicts: #=='md' or key == 'qval_dict': + md= export_dict[key] + meta_data = hf.create_dataset( key, (1,), dtype='i') + for key_ in md.keys(): try: - meta_data.attrs[str(key_)] = md[key_] + meta_data.attrs[str(key_)] = md[key_] except: - pass + pass elif key in dict_nest: - # print(key) + #print(key) try: - recursively_save_dict_contents_to_group(hf, "/%s/" % key, export_dict[key]) + recursively_save_dict_contents_to_group(hf, '/%s/'%key, export_dict[key] ) except: - print("Can't export the key: %s in this dataset." % key) - - elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + print("Can't export the key: %s in this dataset."%key) + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: try: - export_dict[key].to_hdf( - fout, - key=key, - mode="a", - ) + export_dict[key].to_hdf( fout, key=key, mode='a', ) except: - flag = True + flag=True else: - data = hf.create_dataset(key, data=export_dict[key]) - # add this fill line at Octo 27, 2017 + data = hf.create_dataset(key, data = export_dict[key] ) + #add this fill line at Octo 27, 2017 data.set_fill_value = np.nan - if flag: - for key in list(export_dict.keys()): - if key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: - export_dict[key].to_hdf( - fout, - key=key, - mode="a", - ) - - print("The xpcs analysis results are exported to %s with filename as %s" % (export_dir, filename)) - - -def extract_xpcs_results_from_h5_debug(filename, import_dir, onekey=None, exclude_keys=None): - """ - YG. Dec 22, 2016 - extract data from a h5 file - - filename: the h5 file name - import_dir: the imported file folder - onekey: string, if not None, only extract that key - return: - extact_dict: dict, with keys as md, g2, g4 et.al. - """ - + if flag: + for key in list(export_dict.keys()): + if key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + + print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) + + + +def extract_xpcs_results_from_h5_debug( filename, import_dir, onekey=None, exclude_keys=None ): + ''' + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. 
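+
+    A minimal usage sketch (hypothetical uid and folder; the '%s_Res.h5'
+    naming follows read_contrast_from_multi_h5 below; exclude_keys can skip
+    large arrays such as the two-time 'g12b'):
+
+        res = extract_xpcs_results_from_h5_debug( '%s_Res.h5'%uid,
+                    import_dir=path + uid + '/', exclude_keys=['g12b'] )
+        g2 = res['g2']   # available keys mirror what was exported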
+ ''' + + import pandas as pds import numpy as np - import pandas as pds - - extract_dict = {} + extract_dict = {} fp = import_dir + filename pds_type_keys = [] - dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p", "taus_uids", "g2_uids"] + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] if exclude_keys is None: - exclude_keys = [] + exclude_keys =[] if onekey is None: for k in dicts: extract_dict[k] = {} - with h5py.File(fp, "r") as hf: - # print (list( hf.keys()) ) - for key in list(hf.keys()): + with h5py.File( fp, 'r') as hf: + #print (list( hf.keys()) ) + for key in list( hf.keys()): if key not in exclude_keys: if key in dicts: - extract_dict[key] = recursively_load_dict_contents_from_group(hf, "/" + key + "/") - elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: - pds_type_keys.append(key) - else: - extract_dict[key] = np.array(hf.get(key)) + extract_dict[key] = recursively_load_dict_contents_from_group(hf, '/' + key + '/') + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + pds_type_keys.append( key ) + else: + extract_dict[key] = np.array( hf.get( key )) for key in pds_type_keys: if key not in exclude_keys: - extract_dict[key] = pds.read_hdf(fp, key=key) + extract_dict[key] = pds.read_hdf(fp, key= key ) else: - if onekey == "md": - with h5py.File(fp, "r") as hf: - md = hf.get("md") + if onekey == 'md': + with h5py.File( fp, 'r') as hf: + md = hf.get('md') for key in list(md.attrs): - extract_dict["md"][key] = md.attrs[key] - elif onekey in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: - extract_dict[onekey] = pds.read_hdf(fp, key=onekey) + extract_dict['md'][key] = md.attrs[key] + elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) else: try: - with h5py.File(fp, "r") as hf: - extract_dict[onekey] = np.array(hf.get(onekey)) + with h5py.File( fp, 'r') as hf: + extract_dict[onekey] = np.array( hf.get( onekey )) except: - print("The %s dosen't have this %s value" % (fp, onekey)) + print("The %s dosen't have this %s value"%(fp, onekey) ) return extract_dict -def export_xpcs_results_to_h5_old(filename, export_dir, export_dict): - """ - YG. Dec 22, 2016 - save the results to a h5 file - filename: the h5 file name - export_dir: the exported file folder - export_dict: dict, with keys as md, g2, g4 et.al. - """ - import h5py + + + + +def export_xpcs_results_to_h5_old( filename, export_dir, export_dict ): + ''' + YG. Dec 22, 2016 + save the results to a h5 file + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. 
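+
+    A minimal export_dict sketch (hypothetical values; export_dir is a folder
+    path ending in '/'; pandas-type results such as 'g2_fit_paras' are written
+    with DataFrame.to_hdf, plain arrays with create_dataset):
+
+        export_dict = {'md': md, 'qval_dict': qval_dict, 'taus': taus, 'g2': g2}
+        export_xpcs_results_to_h5_old( '%s_Res.h5'%uid, data_dir, export_dict )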
+    '''
+    import h5py
     fout = export_dir + filename
-    dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p"]  # {k1: { }}
-    dict_nest = ["taus_uids", "g2_uids"]  # {k1: {k2:}}
-    with h5py.File(fout, "w") as hf:
-        for key in list(export_dict.keys()):
-            # print( key )
-            if key in dicts:  # =='md' or key == 'qval_dict':
-                md = export_dict[key]
-                meta_data = hf.create_dataset(key, (1,), dtype="i")
-                for key_ in md.keys():
+    dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] #{k1: { }}
+    dict_nest= ['taus_uids', 'g2_uids'] #{k1: {k2:}}
+    with h5py.File(fout, 'w') as hf:
+        for key in list(export_dict.keys()):
+            #print( key )
+            if key in dicts: #=='md' or key == 'qval_dict':
+                md= export_dict[key]
+                meta_data = hf.create_dataset( key, (1,), dtype='i')
+                for key_ in md.keys():
                 try:
-                    meta_data.attrs[str(key_)] = md[key_]
+                    meta_data.attrs[str(key_)] = md[key_]
                 except:
-                    pass
+                    pass
             elif key in dict_nest:
                 k1 = export_dict[key]
-                v1 = hf.create_dataset(key, (1,), dtype="i")
+                v1 = hf.create_dataset( key, (1,), dtype='i')
                 for k2 in k1.keys():
-                    v2 = hf.create_dataset(k1, (1,), dtype="i")
-
-            elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]:
-                export_dict[key].to_hdf(
-                    fout,
-                    key=key,
-                    mode="a",
-                )
+
+                    v2 = hf.create_dataset( k1, (1,), dtype='i')
+
+
+            elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
+                export_dict[key].to_hdf( fout, key=key, mode='a', )
             else:
-                data = hf.create_dataset(key, data=export_dict[key])
-    print("The xpcs analysis results are exported to %s with filename as %s" % (export_dir, filename))
-
-
-def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex=None):
-    """
-    YG. Dec 22, 2016
-    extract data from a h5 file
-
-    filename: the h5 file name
-    import_dir: the imported file folder
-    onekey: string, if not None, only extract that key
-    return:
-        extract_dict: dict, with keys as md, g2, g4 et.al.
-    """
-
+                data = hf.create_dataset(key, data = export_dict[key] )
+    print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename))
+
+
+def extract_xpcs_results_from_h5( filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex = None ):
+    '''
+    YG. Dec 22, 2016
+    extract data from a h5 file
+
+    filename: the h5 file name
+    import_dir: the imported file folder
+    onekey: string, if not None, only extract that key
+    return:
+        extract_dict: dict, with keys as md, g2, g4 et.al. 
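+
+    A round-trip sketch with export_xpcs_results_to_h5 above (hypothetical
+    uid and folder; two_time_qindex=i loads only one q-slice of a stored
+    two-time 'g12b' array instead of the full stack):
+
+        export_xpcs_results_to_h5( '%s_Res.h5'%uid, data_dir, export_dict )
+        res = extract_xpcs_results_from_h5( '%s_Res.h5'%uid, data_dir )
+        taus, g2 = res['taus'], res['g2']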
+ ''' + + import pandas as pds import numpy as np - import pandas as pds - - extract_dict = {} + extract_dict = {} fp = import_dir + filename pds_type_keys = [] - dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p", "taus_uids", "g2_uids"] + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] if exclude_keys is None: - exclude_keys = [] + exclude_keys =[] if onekey is None: for k in dicts: extract_dict[k] = {} - with h5py.File(fp, "r") as hf: - # print (list( hf.keys()) ) - for key in list(hf.keys()): + with h5py.File( fp, 'r') as hf: + #print (list( hf.keys()) ) + for key in list( hf.keys()): if key not in exclude_keys: if key in dicts: md = hf.get(key) for key_ in list(md.attrs): - # print(key, key_) - if key == "qval_dict": - extract_dict[key][int(key_)] = md.attrs[key_] + #print(key, key_) + if key == 'qval_dict': + extract_dict[key][int(key_)] = md.attrs[key_] else: - extract_dict[key][key_] = md.attrs[key_] + extract_dict[key][key_] = md.attrs[key_] - elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: - pds_type_keys.append(key) - else: - if key == "g12b": + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + pds_type_keys.append( key ) + else: + if key == 'g12b': if two_time_qindex is not None: - extract_dict[key] = hf.get(key)[:, :, two_time_qindex] + extract_dict[key] = hf.get( key )[:,:,two_time_qindex] else: - extract_dict[key] = hf.get(key)[:] - else: - extract_dict[key] = hf.get(key)[:] # np.array( hf.get( key )) - + extract_dict[key] = hf.get( key )[:] + else: + extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key )) + for key in pds_type_keys: if key not in exclude_keys: - extract_dict[key] = pds.read_hdf(fp, key=key) + extract_dict[key] = pds.read_hdf(fp, key= key ) else: - if onekey == "md": - with h5py.File(fp, "r") as hf: - md = hf.get("md") + if onekey == 'md': + with h5py.File( fp, 'r') as hf: + md = hf.get('md') for key in list(md.attrs): - extract_dict["md"][key] = md.attrs[key] - elif onekey in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: - extract_dict[onekey] = pds.read_hdf(fp, key=onekey) + extract_dict['md'][key] = md.attrs[key] + elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) else: try: - with h5py.File(fp, "r") as hf: - if key == "g12b": + with h5py.File( fp, 'r') as hf: + if key == 'g12b': if two_time_qindex is not None: - extract_dict[key] = hf.get(key)[:, :, two_time_qindex] + extract_dict[key] = hf.get( key )[:,:,two_time_qindex] else: - extract_dict[key] = hf.get(key)[:] - else: - extract_dict[key] = hf.get(key)[:] # np.array( hf.get( key )) - # extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) + extract_dict[key] = hf.get( key )[:] + else: + extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key )) + #extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) except: - print("The %s dosen't have this %s value" % (fp, onekey)) + print("The %s dosen't have this %s value"%(fp, onekey) ) return extract_dict -def read_contrast_from_multi_csv(uids, path, times=None, unit=20): - """Y.G. 2016, Dec 23, load contrast from multi csv file""" - N = len(uids) + + +def read_contrast_from_multi_csv( uids, path, times=None, unit=20 ): + '''Y.G. 
2016, Dec 23, load contrast from multi csv file''' + + N = len(uids) if times is None: - times = np.array([0] + [2**i for i in range(N)]) * unit - for i, uid in enumerate(uids): - fp = path + uid + "/uid=%s--contrast_factorL.csv" % uid - contri = pds.read_csv(fp) - qs = np.array(contri[contri.columns[0]]) - contri_ = np.array(contri[contri.columns[1]]) - if i == 0: - contr = np.zeros([N, len(qs)]) + times = np.array( [0] + [2**i for i in range(N)] )*unit + for i, uid in enumerate(uids): + fp = path + uid + '/uid=%s--contrast_factorL.csv'%uid + contri = pds.read_csv( fp ) + qs = np.array( contri[contri.columns[0]] ) + contri_ = np.array( contri[contri.columns[1]] ) + if i ==0: + contr = np.zeros( [ N, len(qs)]) contr[i] = contri_ - # contr[0,:] = np.nan + #contr[0,:] = np.nan return times, contr - -def read_contrast_from_multi_h5( - uids, - path, -): - """Y.G. 2016, Dec 23, load contrast from multi h5 file""" - N = len(uids) - times_xsvs = np.zeros(N) - for i, uid in enumerate(uids): - t = extract_xpcs_results_from_h5( - filename="%s_Res.h5" % uid, import_dir=path + uid + "/", onekey="times_xsvs" - ) - times_xsvs[i] = t["times_xsvs"][0] - contri = extract_xpcs_results_from_h5( - filename="%s_Res.h5" % uid, import_dir=path + uid + "/", onekey="contrast_factorL" - ) - if i == 0: - contr = np.zeros([N, contri["contrast_factorL"].shape[0]]) - contr[i] = contri["contrast_factorL"][:, 0] +def read_contrast_from_multi_h5( uids, path, ): + '''Y.G. 2016, Dec 23, load contrast from multi h5 file''' + N = len(uids) + times_xsvs = np.zeros( N ) + for i, uid in enumerate(uids): + t = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, + import_dir = path + uid + '/' , onekey= 'times_xsvs') + times_xsvs[i] = t['times_xsvs'][0] + contri = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, + import_dir = path + uid + '/' , onekey= 'contrast_factorL') + if i ==0: + contr = np.zeros( [ N, contri['contrast_factorL'].shape[0] ]) + contr[i] = contri['contrast_factorL'][:,0] return times_xsvs, contr + + + + + + diff --git a/pyCHX/chx_compress.py b/pyCHX/chx_compress.py index d8f5d6c..706cf7e 100644 --- a/pyCHX/chx_compress.py +++ b/pyCHX/chx_compress.py @@ -1,857 +1,610 @@ -import gc -import os -import pickle as pkl -import shutil -import struct -import sys -from contextlib import closing +import os,shutil from glob import iglob -from multiprocessing import Pool -import dill import matplotlib.pyplot as plt +from pyCHX.chx_libs import (np, roi, time, datetime, os, getpass, db, + LogNorm, RUN_GUI) +from pyCHX.chx_generic_functions import (create_time_slice,get_detector, get_sid_filenames, + load_data,reverse_updown,rot90_clockwise, get_eigerImage_per_file,copy_data,delete_data, ) -# imports handler from CHX -# this is where the decision is made whether or not to use dask -# from chxtools.handlers import EigerImages, EigerHandler -from eiger_io.fs_handler import EigerHandler, EigerImages -from tqdm import tqdm -from pyCHX.chx_generic_functions import ( - copy_data, - create_time_slice, - delete_data, - get_detector, - get_eigerImage_per_file, - get_sid_filenames, - load_data, - reverse_updown, - rot90_clockwise, -) -from pyCHX.chx_libs import RUN_GUI, LogNorm, datetime, db, getpass, np, os, roi, time +import struct +from tqdm import tqdm +from contextlib import closing +from multiprocessing import Pool +import dill +import sys +import gc +import pickle as pkl +# imports handler from CHX +# this is where the decision is made whether or not to use dask +#from chxtools.handlers import EigerImages, 
EigerHandler +from eiger_io.fs_handler import EigerHandler,EigerImages -def run_dill_encoded(what): +def run_dill_encoded(what): fun, args = dill.loads(what) return fun(*args) - -def apply_async(pool, fun, args, callback=None): - return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),), callback=callback) +def apply_async(pool, fun, args, callback=None): + return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback) -def map_async(pool, fun, args): - return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) +def map_async(pool, fun, args ): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) - -def pass_FD(FD, n): - # FD.rdframe(n) + +def pass_FD(FD,n): + #FD.rdframe(n) try: FD.seekimg(n) except: pass return False - - -def go_through_FD(FD): - if not pass_FD(FD, FD.beg): +def go_through_FD(FD): + if not pass_FD(FD,FD.beg): for i in range(FD.beg, FD.end): - pass_FD(FD, i) + pass_FD(FD,i) else: pass - - -def compress_eigerdata( - images, - mask, - md, - filename=None, - force_compress=False, - bad_pixel_threshold=1e15, - bad_pixel_low_threshold=0, - hot_pixel_threshold=2**30, - nobytes=2, - bins=1, - bad_frame_list=None, - para_compress=False, - num_sub=100, - dtypes="uid", - reverse=True, - rot90=False, - num_max_para_process=500, - with_pickle=False, - direct_load_data=True, - data_path=None, - images_per_file=100, - copy_rawdata=True, - new_path="/tmp_data/data/", -): - """ + + + + + +def compress_eigerdata( images, mask, md, filename=None, force_compress=False, + bad_pixel_threshold=1e15, bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, nobytes=2,bins=1, bad_frame_list=None, + para_compress= False, num_sub=100, dtypes='uid',reverse =True, rot90=False, + num_max_para_process=500, with_pickle=False, direct_load_data=True, data_path=None, + images_per_file=100, copy_rawdata=True,new_path = '/tmp_data/data/'): + ''' Init 2016, YG@CHX DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data Add copy_rawdata opt. - - """ - - end = len(images) // bins + + ''' + + end= len(images)//bins if filename is None: - filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"] - if dtypes != "uid": - para_compress = False + filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid'] + if dtypes!= 'uid': + para_compress= False else: if para_compress: - images = "foo" - # para_compress= True - # print( dtypes ) - if direct_load_data: - images_per_file = get_eigerImage_per_file(data_path) + images='foo' + #para_compress= True + #print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file( data_path ) if data_path is None: sud = get_sid_filenames(db[uid]) data_path = sud[2][0] if force_compress: - print("Create a new compress file with filename as :%s." % filename) + print ("Create a new compress file with filename as :%s."%filename) if para_compress: # stop connection to be before forking... 
(let it reset again) db.reg.disconnect() db.mds.reset_connection() - print("Using a multiprocess to compress the data.") - return para_compress_eigerdata( - images, - mask, - md, - filename, - bad_pixel_threshold=bad_pixel_threshold, - hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - nobytes=nobytes, - bins=bins, - num_sub=num_sub, - dtypes=dtypes, - rot90=rot90, - reverse=reverse, - num_max_para_process=num_max_para_process, - with_pickle=with_pickle, - direct_load_data=direct_load_data, - data_path=data_path, - images_per_file=images_per_file, - copy_rawdata=copy_rawdata, - new_path=new_path, - ) + print( 'Using a multiprocess to compress the data.') + return para_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, + bins=bins, num_sub=num_sub, dtypes=dtypes, rot90=rot90, + reverse=reverse, num_max_para_process=num_max_para_process, + with_pickle= with_pickle, direct_load_data= direct_load_data, + data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata,new_path=new_path) else: - return init_compress_eigerdata( - images, - mask, - md, - filename, - bad_pixel_threshold=bad_pixel_threshold, - hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - nobytes=nobytes, - bins=bins, - with_pickle=with_pickle, - direct_load_data=direct_load_data, - data_path=data_path, - images_per_file=images_per_file, - ) + return init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, + images_per_file=images_per_file) else: - if not os.path.exists(filename): - print("Create a new compress file with filename as :%s." 
% filename) + if not os.path.exists( filename ): + print ("Create a new compress file with filename as :%s."%filename) if para_compress: - print("Using a multiprocess to compress the data.") - return para_compress_eigerdata( - images, - mask, - md, - filename, - bad_pixel_threshold=bad_pixel_threshold, - hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - nobytes=nobytes, - bins=bins, - num_sub=num_sub, - dtypes=dtypes, - reverse=reverse, - rot90=rot90, - num_max_para_process=num_max_para_process, - with_pickle=with_pickle, - direct_load_data=direct_load_data, - data_path=data_path, - images_per_file=images_per_file, - copy_rawdata=copy_rawdata, - ) + print( 'Using a multiprocess to compress the data.') + return para_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins, + num_sub=num_sub, dtypes=dtypes, reverse=reverse,rot90=rot90, + num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata) else: - return init_compress_eigerdata( - images, - mask, - md, - filename, - bad_pixel_threshold=bad_pixel_threshold, - hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - nobytes=nobytes, - bins=bins, - with_pickle=with_pickle, - direct_load_data=direct_load_data, - data_path=data_path, - images_per_file=images_per_file, - ) - else: - print("Using already created compressed file with filename as :%s." % filename) - beg = 0 - return read_compressed_eigerdata( - mask, - filename, - beg, - end, - bad_pixel_threshold=bad_pixel_threshold, - hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - bad_frame_list=bad_frame_list, - with_pickle=with_pickle, - direct_load_data=direct_load_data, - data_path=data_path, - images_per_file=images_per_file, - ) - - -def read_compressed_eigerdata( - mask, - filename, - beg, - end, - bad_pixel_threshold=1e15, - hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0, - bad_frame_list=None, - with_pickle=False, - direct_load_data=False, - data_path=None, - images_per_file=100, -): - """ - Read already compress eiger data - Return - mask - avg_img - imsum - bad_frame_list - - """ - # should use try and except instead of with_pickle in the future! 
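+        # Serial fallback of the para_compress branch above: one process writes
+        # the .cmp file and, like the parallel path, returns a
+        # (mask, avg_img, imgsum, bad_frame_list) tuple. A minimal call sketch
+        # (hypothetical images/mask/md; direct_load_data=False keeps it from
+        # needing a data_path):
+        #     mask, avg_img, imgsum, bad_frames = compress_eigerdata(
+        #         images, mask, md, para_compress=False, direct_load_data=False )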
+ return init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) + else: + print ("Using already created compressed file with filename as :%s."%filename) + beg=0 + return read_compressed_eigerdata( mask, filename, beg, end, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) + + + +def read_compressed_eigerdata( mask, filename, beg, end, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False, + direct_load_data=False,data_path=None,images_per_file=100): + ''' + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + ''' + #should use try and except instead of with_pickle in the future! CAL = False if not with_pickle: CAL = True else: try: - mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) + mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) ) except: CAL = True - if CAL: - FD = Multifile(filename, beg, end) - imgsum = np.zeros(FD.end - FD.beg, dtype=np.float64) - avg_img = np.zeros([FD.md["ncols"], FD.md["nrows"]], dtype=np.float64) - imgsum, bad_frame_list_ = get_each_frame_intensityc( - FD, - sampling=1, - bad_pixel_threshold=bad_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - hot_pixel_threshold=hot_pixel_threshold, - plot_=False, - bad_frame_list=bad_frame_list, - ) - avg_img = get_avg_imgc(FD, beg=None, end=None, sampling=1, plot_=False, bad_frame_list=bad_frame_list_) - FD.FID.close() - - return mask, avg_img, imgsum, bad_frame_list_ - - -def para_compress_eigerdata( - images, - mask, - md, - filename, - num_sub=100, - bad_pixel_threshold=1e15, - hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0, - nobytes=4, - bins=1, - dtypes="uid", - reverse=True, - rot90=False, - num_max_para_process=500, - cpu_core_number=72, - with_pickle=True, - direct_load_data=False, - data_path=None, - images_per_file=100, - copy_rawdata=True, - new_path="/tmp_data/data/", -): - data_path_ = data_path - if dtypes == "uid": - uid = md["uid"] # images + if CAL: + FD = Multifile( filename, beg, end) + imgsum = np.zeros( FD.end- FD.beg, dtype= np.float64 ) + avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float64 ) + imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1, + bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, plot_ = False, + bad_frame_list=bad_frame_list) + avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ ) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + +def para_compress_eigerdata( images, mask, md, filename, num_sub=100, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,rot90=False, + num_max_para_process=500, cpu_core_number=72, with_pickle=True, + direct_load_data=False, data_path=None,images_per_file=100, + copy_rawdata=True,new_path = 
'/tmp_data/data/'): + + data_path_ = data_path + if dtypes=='uid': + uid= md['uid'] #images if not direct_load_data: - detector = get_detector(db[uid]) - images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + detector = get_detector( db[uid ] ) + images_ = load_data( uid, detector, reverse= reverse,rot90=rot90 ) else: - # print('Here for images_per_file: %s'%images_per_file) - # images_ = EigerImages( data_path, images_per_file=images_per_file) - # print('here') - if not copy_rawdata: - images_ = EigerImages(data_path, images_per_file, md) + #print('Here for images_per_file: %s'%images_per_file) + #images_ = EigerImages( data_path, images_per_file=images_per_file) + #print('here') + if not copy_rawdata: + images_ = EigerImages(data_path,images_per_file, md) else: - print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.") - print("Copying...") - copy_data(data_path, new_path) - # print(data_path, new_path) - new_master_file = new_path + os.path.basename(data_path) - data_path_ = new_master_file - images_ = EigerImages(new_master_file, images_per_file, md) - # print(md) + print('Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.') + print('Copying...') + copy_data( data_path, new_path ) + #print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages( new_master_file, images_per_file, md) + #print(md) if reverse: - images_ = reverse_updown(images_) # Why not np.flipud? - if rot90: - images_ = rot90_clockwise(images_) - - N = len(images_) - + images_ = reverse_updown( images_ ) # Why not np.flipud? + if rot90: + images_ = rot90_clockwise( images_ ) + + N= len(images_) + else: - N = len(images) - N = int(np.ceil(N / bins)) - Nf = int(np.ceil(N / num_sub)) - if Nf > cpu_core_number: - print("The process number is larger than %s (XF11ID server core number)" % cpu_core_number) + N = len(images) + N = int( np.ceil( N/ bins ) ) + Nf = int( np.ceil( N/ num_sub ) ) + if Nf > cpu_core_number: + print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number) num_sub_old = num_sub - num_sub = int(np.ceil(N / cpu_core_number)) - Nf = int(np.ceil(N / num_sub)) - print("The sub compressed file number was changed from %s to %s" % (num_sub_old, num_sub)) - create_compress_header(md, filename + "-header", nobytes, bins, rot90=rot90) - # print( 'done for header here') - # print(data_path_, images_per_file) - results = para_segment_compress_eigerdata( - images=images, - mask=mask, - md=md, - filename=filename, - num_sub=num_sub, - bad_pixel_threshold=bad_pixel_threshold, - hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, - nobytes=nobytes, - bins=bins, - dtypes=dtypes, - num_max_para_process=num_max_para_process, - reverse=reverse, - rot90=rot90, - direct_load_data=direct_load_data, - data_path=data_path_, - images_per_file=images_per_file, - ) - - res_ = [results[k].get() for k in list(sorted(results.keys()))] - imgsum = np.zeros(N) - bad_frame_list = np.zeros(N, dtype=bool) + num_sub = int( np.ceil(N/cpu_core_number)) + Nf = int( np.ceil( N/ num_sub ) ) + print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub )) + create_compress_header( md, filename +'-header', nobytes, bins, rot90=rot90 ) + #print( 'done for header here') + #print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( images=images, mask=mask, 
md=md,filename=filename,
+                num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
+                bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes,
+                num_max_para_process=num_max_para_process,
+                reverse = reverse,rot90=rot90,
+                direct_load_data=direct_load_data, data_path=data_path_,
+                images_per_file=images_per_file)
+
+    res_ = [ results[k].get() for k in list(sorted(results.keys())) ]
+    imgsum = np.zeros( N )
+    bad_frame_list = np.zeros( N, dtype=bool )
     good_count = 1
-    for i in range(Nf):
+    for i in range( Nf ):
         mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i]
-        imgsum[i * num_sub : (i + 1) * num_sub] = imgsum_
-        bad_frame_list[i * num_sub : (i + 1) * num_sub] = bad_frame_list_
-        if i == 0:
+        imgsum[i*num_sub: (i+1)*num_sub] = imgsum_
+        bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_
+        if i==0:
             mask = mask_
-            avg_img = np.zeros_like(avg_img_)
+            avg_img = np.zeros_like( avg_img_ )
         else:
-            mask *= mask_
-            if not np.sum(np.isnan(avg_img_)):
-                avg_img += avg_img_
+            mask *= mask_
+            if not np.sum( np.isnan( avg_img_)):
+                avg_img += avg_img_
                 good_count += 1
-
-    bad_frame_list = np.where(bad_frame_list)[0]
-    avg_img /= good_count
-
+
+    bad_frame_list = np.where( bad_frame_list )[0]
+    avg_img /= good_count
+
     if len(bad_frame_list):
-        print("Bad frame list are: %s" % bad_frame_list)
+        print ('Bad frame list is: %s' %bad_frame_list)
     else:
-        print("No bad frames are involved.")
-    print("Combining the seperated compressed files together...")
-    combine_compressed(filename, Nf, del_old=True)
+        print ('No bad frames are involved.')
+    print( 'Combining the separate compressed files together...')
+    combine_compressed( filename, Nf, del_old=True)
     del results
     del res_
-    if with_pickle:
-        pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb"))
+    if with_pickle:
+        pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
     if copy_rawdata:
-        delete_data(data_path, new_path)
-    return mask, avg_img, imgsum, bad_frame_list
-
+        delete_data( data_path, new_path )
+    return mask, avg_img, imgsum, bad_frame_list

-def combine_compressed(filename, Nf, del_old=True):
-    old_files = [filename + "-header"]
+def combine_compressed( filename, Nf, del_old=True):
+    old_files = [filename +'-header']
     for i in range(Nf):
-        old_files.append(filename + "_temp-%i.tmp")
-    combine_binary_files(filename, old_files, del_old)
-
-
-def combine_binary_files(filename, old_files, del_old=False):
-    """Combine binary files together"""
-    fn_ = open(filename, "wb")
-    for ftemp in old_files:
-        shutil.copyfileobj(open(ftemp, "rb"), fn_)
-    if del_old:
-        os.remove(ftemp)
-    fn_.close()
-
-
-def para_segment_compress_eigerdata(
-    images,
-    mask,
-    md,
-    filename,
-    num_sub=100,
-    bad_pixel_threshold=1e15,
-    hot_pixel_threshold=2**30,
-    bad_pixel_low_threshold=0,
-    nobytes=4,
-    bins=1,
-    dtypes="images",
-    reverse=True,
-    rot90=False,
-    num_max_para_process=50,
-    direct_load_data=False,
-    data_path=None,
-    images_per_file=100,
-):
-    """
+        #each segment i gets its own temporary file (note the % i)
+        old_files.append(filename + '_temp-%i.tmp' % i)
+    combine_binary_files(filename, old_files, del_old)
+
+def combine_binary_files(filename, old_files, del_old = False):
+    '''Combine binary files together'''
+    fn_ = open(filename, 'wb')
+    for ftemp in old_files:
+        shutil.copyfileobj( open(ftemp, 'rb'), fn_)
+        if del_old:
+            #remove each temporary file right after it is appended, so that
+            #every file in old_files is cleaned up, not only the last one
+            os.remove( ftemp )
+    fn_.close()
+
+def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100,
+                        bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
+                        
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images',
+                        reverse =True, rot90=False,
+                        num_max_para_process=50,direct_load_data=False, data_path=None,
+                        images_per_file=100):
+    '''
-    parallelly compressed eiger data without header, this function is for parallel compress
-    """
-    if dtypes == "uid":
-        uid = md["uid"]  # images
+    Compress Eiger data segments in parallel, without writing the file header;
+    this function dispatches the per-segment jobs for parallel compression.
+    '''
+    if dtypes=='uid':
+        uid= md['uid'] #images
         if not direct_load_data:
-            detector = get_detector(db[uid])
-            images_ = load_data(uid, detector, reverse=reverse, rot90=rot90)
+            detector = get_detector( db[uid ] )
+            images_ = load_data( uid, detector, reverse= reverse, rot90=rot90 )
         else:
             images_ = EigerImages(data_path, images_per_file, md)
         if reverse:
-            images_ = reverse_updown(images_)
-        if rot90:
-            images_ = rot90_clockwise(images_)
-
-        N = len(images_)
-
+            images_ = reverse_updown( images_ )
+        if rot90:
+            images_ = rot90_clockwise( images_ )
+
+        N= len(images_)
+
     else:
-        N = len(images)
-
-    # N = int( np.ceil( N/ bins ) )
-    num_sub *= bins
-    if N % num_sub:
-        Nf = N // num_sub + 1
-        print("The average image intensity would be slightly not correct, about 1% error.")
-        print("Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image")
+        N = len(images)
+
+    #N = int( np.ceil( N/ bins ) )
+    num_sub *= bins
+    if N%num_sub:
+        Nf = N// num_sub +1
+        print('The average image intensity will be slightly inaccurate, with about a 1% error.')
+        print( 'Please give a num_sub that divides Num_images with zero remainder to get a correct avg_image.')
     else:
-        Nf = N // num_sub
-        print("It will create %i temporary files for parallel compression." % Nf)
+        Nf = N//num_sub
+        print( 'It will create %i temporary files for parallel compression.'%Nf)

-    if Nf > num_max_para_process:
-        N_runs = np.int(np.ceil(Nf / float(num_max_para_process)))
-        print("The parallel run number: %s is larger than num_max_para_process: %s" % (Nf, num_max_para_process))
+    if Nf> num_max_para_process:
+        #use the builtin int; the np.int alias was removed in NumPy >= 1.24
+        N_runs = int( np.ceil( Nf/float(num_max_para_process)))
+        print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process ))
     else:
-        N_runs = 1
-    result = {}
-    # print( mask_filename )# + '*'* 10 + 'here' )
-    for nr in range(N_runs):
-        if (nr + 1) * num_max_para_process > Nf:
-            inputs = range(num_max_para_process * nr, Nf)
-        else:
-            inputs = range(num_max_para_process * nr, num_max_para_process * (nr + 1))
-        fns = [filename + "_temp-%i.tmp" % i for i in inputs]
-        # print( nr, inputs, )
-        pool = Pool(processes=len(inputs))  # , maxtasksperchild=1000 )
-        # print( inputs )
-        for i in inputs:
-            if i * num_sub <= N:
-                result[i] = pool.apply_async(
-                    segment_compress_eigerdata,
-                    [
-                        images,
-                        mask,
-                        md,
-                        filename + "_temp-%i.tmp" % i,
-                        bad_pixel_threshold,
-                        hot_pixel_threshold,
-                        bad_pixel_low_threshold,
-                        nobytes,
-                        bins,
-                        i * num_sub,
-                        (i + 1) * num_sub,
-                        dtypes,
-                        reverse,
-                        rot90,
-                        direct_load_data,
-                        data_path,
-                        images_per_file,
-                    ],
-                )
-
+        N_runs= 1
+    result = {}
+    #print( mask_filename )# + '*'* 10 + 'here' )
+    for nr in range( N_runs ):
+        if (nr+1)*num_max_para_process > Nf:
+            inputs= range( num_max_para_process*nr, Nf )
+        else:
+            inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) )
+        fns = [ filename + '_temp-%i.tmp'%i for i in inputs]
+        #print( nr, inputs, )
+        pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 )
+        #print( inputs )
+        for i in inputs:
+            if i*num_sub <= N:
+                result[i] = pool.apply_async( segment_compress_eigerdata, [
+                        images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, 
hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,rot90, direct_load_data, data_path,images_per_file ] ) + pool.close() pool.join() - pool.terminate() - return result - - -def segment_compress_eigerdata( - images, - mask, - md, - filename, - bad_pixel_threshold=1e15, - hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0, - nobytes=4, - bins=1, - N1=None, - N2=None, - dtypes="images", - reverse=True, - rot90=False, - direct_load_data=False, - data_path=None, - images_per_file=100, -): - """ + pool.terminate() + return result + +def segment_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, + N1=None, N2=None, dtypes='images',reverse =True, rot90=False,direct_load_data=False, data_path=None,images_per_file=100 ): + ''' Create a compressed eiger data without header, this function is for parallel compress for parallel compress don't pass any non-scalar parameters - """ - if dtypes == "uid": - uid = md["uid"] # images + ''' + if dtypes=='uid': + uid= md['uid'] #images if not direct_load_data: - detector = get_detector(db[uid]) - images = load_data(uid, detector, reverse=reverse, rot90=rot90)[N1:N2] - else: - images = EigerImages(data_path, images_per_file, md)[N1:N2] + detector = get_detector( db[uid ] ) + images = load_data( uid, detector, reverse= reverse, rot90=rot90 )[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] if reverse: - images = reverse_updown(EigerImages(data_path, images_per_file, md))[N1:N2] - if rot90: - images = rot90_clockwise(images) - - Nimg_ = len(images) - M, N = images[0].shape - avg_img = np.zeros([M, N], dtype=np.float64) - Nopix = float(avg_img.size) - n = 0 + images = reverse_updown( EigerImages(data_path, images_per_file, md) )[N1:N2] + if rot90: + images = rot90_clockwise( images ) + + Nimg_ = len( images) + M,N = images[0].shape + avg_img = np.zeros( [M,N], dtype= np.float64 ) + Nopix = float( avg_img.size ) + n=0 good_count = 0 - # frac = 0.0 - if nobytes == 2: - dtype = np.int16 - elif nobytes == 4: - dtype = np.int32 - elif nobytes == 8: - dtype = np.float64 + #frac = 0.0 + if nobytes==2: + dtype= np.int16 + elif nobytes==4: + dtype= np.int32 + elif nobytes==8: + dtype=np.float64 else: - print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") - dtype = np.int32 - - # Nimg = Nimg_//bins - Nimg = int(np.ceil(Nimg_ / bins)) - time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) - # print( time_edge, Nimg_, Nimg, bins, N1, N2 ) - imgsum = np.zeros(Nimg) - if bins != 1: - # print('The frames will be binned by %s'%bins) - dtype = np.float64 - - fp = open(filename, "wb") - for n in range(Nimg): - t1, t2 = time_edge[n] - if bins != 1: - img = np.array(np.average(images[t1:t2], axis=0), dtype=dtype) + print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype= np.int32 + + + #Nimg = Nimg_//bins + Nimg = int( np.ceil( Nimg_ / bins ) ) + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bins )) + #print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros( Nimg ) + if bins!=1: + #print('The frames will be binned by %s'%bins) + dtype=np.float64 + + fp = open( filename,'wb' ) + for n in range(Nimg): + t1,t2 = time_edge[n] + if bins!=1: + img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype) else: - img = np.array(images[t1], dtype=dtype) - mask &= img < 
hot_pixel_threshold - p = np.where((np.ravel(img) > 0) * np.ravel(mask))[0] # don't use masked data - v = np.ravel(np.array(img, dtype=dtype))[p] - dlen = len(p) - imgsum[n] = v.sum() - if (dlen == 0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + img = np.array( images[t1], dtype=dtype) + mask &= img < hot_pixel_threshold + p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data + v = np.ravel( np.array( img, dtype= dtype )) [p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): dlen = 0 - fp.write(struct.pack("@I", dlen)) - else: - np.ravel(avg_img)[p] += v - good_count += 1 - fp.write(struct.pack("@I", dlen)) - fp.write(struct.pack("@{}i".format(dlen), *p)) - if bins == 1: - fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + fp.write( struct.pack( '@I', dlen )) + else: + np.ravel( avg_img )[p] += v + good_count +=1 + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *p)) + if bins==1: + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) else: - fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 - del p, v, img - fp.flush() - fp.close() - avg_img /= good_count - bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) - sys.stdout.write("#") - sys.stdout.flush() - # del images, mask, avg_img, imgsum, bad_frame_list - # print( 'Should release memory here') - return mask, avg_img, imgsum, bad_frame_list - - -def create_compress_header(md, filename, nobytes=4, bins=1, rot90=False): - """ + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1 + del p,v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write('#') + sys.stdout.flush() + #del images, mask, avg_img, imgsum, bad_frame_list + #print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + + +def create_compress_header( md, filename, nobytes=4, bins=1, rot90=False ): + ''' Create the head for a compressed eiger data, this function is for parallel compress - """ - fp = open(filename, "wb") - # Make Header 1024 bytes - # md = images.md - if bins != 1: - nobytes = 8 - flag = True - # print( list(md.keys()) ) - # print(md) - if "pixel_mask" in list(md.keys()): - sx, sy = md["pixel_mask"].shape[0], md["pixel_mask"].shape[1] - elif "img_shape" in list(md.keys()): - sx, sy = md["img_shape"][0], md["img_shape"][1] + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + flag = True + #print( list(md.keys()) ) + #print(md) + if 'pixel_mask' in list(md.keys()): + sx,sy = md['pixel_mask'].shape[0], md['pixel_mask'].shape[1] + elif 'img_shape' in list(md.keys()): + sx,sy = md['img_shape'][0], md['img_shape'][1] else: - sx, sy = 2167, 2070 # by default for 4M - # print(flag) - klst = [ - "beam_center_x", - "beam_center_y", - "count_time", - "detector_distance", - "frame_time", - "incident_wavelength", - "x_pixel_size", - "y_pixel_size", - ] - vs = [0, 0, 0, 0, 0, 0, 75, 75] + sx,sy= 2167, 2070 #by default for 4M + #print(flag) + klst = [ 'beam_center_x','beam_center_y', 'count_time','detector_distance', + 'frame_time','incident_wavelength', 'x_pixel_size','y_pixel_size'] + vs = [ 0 ,0, 0, 0, + 0, 0, 75, 75] for i, k in enumerate(klst): if k in 
list(md.keys()): - vs[i] = md[k] - if flag: + vs[i] = md[k] + if flag: if rot90: - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMP0001", - vs[0], - vs[1], - vs[2], - vs[3], - vs[4], - vs[5], - vs[6], - vs[7], - nobytes, - sx, - sy, - 0, - sx, - 0, - sy, - ) - + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + vs[0], vs[1], vs[2], vs[3], + vs[4], vs[5], vs[6], vs[7], + nobytes,sx, sy, + 0, sx, + 0,sy ) + else: - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMP0001", - vs[0], - vs[1], - vs[2], - vs[3], - vs[4], - vs[5], - vs[6], - vs[7], - # md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], - nobytes, - sy, - sx, - 0, - sy, - 0, - sx, - ) - - fp.write(Header) - fp.close() - - -def init_compress_eigerdata( - images, - mask, - md, - filename, - bad_pixel_threshold=1e15, - hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0, - nobytes=4, - bins=1, - with_pickle=True, - reverse=True, - rot90=False, - direct_load_data=False, - data_path=None, - images_per_file=100, -): - """ - Compress the eiger data - - Create a new mask by remove hot_pixel - Do image average - Do each image sum - Find badframe_list for where image sum above bad_pixel_threshold - Generate a compressed data with filename - - if bins!=1, will bin the images with bin number as bins - - Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', - 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', - bytes per pixel (either 2 or 4 (Default)), - Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] - - Return - mask - avg_img - imsum - bad_frame_list - - """ - fp = open(filename, "wb") - # Make Header 1024 bytes - # md = images.md - if bins != 1: - nobytes = 8 - if "count_time" not in list(md.keys()): - md["count_time"] = 0 - if "detector_distance" not in list(md.keys()): - md["detector_distance"] = 0 - if "frame_time" not in list(md.keys()): - md["frame_time"] = 0 - if "incident_wavelength" not in list(md.keys()): - md["incident_wavelength"] = 0 - if "y_pixel_size" not in list(md.keys()): - md["y_pixel_size"] = 0 - if "x_pixel_size" not in list(md.keys()): - md["x_pixel_size"] = 0 - if "beam_center_x" not in list(md.keys()): - md["beam_center_x"] = 0 - if "beam_center_y" not in list(md.keys()): - md["beam_center_y"] = 0 - + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + vs[0], vs[1], vs[2], vs[3], + vs[4], vs[5], vs[6], vs[7], +#md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, sy,sx, + 0, sy, + 0, sx + ) + + + + fp.write( Header) + fp.close() + + + +def init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True, + reverse =True, rot90=False, + direct_load_data=False, data_path=None,images_per_file=100, + ): + ''' + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 
'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + if 'count_time' not in list( md.keys() ): + md['count_time']=0 + if 'detector_distance' not in list( md.keys() ): + md['detector_distance']=0 + if 'frame_time' not in list( md.keys() ): + md['frame_time']=0 + if 'incident_wavelength' not in list( md.keys() ): + md['incident_wavelength']=0 + if 'y_pixel_size' not in list( md.keys() ): + md['y_pixel_size']=0 + if 'x_pixel_size' not in list( md.keys() ): + md['x_pixel_size']=0 + if 'beam_center_x' not in list( md.keys() ): + md['beam_center_x']=0 + if 'beam_center_y' not in list( md.keys() ): + md['beam_center_y']=0 + if not rot90: - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMP0001", - md["beam_center_x"], - md["beam_center_y"], - md["count_time"], - md["detector_distance"], - md["frame_time"], - md["incident_wavelength"], - md["x_pixel_size"], - md["y_pixel_size"], - nobytes, - md["pixel_mask"].shape[1], - md["pixel_mask"].shape[0], - 0, - md["pixel_mask"].shape[1], - 0, - md["pixel_mask"].shape[0], - ) + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0], + 0, md['pixel_mask'].shape[1], + 0, md['pixel_mask'].shape[0] + ) else: - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMP0001", - md["beam_center_x"], - md["beam_center_y"], - md["count_time"], - md["detector_distance"], - md["frame_time"], - md["incident_wavelength"], - md["x_pixel_size"], - md["y_pixel_size"], - nobytes, - md["pixel_mask"].shape[0], - md["pixel_mask"].shape[1], - 0, - md["pixel_mask"].shape[0], - 0, - md["pixel_mask"].shape[1], - ) - - fp.write(Header) - - Nimg_ = len(images) - avg_img = np.zeros_like(images[0], dtype=np.float64) - Nopix = float(avg_img.size) - n = 0 + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, md['pixel_mask'].shape[0], md['pixel_mask'].shape[1], + 0, md['pixel_mask'].shape[0], + 0, md['pixel_mask'].shape[1] + ) + + fp.write( Header) + + Nimg_ = len( images) + avg_img = np.zeros_like( images[0], dtype= np.float64 ) + Nopix = float( avg_img.size ) + n=0 good_count = 0 frac = 0.0 - if nobytes == 2: - dtype = np.int16 - elif nobytes == 4: - dtype = np.int32 - elif nobytes == 8: - dtype = np.float64 + if nobytes==2: + dtype= np.int16 + elif nobytes==4: + dtype= np.int32 + elif nobytes==8: + dtype=np.float64 else: - print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") - dtype = np.int32 - - Nimg = Nimg_ // bins - time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) - - imgsum = np.zeros(Nimg) - if bins != 1: - print("The frames will be binned by %s" % bins) - - for n in tqdm(range(Nimg)): - t1, t2 = time_edge[n] - img = np.average(images[t1:t2], axis=0) - mask &= img < hot_pixel_threshold - p = np.where((np.ravel(img) > 0) & np.ravel(mask))[0] # don't use masked data - v = np.ravel(np.array(img, dtype=dtype))[p] - dlen = len(p) + print ( "Wrong type of nobytes, only support 2 
[np.int16] or 4 [np.int32]") + dtype= np.int32 + + + Nimg = Nimg_//bins + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bins )) + + imgsum = np.zeros( Nimg ) + if bins!=1: + print('The frames will be binned by %s'%bins) + + for n in tqdm( range(Nimg) ): + t1,t2 = time_edge[n] + img = np.average( images[t1:t2], axis=0 ) + mask &= img < hot_pixel_threshold + p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data + v = np.ravel( np.array( img, dtype= dtype )) [p] + dlen = len(p) imgsum[n] = v.sum() - if (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): - # if imgsum[n] >=bad_pixel_threshold : + if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): + #if imgsum[n] >=bad_pixel_threshold : dlen = 0 - fp.write(struct.pack("@I", dlen)) - else: - np.ravel(avg_img)[p] += v - good_count += 1 - frac += dlen / Nopix - # s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) - fp.write(struct.pack("@I", dlen)) - fp.write(struct.pack("@{}i".format(dlen), *p)) - if bins == 1: - if nobytes != 8: - fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + fp.write( struct.pack( '@I', dlen )) + else: + np.ravel(avg_img )[p] += v + good_count +=1 + frac += dlen/Nopix + #s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *p)) + if bins==1: + if nobytes!=8: + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) else: - fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) else: - fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) - # n +=1 - - fp.close() - frac /= good_count - print("The fraction of pixel occupied by photon is %6.3f%% " % (100 * frac)) + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) + #n +=1 + + fp.close() + frac /=good_count + print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) ) avg_img /= good_count - - bad_frame_list = np.where( - (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) - )[0] - # bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] - # bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] - # bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) - + + bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0] + #bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + #bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + #bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + + if len(bad_frame_list): - print("Bad frame list are: %s" % bad_frame_list) + print ('Bad frame list are: %s' %bad_frame_list) else: - print("No bad frames are involved.") - if with_pickle: - pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) - return mask, avg_img, imgsum, bad_frame_list - + print ('No bad frames are involved.') + if with_pickle: + pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) + return mask, avg_img, imgsum, bad_frame_list + + """ Description: This is code that Mark wrote to open the multifile format @@ -874,246 +627,235 @@ def init_compress_eigerdata( |--------------IMG N+1 begin------------| 
|----------------etc.....---------------| - - Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', - 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', bytes per pixel (either 2 or 4 (Default)), - Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, - - + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + """ class Multifile: - """The class representing the multifile. - The recno is in 1 based numbering scheme (first record is 1) - This is efficient for reading in increasing order. - Note: reading same image twice in a row is like reading an earlier - numbered image and means the program starts for the beginning again. - - """ - - def __init__(self, filename, beg, end, reverse=False): - """Multifile initialization. Open the file. - Here I use the read routine which returns byte objects - (everything is an object in python). I use struct.unpack - to convert the byte object to other data type (int object - etc) - NOTE: At each record n, the file cursor points to record n+1 - """ - self.FID = open(filename, "rb") - # self.FID.seek(0,os.SEEK_SET) + '''The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + ''' + def __init__(self,filename,beg,end, reverse=False ): + '''Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). 
I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + ''' + self.FID = open(filename,"rb") +# self.FID.seek(0,os.SEEK_SET) self.filename = filename - # br: bytes read + #br: bytes read br = self.FID.read(1024) - self.beg = beg - self.end = end - self.reverse = reverse - ms_keys = [ - "beam_center_x", - "beam_center_y", - "count_time", - "detector_distance", - "frame_time", - "incident_wavelength", - "x_pixel_size", - "y_pixel_size", - "bytes", - "nrows", - "ncols", - "rows_begin", - "rows_end", - "cols_begin", - "cols_end", - ] - - magic = struct.unpack("@16s", br[:16]) - md_temp = struct.unpack("@8d7I916x", br[16:]) + self.beg=beg + self.end=end + self.reverse=reverse + ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + 'bytes', + 'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' + ] + + magic = struct.unpack('@16s', br[:16]) + md_temp = struct.unpack('@8d7I916x', br[16:]) self.md = dict(zip(ms_keys, md_temp)) - - self.imgread = 0 + + self.imgread=0 self.recno = 0 if reverse: - nrows = self.md["nrows"] - ncols = self.md["ncols"] - self.md["nrows"] = ncols - self.md["ncols"] = nrows - rbeg = self.md["rows_begin"] - rend = self.md["rows_end"] - cbeg = self.md["cols_begin"] - cend = self.md["cols_end"] - self.md["rows_begin"] = cbeg - self.md["rows_end"] = cend - self.md["cols_begin"] = rbeg - self.md["cols_end"] = rend - - # some initialization stuff - self.byts = self.md["bytes"] - if self.byts == 2: + nrows = self.md['nrows'] + ncols = self.md['ncols'] + self.md['nrows'] = ncols + self.md['ncols'] = nrows + rbeg = self.md['rows_begin'] + rend = self.md['rows_end'] + cbeg = self.md['cols_begin'] + cend = self.md['cols_end'] + self.md['rows_begin']=cbeg + self.md['rows_end']=cend + self.md['cols_begin']=rbeg + self.md['cols_end']=rend + + + + # some initialization stuff + self.byts = self.md['bytes'] + if (self.byts==2): self.valtype = np.uint16 - elif self.byts == 4: + elif (self.byts == 4): self.valtype = np.uint32 - elif self.byts == 8: + elif (self.byts == 8): self.valtype = np.float64 - # now convert pieces of these bytes to our data - self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] - + #now convert pieces of these bytes to our data + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + # now read first image - # print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + #print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) def _readHeader(self): - self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] def _readImageRaw(self): - p = np.fromfile(self.FID, dtype=np.int32, count=self.dlen) - v = np.fromfile(self.FID, dtype=self.valtype, count=self.dlen) - self.imgread = 1 - return (p, v) + + p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen) + v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen) + self.imgread=1 + return(p,v) def _readImage(self): - (p, v) = self._readImageRaw() - img = np.zeros((self.md["ncols"], self.md["nrows"])) - np.put(np.ravel(img), p, v) - return img - - def seekimg(self, n=None): - """Position file to read the nth image. 
- For now only reads first image ignores n - """ - # the logic involving finding the cursor position - if n is None: + (p,v)=self._readImageRaw() + img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) ) + np.put( np.ravel(img), p, v ) + return(img) + + def seekimg(self,n=None): + + '''Position file to read the nth image. + For now only reads first image ignores n + ''' + # the logic involving finding the cursor position + if (n is None): n = self.recno - if n < self.beg or n > self.end: - raise IndexError("Error, record out of range") - # print (n, self.recno, self.FID.tell() ) - if (n == self.recno) and (self.imgread == 0): - pass # do nothing - + if (n < self.beg or n > self.end): + raise IndexError('Error, record out of range') + #print (n, self.recno, self.FID.tell() ) + if ((n == self.recno) and (self.imgread==0)): + pass # do nothing + else: - if n <= self.recno: # ensure cursor less than search pos - self.FID.seek(1024, os.SEEK_SET) - self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + if (n <= self.recno): #ensure cursor less than search pos + self.FID.seek(1024,os.SEEK_SET) + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] self.recno = 0 - self.imgread = 0 + self.imgread=0 if n == 0: - return - # have to iterate on seeking since dlen varies - # remember for rec recno, cursor is always at recno+1 - if self.imgread == 0: # move to next header if need to - self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) - for i in range(self.recno + 1, n): - # the less seeks performed the faster - # print (i) - self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] - # print 's',self.dlen - self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + return + #have to iterate on seeking since dlen varies + #remember for rec recno, cursor is always at recno+1 + if(self.imgread==0 ): #move to next header if need to + self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) + for i in range(self.recno+1,n): + #the less seeks performed the faster + #print (i) + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + #print 's',self.dlen + self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) # we are now at recno in file, read the header and data - # self._clearImage() + #self._clearImage() self._readHeader() - self.imgread = 0 + self.imgread=0 self.recno = n + def rdframe(self,n): + if self.seekimg(n)!=-1: + return(self._readImage()) - def rdframe(self, n): - if self.seekimg(n) != -1: - return self._readImage() + def rdrawframe(self,n): + if self.seekimg(n)!=-1: + return(self._readImageRaw()) - def rdrawframe(self, n): - if self.seekimg(n) != -1: - return self._readImageRaw() + - -class Multifile_Bins(object): - """ +class Multifile_Bins( object ): + ''' Bin a compressed file with bins number See Multifile for details for Multifile_class - """ - + ''' def __init__(self, FD, bins=100): - """ + ''' FD: the handler of a compressed Eiger frames bins: bins number - """ - - self.FD = FD - if (FD.end - FD.beg) % bins: - print("Please give a better bins number and make the length of FD/bins= integer") - else: + ''' + + self.FD=FD + if (FD.end - FD.beg)%bins: + print ('Please give a better bins number and make the length of FD/bins= integer') + else: self.bins = bins self.md = FD.md - # self.beg = FD.beg + #self.beg = FD.beg self.beg = 0 - Nimg = FD.end - FD.beg - slice_num = Nimg // bins - self.end = slice_num - self.time_edge = np.array(create_time_slice(N=Nimg, slice_num=slice_num, slice_width=bins)) + FD.beg + Nimg = (FD.end - FD.beg) + slice_num = Nimg//bins + self.end = 
slice_num + self.time_edge = np.array(create_time_slice( N= Nimg, + slice_num= slice_num, slice_width= bins )) + FD.beg self.get_bin_frame() - - def get_bin_frame(self): - FD = self.FD - self.frames = np.zeros([FD.md["ncols"], FD.md["nrows"], len(self.time_edge)]) - for n in tqdm(range(len(self.time_edge))): - # print (n) - t1, t2 = self.time_edge[n] - # print( t1, t2) - self.frames[:, :, n] = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) - - def rdframe(self, n): - return self.frames[:, :, n] - - def rdrawframe(self, n): - x_ = np.ravel(self.rdframe(n)) - p = np.where(x_)[0] - v = np.array(x_[p]) - return (np.array(p, dtype=np.int32), v) - - + + def get_bin_frame(self): + FD= self.FD + self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] ) + for n in tqdm( range(len(self.time_edge))): + #print (n) + t1,t2 = self.time_edge[n] + #print( t1, t2) + self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, + plot_ = False, show_progress = False ) + def rdframe(self,n): + return self.frames[:,:,n] + + def rdrawframe(self,n): + x_= np.ravel( self.rdframe(n) ) + p= np.where( x_ ) [0] + v = np.array( x_[ p ]) + return ( np.array(p, dtype=np.int32), v) + + class MultifileBNL: - """ + ''' Re-write multifile from scratch. - """ - + ''' HEADER_SIZE = 1024 - - def __init__(self, filename, mode="rb"): - """ - Prepare a file for reading or writing. - mode : either 'rb' or 'wb' - """ - if mode == "wb": + def __init__(self, filename, mode='rb'): + ''' + Prepare a file for reading or writing. + mode : either 'rb' or 'wb' + ''' + if mode == 'wb': raise ValueError("Write mode 'wb' not supported yet") - if mode != "rb" and mode != "wb": - raise ValueError("Error, mode must be 'rb' or 'wb'" "got : {}".format(mode)) + if mode != 'rb' and mode != 'wb': + raise ValueError("Error, mode must be 'rb' or 'wb'" + "got : {}".format(mode)) self._filename = filename self._mode = mode # open the file descriptor # create a memmap - if mode == "rb": - self._fd = np.memmap(filename, dtype="c") - elif mode == "wb": + if mode == 'rb': + self._fd = np.memmap(filename, dtype='c') + elif mode == 'wb': self._fd = open(filename, "wb") # these are only necessary for writing self.md = self._read_main_header() - self._cols = int(self.md["nrows"]) - self._rows = int(self.md["ncols"]) + self._cols = int(self.md['nrows']) + self._rows = int(self.md['ncols']) # some initialization stuff - self.nbytes = self.md["bytes"] - if self.nbytes == 2: - self.valtype = " self.Nframes: raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) # dlen is 4 bytes cur = self.frame_indexes[n] - dlen = np.frombuffer(self._fd[cur : cur + 4], dtype=" nbytes - vals = self._fd[cur : cur + dlen * self.nbytes] + vals = self._fd[cur: cur+dlen*self.nbytes] vals = np.frombuffer(vals, dtype=self.valtype) return pos, vals - def rdframe(self, n): # read header then image pos, vals = self._read_raw(n) - img = np.zeros((self._rows * self._cols,)) + img = np.zeros((self._rows*self._cols,)) img[pos] = vals return img.reshape((self._rows, self._cols)) - def rdrawframe(self, n): # read header then image return self._read_raw(n) - - + class MultifileBNLCustom(MultifileBNL): def __init__(self, filename, beg=0, end=None, **kwargs): super().__init__(filename, **kwargs) self.beg = beg if end is None: - end = self.Nframes - 1 + end = self.Nframes-1 self.end = end - def rdframe(self, n): if n > self.end or n < self.beg: raise IndexError("Index out of range") - # return super().rdframe(n - 
self.beg) - return super().rdframe(n) - + #return super().rdframe(n - self.beg) + return super().rdframe( n ) def rdrawframe(self, n): - # return super().rdrawframe(n - self.beg) + #return super().rdrawframe(n - self.beg) if n > self.end or n < self.beg: - raise IndexError("Index out of range") - return super().rdrawframe(n) - - -def get_avg_imgc( - FD, beg=None, end=None, sampling=100, plot_=False, bad_frame_list=None, show_progress=True, *argv, **kwargs -): - """Get average imagef from a data_series by every sampling number to save time""" - # avg_img = np.average(data_series[:: sampling], axis=0) - + raise IndexError("Index out of range") + return super().rdrawframe(n ) + + + +def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None, + show_progress=True, *argv,**kwargs): + '''Get average imagef from a data_series by every sampling number to save time''' + #avg_img = np.average(data_series[:: sampling], axis=0) + if beg is None: beg = FD.beg if end is None: end = FD.end - + avg_img = FD.rdframe(beg) - n = 1 - flag = True - if show_progress: - # print( sampling-1 + beg , end, sampling ) + n=1 + flag=True + if show_progress: + #print( sampling-1 + beg , end, sampling ) if bad_frame_list is None: - bad_frame_list = [] - fra_num = int((end - beg) / sampling) - len(bad_frame_list) - for i in tqdm(range(sampling - 1 + beg, end, sampling), desc="Averaging %s images" % fra_num): + bad_frame_list =[] + fra_num = int( (end - beg )/sampling ) - len( bad_frame_list ) + for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num): if bad_frame_list is not None: if i in bad_frame_list: - flag = False + flag= False else: - flag = True - # print(i, flag) + flag=True + #print(i, flag) if flag: - (p, v) = FD.rdrawframe(i) - if len(p) > 0: - np.ravel(avg_img)[p] += v - n += 1 + (p,v) = FD.rdrawframe(i) + if len(p)>0: + np.ravel(avg_img )[p] += v + n += 1 else: - for i in range(sampling - 1 + beg, end, sampling): + for i in range( sampling-1 + beg , end, sampling ): if bad_frame_list is not None: if i in bad_frame_list: - flag = False + flag= False else: - flag = True + flag=True if flag: - (p, v) = FD.rdrawframe(i) - if len(p) > 0: - np.ravel(avg_img)[p] += v - n += 1 - - avg_img /= n + (p,v) = FD.rdrawframe(i) + if len(p)>0: + np.ravel(avg_img )[p] += v + n += 1 + + avg_img /= n if plot_: if RUN_GUI: fig = Figure() ax = fig.add_subplot(111) else: fig, ax = plt.subplots() - uid = "uid" - if "uid" in kwargs.keys(): - uid = kwargs["uid"] - im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) - # ax.set_title("Masked Averaged Image") - ax.set_title("uid= %s--Masked-Averaged-Image-" % uid) + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + im = ax.imshow(avg_img , cmap='viridis',origin='lower', + norm= LogNorm(vmin=0.001, vmax=1e2)) + #ax.set_title("Masked Averaged Image") + ax.set_title('uid= %s--Masked-Averaged-Image-'%uid) fig.colorbar(im) if save: - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs["path"] - if "uid" in kwargs: - uid = kwargs["uid"] + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] else: - uid = "uid" - # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--avg-img-" % uid + ".png" - plt.savefig(fp, dpi=fig.dpi) - # plt.show() + uid = 'uid' + #fp = 
path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-"%uid + '.png' + plt.savefig( fp, dpi=fig.dpi) + #plt.show() return avg_img + -def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False): """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation Parameters @@ -1316,14 +1041,12 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): index : list The labels for each element of the `mean_intensity` list """ - - qind, pixelist = roi.extract_label_indices(labeled_array) - sx, sy = (FD.rdframe(FD.beg)).shape - if labeled_array.shape != (sx, sy): + + qind, pixelist = roi.extract_label_indices( labeled_array ) + sx,sy = ( FD.rdframe(FD.beg) ).shape + if labeled_array.shape != ( sx,sy ): raise ValueError( - " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" - % (sx, sy, labeled_array.shape[0], labeled_array.shape[1]) - ) + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( sx,sy, labeled_array.shape[0], labeled_array.shape[1]) ) # handle various input for `index` if index is None: index = list(np.unique(labeled_array)) @@ -1334,138 +1057,133 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): except TypeError: index = [index] - index = np.array(index) - # print ('here') - good_ind = np.zeros(max(qind), dtype=np.int32) - good_ind[index - 1] = np.arange(len(index)) + 1 - w = np.where(good_ind[qind - 1])[0] - qind = good_ind[qind[w] - 1] + index = np.array( index ) + #print ('here') + good_ind = np.zeros( max(qind), dtype= np.int32 ) + good_ind[ index -1 ] = np.arange( len(index) ) +1 + w = np.where( good_ind[qind -1 ] )[0] + qind = good_ind[ qind[w] -1 ] pixelist = pixelist[w] + # pre-allocate an array for performance # might be able to use list comprehension to make this faster - - mean_intensity = np.zeros([int((FD.end - FD.beg) / sampling), len(index)]) - # fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) - timg[pixelist] = np.arange(1, len(pixelist) + 1) - # maxqind = max(qind) - norm = np.bincount(qind)[1:] - n = 0 - # for i in tqdm(range( FD.beg , FD.end )): + + mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] ) + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + #maxqind = max(qind) + norm = np.bincount( qind )[1:] + n= 0 + #for i in tqdm(range( FD.beg , FD.end )): if not multi_cor: - for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get ROI intensity of each frame"): - (p, v) = FD.rdrawframe(i) - w = np.where(timg[p])[0] - pxlist = timg[p[w]] - 1 - mean_intensity[n] = np.bincount(qind[pxlist], weights=v[w], minlength=len(index) + 1)[1:] - n += 1 + for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ): + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:] + n +=1 else: - ring_masks = [np.array(labeled_array == i, dtype=np.int64) for i in np.unique(labeled_array)[1:]] - inputs = range(len(ring_masks)) + ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ] + 
inputs = range( len(ring_masks) )
         go_through_FD(FD)
-        pool = Pool(processes=len(inputs))
-        print("Starting assign the tasks...")
-        results = {}
-        for i in tqdm(inputs):
-            results[i] = apply_async(pool, _get_mean_intensity_one_q, (FD, sampling, ring_masks[i]))
-        pool.close()
-        print("Starting running the tasks...")
-        res = [results[k].get() for k in tqdm(list(sorted(results.keys())))]
-        # return res
-        for i in inputs:
-            mean_intensity[:, i] = res[i]
-        print("ROI mean_intensit calculation is DONE!")
+        pool = Pool(processes= len(inputs) )
+        print( 'Starting to assign the tasks...')
+        results = {}
+        for i in tqdm ( inputs ):
+            results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) )
+        pool.close()
+        print( 'Starting to run the tasks...')
+        res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ]
+        #return res
+        for i in inputs:
+            mean_intensity[:,i] = res[i]
+        print( 'ROI mean_intensity calculation is DONE!')
         del results
-        del res
-
-    mean_intensity /= norm
+        del res
+
+    mean_intensity /= norm
     return mean_intensity, index


-def _get_mean_intensity_one_q(FD, sampling, labels):
-    mi = np.zeros(int((FD.end - FD.beg) / sampling))
-    n = 0
-    qind, pixelist = roi.extract_label_indices(labels)
-    # iterate over the images to compute multi-tau correlation
-    fra_pix = np.zeros_like(pixelist, dtype=np.float64)
-    timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32)
-    timg[pixelist] = np.arange(1, len(pixelist) + 1)
-    for i in range(FD.beg, FD.end, sampling):
-        (p, v) = FD.rdrawframe(i)
-        w = np.where(timg[p])[0]
-        pxlist = timg[p[w]] - 1
-        mi[n] = np.bincount(qind[pxlist], weights=v[w], minlength=2)[1:]
-        n += 1
+def _get_mean_intensity_one_q( FD, sampling, labels ):
+    mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) )
+    n=0
+    qind, pixelist = roi.extract_label_indices( labels )
+    # iterate over the sampled frames to compute the mean intensity of one ROI
+    fra_pix = np.zeros_like( pixelist, dtype=np.float64)
+    timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
+    timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
+    for i in range( FD.beg, FD.end, sampling ):
+        (p,v) = FD.rdrawframe(i)
+        w = np.where( timg[p] )[0]
+        pxlist = timg[ p[w] ] -1
+        mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:]
+        n +=1
     return mi
-
-
-def get_each_frame_intensityc(
-    FD,
-    sampling=1,
-    bad_pixel_threshold=1e10,
-    bad_pixel_low_threshold=0,
-    hot_pixel_threshold=2**30,
-    plot_=False,
-    bad_frame_list=None,
-    save=False,
-    *argv,
-    **kwargs
-):
-    """Get the total intensity of each frame by sampling every N frames
-    Also get bad_frame_list by check whether above bad_pixel_threshold
-
-    Usuage:
-    imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000,
-    bad_pixel_threshold=1e10, plot_ = True)
-    """
+
+
+
+def get_each_frame_intensityc( FD, sampling = 1,
+                             bad_pixel_threshold=1e10, bad_pixel_low_threshold=0,
+                              hot_pixel_threshold=2**30,
+                             plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs):
+    '''Get the total intensity of each frame by sampling every N frames
+       Also get bad_frame_list by checking whether the frame total is above bad_pixel_threshold
+
+       Usage:
+        imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000,
+                                
bad_pixel_threshold=1e10, plot_ = True) + ''' + + #print ( argv, kwargs ) + #mask &= img < hot_pixel_threshold + imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) ) + n=0 + for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ): + (p,v) = FD.rdrawframe(i) + if len(p)>0: + imgsum[n] = np.sum( v ) n += 1 - + if plot_: - uid = "uid" - if "uid" in kwargs.keys(): - uid = kwargs["uid"] - fig, ax = plt.subplots() - ax.plot(imgsum, "bo") - ax.set_title("uid= %s--imgsum" % uid) - ax.set_xlabel("Frame_bin_%s" % sampling) - ax.set_ylabel("Total_Intensity") - + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + fig, ax = plt.subplots() + ax.plot( imgsum,'bo') + ax.set_title('uid= %s--imgsum'%uid) + ax.set_xlabel( 'Frame_bin_%s'%sampling ) + ax.set_ylabel( 'Total_Intensity' ) + if save: - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs["path"] - if "uid" in kwargs: - uid = kwargs["uid"] + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] else: - uid = "uid" - # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--imgsum-" % uid + ".png" - fig.savefig(fp, dpi=fig.dpi) - + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + plt.show() - - bad_frame_list_ = ( - np.where((np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold))[0] - + FD.beg - ) - - if bad_frame_list is not None: - bad_frame_list = np.unique(np.concatenate([bad_frame_list, bad_frame_list_])) + + bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg + + if bad_frame_list is not None: + bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) ) else: - bad_frame_list = bad_frame_list_ - + bad_frame_list = bad_frame_list_ + if len(bad_frame_list): - print("Bad frame list length is: %s" % len(bad_frame_list)) + print ('Bad frame list length is: %s' %len(bad_frame_list)) else: - print("No bad frames are involved.") - return imgsum, bad_frame_list + print ('No bad frames are involved.') + return imgsum,bad_frame_list + + + + diff --git a/pyCHX/chx_correlationc.py b/pyCHX/chx_correlationc.py index 127b3d9..af0dbd4 100644 --- a/pyCHX/chx_correlationc.py +++ b/pyCHX/chx_correlationc.py @@ -7,32 +7,20 @@ from __future__ import absolute_import, division, print_function -import logging +from skbeam.core.utils import multi_tau_lags +from skbeam.core.roi import extract_label_indices from collections import namedtuple - import numpy as np import skbeam.core.roi as roi -from skbeam.core.roi import extract_label_indices -from skbeam.core.utils import multi_tau_lags +import logging logger = logging.getLogger(__name__) from tqdm import tqdm -def _one_time_process( - buf, - G, - past_intensity_norm, - future_intensity_norm, - label_array, - num_bufs, - num_pixels, - img_per_level, - level, - buf_no, - norm, - lev_len, -): +def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm, + label_array, num_bufs, num_pixels, img_per_level, + level, buf_no, norm, lev_len): """Reference implementation of the inner loop of multi-tau one time correlation This helper function calculates G, past_intensity_norm and @@ -78,10 +66,10 @@ def _one_time_process( # in 
multi-tau correlation, the subsequent levels have half as many # buffers as the first i_min = num_bufs // 2 if level else 0 - # maxqind=G.shape[1] + #maxqind=G.shape[1] for i in range(i_min, min(img_per_level[level], num_bufs)): # compute the index into the autocorrelation matrix - t_index = int(level * num_bufs / 2 + i) + t_index = int( level * num_bufs / 2 + i ) delay_no = (buf_no - i) % num_bufs # get the images for correlating past_img = buf[level, delay_no] @@ -89,41 +77,29 @@ def _one_time_process( # find the normalization that can work both for bad_images # and good_images ind = int(t_index - lev_len[:level].sum()) - normalize = img_per_level[level] - i - norm[level + 1][ind] + normalize = img_per_level[level] - i - norm[level+1][ind] # take out the past_ing and future_img created using bad images # (bad images are converted to np.nan array) if np.isnan(past_img).any() or np.isnan(future_img).any(): norm[level + 1][ind] += 1 else: - for w, arr in zip( - [past_img * future_img, past_img, future_img], [G, past_intensity_norm, future_intensity_norm] - ): + for w, arr in zip([past_img*future_img, past_img, future_img], + [G, past_intensity_norm, future_intensity_norm]): binned = np.bincount(label_array, weights=w)[1:] - # nonz = np.where(w)[0] - # binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] - arr[t_index] += (binned / num_pixels - arr[t_index]) / normalize + #nonz = np.where(w)[0] + #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + arr[t_index] += ((binned / num_pixels - + arr[t_index]) / normalize) return None # modifies arguments in place! -def _one_time_process_error( - buf, - G, - past_intensity_norm, - future_intensity_norm, - label_array, - num_bufs, - num_pixels, - img_per_level, - level, - buf_no, - norm, - lev_len, - G_err, - past_intensity_norm_err, - future_intensity_norm_err, -): + +def _one_time_process_error(buf, G, past_intensity_norm, future_intensity_norm, + label_array, num_bufs, num_pixels, img_per_level, + level, buf_no, norm, lev_len, + G_err, past_intensity_norm_err, future_intensity_norm_err ): """Reference implementation of the inner loop of multi-tau one time - correlation with the calculation of errorbar (statistical error due to multipixel measurements ) + correlation with the calculation of errorbar (statistical error due to multipixel measurements ) The statistical error: var( g2(Q) ) = sum( [g2(Qi)- g2(Q)]^2 )/N(N-1), Lumma, RSI, 2000 This helper function calculates G, past_intensity_norm and future_intensity_norm at each level, symmetric normalization is used. 
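# A minimal, self-contained sketch (not part of the library API) of the error
# formula quoted in the docstring above,
#     var( g2(Q) ) = sum( [g2(Qi) - g2(Q)]^2 ) / ( N*(N-1) ),   Lumma, RSI, 2000,
# where the g2(Qi) are per-pixel g2 values inside one ROI and g2(Q) is their mean.
# `g2_pix` is a hypothetical 1D array of per-pixel g2 values at one lag time.
import numpy as np

def g2_roi_with_error(g2_pix):
    """Return the ROI-averaged g2 and its standard error over N > 1 pixels."""
    g2_pix = np.asarray(g2_pix, dtype=np.float64)
    N = g2_pix.size
    g2_mean = g2_pix.mean()
    # variance of the mean over the N pixel-level estimates
    g2_var = np.sum((g2_pix - g2_mean) ** 2) / (N * (N - 1))
    return g2_mean, np.sqrt(g2_var)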
@@ -168,10 +144,10 @@ def _one_time_process_error( # in multi-tau correlation, the subsequent levels have half as many # buffers as the first i_min = num_bufs // 2 if level else 0 - # maxqind=G.shape[1] + #maxqind=G.shape[1] for i in range(i_min, min(img_per_level[level], num_bufs)): # compute the index into the autocorrelation matrix - t_index = int(level * num_bufs / 2 + i) + t_index = int( level * num_bufs / 2 + i ) delay_no = (buf_no - i) % num_bufs # get the images for correlating past_img = buf[level, delay_no] @@ -179,94 +155,89 @@ def _one_time_process_error( # find the normalization that can work both for bad_images # and good_images ind = int(t_index - lev_len[:level].sum()) - normalize = img_per_level[level] - i - norm[level + 1][ind] + normalize = img_per_level[level] - i - norm[level+1][ind] # take out the past_ing and future_img created using bad images # (bad images are converted to np.nan array) if np.isnan(past_img).any() or np.isnan(future_img).any(): norm[level + 1][ind] += 1 else: - # for w, arr in zip([past_img*future_img, past_img, future_img], - # [G, past_intensity_norm, future_intensity_norm, + + #for w, arr in zip([past_img*future_img, past_img, future_img], + # [G, past_intensity_norm, future_intensity_norm, # ]): # binned = np.bincount(label_array, weights=w)[1:] # #nonz = np.where(w)[0] - # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] # arr[t_index] += ((binned / num_pixels - # arr[t_index]) / normalize) - for w, arr in zip( - [past_img * future_img, past_img, future_img], - [ - G_err, - past_intensity_norm_err, - future_intensity_norm_err, - ], - ): - arr[t_index] += (w - arr[t_index]) / normalize + for w, arr in zip([past_img*future_img, past_img, future_img], + [ + G_err, past_intensity_norm_err, future_intensity_norm_err, + ]): + arr[t_index] += ( w - arr[t_index]) / normalize return None # modifies arguments in place! 
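# The accumulator update in the loop above,
#     arr[t_index] += (w - arr[t_index]) / normalize,
# is a running (streaming) mean: `normalize` counts the good contributions seen
# so far at that lag, so the accumulator always equals the average of all
# samples processed to date. A standalone sanity check of that identity
# (illustrative only; `samples` is any hypothetical 1D sequence):
import numpy as np

def running_mean(samples):
    acc = 0.0
    for k, w in enumerate(samples, start=1):
        acc += (w - acc) / k  # k plays the role of `normalize`
    return acc

x = np.random.rand(1000)
assert np.isclose(running_mean(x), x.mean())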
-results = namedtuple("correlation_results", ["g2", "lag_steps", "internal_state"]) +results = namedtuple( + 'correlation_results', + ['g2', 'lag_steps', 'internal_state'] +) _internal_state = namedtuple( - "correlation_state", - [ - "buf", - "G", - "past_intensity", - "future_intensity", - "img_per_level", - "label_array", - "track_level", - "cur", - "pixel_list", - "num_pixels", - "lag_steps", - "norm", - "lev_len", - ], + 'correlation_state', + ['buf', + 'G', + 'past_intensity', + 'future_intensity', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'norm', + 'lev_len'] ) _internal_state_err = namedtuple( - "correlation_state", - [ - "buf", - "G", - "past_intensity", - "future_intensity", - "img_per_level", - "label_array", - "track_level", - "cur", - "pixel_list", - "num_pixels", - "lag_steps", - "norm", - "lev_len", - "G_all", - "past_intensity_all", - "future_intensity_all", - ], + 'correlation_state', + ['buf', + 'G', + 'past_intensity', + 'future_intensity', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'norm', + 'lev_len', + 'G_all', + 'past_intensity_all', + 'future_intensity_all' + ] ) _two_time_internal_state = namedtuple( - "two_time_correlation_state", - [ - "buf", - "img_per_level", - "label_array", - "track_level", - "cur", - "pixel_list", - "num_pixels", - "lag_steps", - "g2", - "count_level", - "current_img_time", - "time_ind", - "norm", - "lev_len", - ], + 'two_time_correlation_state', + ['buf', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'g2', + 'count_level', + 'current_img_time', + 'time_ind', + 'norm', + 'lev_len'] ) @@ -309,11 +280,13 @@ def _validate_and_transform_inputs(num_bufs, num_levels, labels): length of each levels """ if num_bufs % 2 != 0: - raise ValueError("There must be an even number of `num_bufs`. You " "provided %s" % num_bufs) + raise ValueError("There must be an even number of `num_bufs`. You " + "provided %s" % num_bufs) label_array, pixel_list = extract_label_indices(labels) # map the indices onto a sequential list of integers starting at 1 - label_mapping = {label: n + 1 for n, label in enumerate(np.unique(label_array))} + label_mapping = {label: n+1 + for n, label in enumerate(np.unique(label_array))} # remap the label array to go from 1 -> max(_labels) for label, n in label_mapping.items(): label_array[label_array == label] = n @@ -334,7 +307,8 @@ def _validate_and_transform_inputs(num_bufs, num_levels, labels): # Ring buffer, a buffer with periodic boundary conditions. # Images must be keep for up to maximum delay in buf. 
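# A tiny standalone illustration (hypothetical sizes, not the pipeline code) of
# the periodic indexing used for `buf` allocated just below: writing frame t
# into slot t % num_bufs overwrites the oldest entry, so each level retains
# only its num_bufs most recent (level-averaged) frames.
import numpy as np

num_bufs, npix = 4, 3
ring = np.zeros((num_bufs, npix))
for t in range(10):            # a stream of 10 fake frames
    ring[t % num_bufs] = t     # periodic boundary condition: overwrite oldest
assert sorted(ring[:, 0]) == [6, 7, 8, 9]   # only the last num_bufs survive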
- buf = np.zeros((num_levels, num_bufs, len(pixel_list)), dtype=np.float64) + buf = np.zeros((num_levels, num_bufs, len(pixel_list)), + dtype=np.float64) # to track how many images processed in each level img_per_level = np.zeros(num_levels, dtype=np.int64) # to track which levels have already been processed @@ -342,22 +316,11 @@ def _validate_and_transform_inputs(num_bufs, num_levels, labels): # to increment buffer cur = np.ones(num_levels, dtype=np.int64) - return ( - label_array, - pixel_list, - num_rois, - num_pixels, - lag_steps, - buf, - img_per_level, - track_level, - cur, - norm, - lev_len, - ) - + return (label_array, pixel_list, num_rois, num_pixels, + lag_steps, buf, img_per_level, track_level, cur, + norm, lev_len) -def _init_state_one_time(num_levels, num_bufs, labels, cal_error=False): +def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): """Initialize a stateful namedtuple for the generator-based multi-tau for one time correlation Parameters @@ -373,36 +336,28 @@ def _init_state_one_time(num_levels, num_bufs, labels, cal_error=False): `lazy_one_time` requires so that it can be used to pick up processing after it was interrupted """ - ( - label_array, - pixel_list, - num_rois, - num_pixels, - lag_steps, - buf, - img_per_level, - track_level, - cur, - norm, - lev_len, - ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + (label_array, pixel_list, num_rois, num_pixels, lag_steps, buf, + img_per_level, track_level, cur, norm, + lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) # G holds the un normalized auto- correlation result. We # accumulate computations into G as the algorithm proceeds. - G = np.zeros((int((num_levels + 1) * num_bufs / 2), num_rois), dtype=np.float64) - + G = np.zeros(( int( (num_levels + 1) * num_bufs / 2), num_rois), + dtype=np.float64) + # matrix for normalizing G into g2 past_intensity = np.zeros_like(G) # matrix for normalizing G into g2 future_intensity = np.zeros_like(G) if cal_error: - G_all = np.zeros((int((num_levels + 1) * num_bufs / 2), len(pixel_list)), dtype=np.float64) - + G_all = np.zeros(( int( (num_levels + 1) * num_bufs / 2), len(pixel_list)), + dtype=np.float64) + # matrix for normalizing G into g2 past_intensity_all = np.zeros_like(G_all) # matrix for normalizing G into g2 - future_intensity_all = np.zeros_like(G_all) + future_intensity_all = np.zeros_like(G_all) return _internal_state_err( buf, G, @@ -419,8 +374,8 @@ def _init_state_one_time(num_levels, num_bufs, labels, cal_error=False): lev_len, G_all, past_intensity_all, - future_intensity_all, - ) + future_intensity_all + ) else: return _internal_state( buf, @@ -439,92 +394,87 @@ def _init_state_one_time(num_levels, num_bufs, labels, cal_error=False): ) -def fill_pixel(p, v, pixelist): - fra_pix = np.zeros_like(pixelist) - fra_pix[np.in1d(pixelist, p)] = v[np.in1d(p, pixelist)] - return fra_pix - +def fill_pixel( p, v, pixelist): + fra_pix = np.zeros_like( pixelist ) + fra_pix[ np.in1d( pixelist,p ) ] = v[np.in1d( p, pixelist )] + return fra_pix + -def lazy_one_time( - FD, - num_levels, - num_bufs, - labels, - internal_state=None, - bad_frame_list=None, - imgsum=None, - norm=None, - cal_error=False, -): + + + +def lazy_one_time(FD, num_levels, num_bufs, labels, + internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): + """Generator implementation of 1-time multi-tau correlation - If you do not want multi-tau correlation, set num_levels to 1 and - num_bufs to the number of images you wish to 
correlate - The number of bins (of size 1) is one larger than the largest value in - `x`. If `minlength` is specified, there will be at least this number - of bins in the output array (though it will be longer if necessary, - depending on the contents of `x`). - Each bin gives the number of occurrences of its index value in `x`. - If `weights` is specified the input array is weighted by it, i.e. if a - value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead - of ``out[n] += 1``. - - Jan 2, 2018 YG. Add error bar calculation - - Parameters - ---------- - image_iterable : FD, a compressed eiger file by Multifile class - num_levels : int - how many generations of downsampling to perform, i.e., the depth of - the binomial tree of averaged frames - num_bufs : int, must be even - maximum lag step to compute in each generation of downsampling - labels : array - Labeled array of the same shape as the image stack. - Each ROI is represented by sequential integers starting at one. For - example, if you have four ROIs, they must be labeled 1, 2, 3, - 4. Background is labeled as 0 - internal_state : namedtuple, optional - internal_state is a bucket for all of the internal state of the - generator. It is part of the `results` object that is yielded from - this generator - - For the sake of normalization: - - imgsum: a list with the same length as FD, sum of each frame - qp, iq: the circular average radius (in pixel) and intensity - center: beam center - - Yields - ------ + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate +The number of bins (of size 1) is one larger than the largest value in +`x`. If `minlength` is specified, there will be at least this number +of bins in the output array (though it will be longer if necessary, +depending on the contents of `x`). +Each bin gives the number of occurrences of its index value in `x`. +If `weights` is specified the input array is weighted by it, i.e. if a +value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead +of ``out[n] += 1``. - Returns - ------- + Jan 2, 2018 YG. Add error bar calculation + + Parameters + ---------- + image_iterable : FD, a compressed eiger file by Multifile class + num_levels : int + how many generations of downsampling to perform, i.e., the depth of + the binomial tree of averaged frames + num_bufs : int, must be even + maximum lag step to compute in each generation of downsampling + labels : array + Labeled array of the same shape as the image stack. + Each ROI is represented by sequential integers starting at one. For + example, if you have four ROIs, they must be labeled 1, 2, 3, + 4. Background is labeled as 0 + internal_state : namedtuple, optional + internal_state is a bucket for all of the internal state of the + generator. It is part of the `results` object that is yielded from + this generator + + For the sake of normalization: + + imgsum: a list with the same length as FD, sum of each frame + qp, iq: the circular average radius (in pixel) and intensity + center: beam center + + Yields + ------ - A `results` object is yielded after every image has been processed. - This `reults` object contains, in this order: - - `g2`: the normalized correlation - shape is (len(lag_steps), num_rois) - - `lag_steps`: the times at which the correlation was computed - - `_internal_state`: all of the internal state. 
Can be passed back in
-      to `lazy_one_time` as the `internal_state` parameter
-    Notes
-    -----
-    The normalized intensity-intensity time-autocorrelation function
-    is defined as
-    .. math::
-        g_2(q, t') = \\frac{<I(q, t)I(q, t + t')>}{<I(q, t)>^2}
-        t' > 0
-    Here, ``I(q, t)`` refers to the scattering strength at the momentum
-    transfer vector ``q`` in reciprocal space at time ``t``, and the brackets
-    ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes
-    the delay time
-    This implementation is based on published work. [1]_
-    References
-    ----------
-    .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
-        "Area detector based photon correlation in the regime of
-        short data batches: Data reduction for dynamic x-ray
-        scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000.
+Returns
+-------
+
+    A `results` object is yielded after every image has been processed.
+    This `results` object contains, in this order:
+    - `g2`: the normalized correlation
+      shape is (len(lag_steps), num_rois)
+    - `lag_steps`: the times at which the correlation was computed
+    - `_internal_state`: all of the internal state. Can be passed back in
+      to `lazy_one_time` as the `internal_state` parameter
+    Notes
+    -----
+    The normalized intensity-intensity time-autocorrelation function
+    is defined as
+    .. math::
+        g_2(q, t') = \\frac{<I(q, t)I(q, t + t')>}{<I(q, t)>^2}
+        t' > 0
+    Here, ``I(q, t)`` refers to the scattering strength at the momentum
+    transfer vector ``q`` in reciprocal space at time ``t``, and the brackets
+    ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes
+    the delay time
+    This implementation is based on published work. [1]_
+    References
+    ----------
+    .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
+        "Area detector based photon correlation in the regime of
+        short data batches: Data reduction for dynamic x-ray
+        scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000.
""" if internal_state is None: @@ -532,90 +482,66 @@ def lazy_one_time( # create a shorthand reference to the results and state named tuple s = internal_state - qind, pixelist = roi.extract_label_indices(labels) - # iterate over the images to compute multi-tau correlation - - fra_pix = np.zeros_like(pixelist, dtype=np.float64) - - timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) - timg[pixelist] = np.arange(1, len(pixelist) + 1) - + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + if bad_frame_list is None: - bad_frame_list = [] - for i in tqdm(range(FD.beg, FD.end)): + bad_frame_list=[] + for i in tqdm(range( FD.beg , FD.end )): if i in bad_frame_list: - fra_pix[:] = np.nan + fra_pix[:]= np.nan else: - (p, v) = FD.rdrawframe(i) - w = np.where(timg[p])[0] - pxlist = timg[p[w]] - 1 - + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + if imgsum is None: if norm is None: - fra_pix[pxlist] = v[w] - else: + fra_pix[ pxlist] = v[w] + else: S = norm.shape - if len(S) > 1: - fra_pix[pxlist] = v[w] / norm[i, pxlist] # -1.0 - else: - fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + if len(S)>1: + fra_pix[ pxlist] = v[w]/ norm[i,pxlist] #-1.0 + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 else: if norm is None: - fra_pix[pxlist] = v[w] / imgsum[i] + fra_pix[ pxlist] = v[w] / imgsum[i] else: S = norm.shape - if len(S) > 1: - fra_pix[pxlist] = v[w] / imgsum[i] / norm[i, pxlist] - else: - fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] - level = 0 + if len(S)>1: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[i,pxlist] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + level = 0 # increment buffer - s.cur[0] = (1 + s.cur[0]) % num_bufs - # Put the ROI pixels into the ring buffer. - s.buf[0, s.cur[0] - 1] = fra_pix - fra_pix[:] = 0 - - # print( i, len(p), len(w), len( pixelist)) - - # print ('i= %s init fra_pix'%i ) + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + + #print( i, len(p), len(w), len( pixelist)) + + #print ('i= %s init fra_pix'%i ) buf_no = s.cur[0] - 1 # Compute the correlations between the first level # (undownsampled) frames. This modifies G, # past_intensity, future_intensity, # and img_per_level in place! 
if cal_error: - _one_time_process_error( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - s.G_all, - s.past_intensity_all, - s.future_intensity_all, - ) - else: - _one_time_process( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - ) + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) # check whether the number of levels is one, otherwise # continue processing the next level @@ -627,12 +553,13 @@ def lazy_one_time( s.track_level[level] = True processing = False else: - prev = 1 + (s.cur[level - 1] - 2) % num_bufs - s.cur[level] = 1 + s.cur[level] % num_bufs + prev = (1 + (s.cur[level - 1] - 2) % num_bufs) + s.cur[level] = ( + 1 + s.cur[level] % num_bufs) - s.buf[level, s.cur[level] - 1] = ( - s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] - ) / 2 + s.buf[level, s.cur[level] - 1] = (( + s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1]) / 2) # make the track_level zero once that level is processed s.track_level[level] = False @@ -642,38 +569,14 @@ def lazy_one_time( # on previous call above. buf_no = s.cur[level] - 1 if cal_error: - _one_time_process_error( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - s.G_all, - s.past_intensity_all, - s.future_intensity_all, - ) - else: - _one_time_process( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - ) + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) level += 1 @@ -683,108 +586,77 @@ def lazy_one_time( # If any past intensities are zero, then g2 cannot be normalized at # those levels. This if/else code block is basically preventing # divide-by-zero errors. 
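The guard the comments describe appears next. Its effect on toy arrays (illustration only, not pyCHX code): normalization is truncated at the first lag whose past or future accumulator was never filled, so the division cannot hit a zero.

import numpy as np

G = np.array([[4.0], [3.0], [2.0], [1.0]])     # accumulated numerator, one ROI
past = np.array([[2.0], [2.0], [0.0], [0.0]])  # zero rows were never filled
future = np.array([[2.0], [2.0], [0.0], [0.0]])

zeros = np.where(past == 0)[0]
g_max1 = zeros[0] if len(zeros) else past.shape[0]
zeros = np.where(future == 0)[0]
g_max2 = zeros[0] if len(zeros) else future.shape[0]
g_max = min(g_max1, g_max2)                    # here: 2
g2 = G[:g_max] / (past[:g_max] * future[:g_max])
print(g2.ravel())                               # [1.   0.75]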
- if not cal_error: + if not cal_error: if len(np.where(s.past_intensity == 0)[0]) != 0: g_max1 = np.where(s.past_intensity == 0)[0][0] else: - g_max1 = s.past_intensity.shape[0] + g_max1 = s.past_intensity.shape[0] if len(np.where(s.future_intensity == 0)[0]) != 0: g_max2 = np.where(s.future_intensity == 0)[0][0] else: - g_max2 = s.future_intensity.shape[0] - g_max = min(g_max1, g_max2) - g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) - yield results(g2, s.lag_steps[:g_max], s) + g_max2 = s.future_intensity.shape[0] + g_max = min( g_max1, g_max2) + g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * + s.future_intensity[:g_max])) + yield results(g2, s.lag_steps[:g_max], s) else: - yield results(None, s.lag_steps, s) - - -def lazy_one_time_debug( - FD, - num_levels, - num_bufs, - labels, - internal_state=None, - bad_frame_list=None, - imgsum=None, - norm=None, - cal_error=False, -): + yield results(None,s.lag_steps, s) + + + +def lazy_one_time_debug(FD, num_levels, num_bufs, labels, + internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): if internal_state is None: internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) # create a shorthand reference to the results and state named tuple s = internal_state - qind, pixelist = roi.extract_label_indices(labels) - # iterate over the images to compute multi-tau correlation - fra_pix = np.zeros_like(pixelist, dtype=np.float64) - timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) - timg[pixelist] = np.arange(1, len(pixelist) + 1) + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) if bad_frame_list is None: - bad_frame_list = [] - for i in range(FD.beg, FD.end): + bad_frame_list=[] + for i in range( FD.beg , FD.end ): print(i) if i in bad_frame_list: - fra_pix[:] = np.nan + fra_pix[:]= np.nan else: - (p, v) = FD.rdrawframe(i) - w = np.where(timg[p])[0] - pxlist = timg[p[w]] - 1 + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 if imgsum is None: if norm is None: - fra_pix[pxlist] = v[w] - else: - fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + fra_pix[ pxlist] = v[w] + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 else: if norm is None: - fra_pix[pxlist] = v[w] / imgsum[i] + fra_pix[ pxlist] = v[w] / imgsum[i] else: - fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] - level = 0 + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + level = 0 # increment buffer - s.cur[0] = (1 + s.cur[0]) % num_bufs - # Put the ROI pixels into the ring buffer. - s.buf[0, s.cur[0] - 1] = fra_pix - fra_pix[:] = 0 - # print( i, len(p), len(w), len( pixelist)) - # print ('i= %s init fra_pix'%i ) + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + #print( i, len(p), len(w), len( pixelist)) + #print ('i= %s init fra_pix'%i ) buf_no = s.cur[0] - 1 # Compute the correlations between the first level # (undownsampled) frames. This modifies G, # past_intensity, future_intensity, # and img_per_level in place! 
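Since `lazy_one_time` yields a `results` tuple after every frame, a driver only needs to drain the generator and keep the last yield; that is exactly what the `multi_tau_auto_corr` wrapper further below does. A hypothetical sketch (function name is ours; `FD` is a compressed-file handler, `ring_mask` a labeled ROI array):

def run_one_time(FD, ring_mask, num_levels=8, num_bufs=8):
    # Hypothetical driver, not pyCHX code: consume the generator, one
    # iteration per processed frame, and return the final estimate.
    result = None
    for result in lazy_one_time(FD, num_levels, num_bufs, ring_mask):
        pass
    return result.g2, result.lag_steps   # g2 shape: (len(lag_steps), num_rois)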
if cal_error: - _one_time_process_error( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - s.G_all, - s.past_intensity_all, - s.future_intensity_all, - ) - else: - _one_time_process( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - ) + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) # check whether the number of levels is one, otherwise # continue processing the next level @@ -795,12 +667,13 @@ def lazy_one_time_debug( s.track_level[level] = True processing = False else: - prev = 1 + (s.cur[level - 1] - 2) % num_bufs - s.cur[level] = 1 + s.cur[level] % num_bufs + prev = (1 + (s.cur[level - 1] - 2) % num_bufs) + s.cur[level] = ( + 1 + s.cur[level] % num_bufs) - s.buf[level, s.cur[level] - 1] = ( - s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] - ) / 2 + s.buf[level, s.cur[level] - 1] = (( + s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1]) / 2) # make the track_level zero once that level is processed s.track_level[level] = False # call processing_func for each multi-tau level greater @@ -808,38 +681,14 @@ def lazy_one_time_debug( # on previous call above. buf_no = s.cur[level] - 1 if cal_error: - _one_time_process_error( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - s.G_all, - s.past_intensity_all, - s.future_intensity_all, - ) - else: - _one_time_process( - s.buf, - s.G, - s.past_intensity, - s.future_intensity, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - level, - buf_no, - s.norm, - s.lev_len, - ) + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) level += 1 # Checking whether there is next level for processing @@ -847,23 +696,25 @@ def lazy_one_time_debug( # If any past intensities are zero, then g2 cannot be normalized at # those levels. This if/else code block is basically preventing # divide-by-zero errors. 
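`auto_corr_scat_factor`, kept unchanged a little further below, evaluates the single-exponential model g2(t) = beta * exp(-2 * relaxation_rate * t) + baseline. A self-contained round trip on synthetic data (illustration only, not pyCHX code):

import numpy as np
from scipy.optimize import curve_fit

def model(lags, beta, rate, baseline):
    # Same functional form as auto_corr_scat_factor.
    return beta * np.exp(-2.0 * rate * lags) + baseline

lags = np.geomspace(1e-3, 10.0, 30)
g2 = model(lags, 0.25, 1.5, 1.0)
g2 += np.random.default_rng(0).normal(0.0, 1e-3, lags.size)  # mock noise
popt, _ = curve_fit(model, lags, g2, p0=(0.2, 1.0, 1.0))
print(popt)   # recovers roughly beta=0.25, rate=1.5, baseline=1.0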
- if not cal_error: + if not cal_error: if len(np.where(s.past_intensity == 0)[0]) != 0: g_max1 = np.where(s.past_intensity == 0)[0][0] else: - g_max1 = s.past_intensity.shape[0] + g_max1 = s.past_intensity.shape[0] if len(np.where(s.future_intensity == 0)[0]) != 0: g_max2 = np.where(s.future_intensity == 0)[0][0] else: - g_max2 = s.future_intensity.shape[0] - g_max = min(g_max1, g_max2) - g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) - yield results(g2, s.lag_steps[:g_max], s) - # yield( i ) - + g_max2 = s.future_intensity.shape[0] + g_max = min( g_max1, g_max2) + g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * + s.future_intensity[:g_max])) + yield results(g2, s.lag_steps[:g_max], s) + #yield( i ) + else: - yield results(None, s.lag_steps, s) - + yield results(None,s.lag_steps, s) + + def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): """ @@ -905,11 +756,10 @@ def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): J. Synchrotron Rad. vol 21, p 1288-1295, 2014 """ return beta * np.exp(-2 * relaxation_rate * lags) + baseline + - -def multi_tau_auto_corr( - num_levels, num_bufs, labels, images, bad_frame_list=None, imgsum=None, norm=None, cal_error=False -): +def multi_tau_auto_corr(num_levels, num_bufs, labels, images, bad_frame_list=None, + imgsum=None, norm=None,cal_error=False ): """Wraps generator implementation of multi-tau Original code(in Yorick) for multi tau auto correlation author: Mark Sutton @@ -920,25 +770,17 @@ def multi_tau_auto_corr( the `lazy_one_time()` function. The semantics of the variables remain unchanged. """ - gen = lazy_one_time( - images, - num_levels, - num_bufs, - labels, - bad_frame_list=bad_frame_list, - imgsum=imgsum, - norm=norm, - cal_error=cal_error, - ) + gen = lazy_one_time(images, num_levels, num_bufs, labels,bad_frame_list=bad_frame_list, imgsum=imgsum, + norm=norm,cal_error=cal_error ) for result in gen: pass if cal_error: return result.g2, result.lag_steps, result.internal_state - else: + else: return result.g2, result.lag_steps - -def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list=None, imgsum=None, norm=None): +def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list =None, + imgsum= None, norm = None ): """Wraps generator implementation of multi-tau two time correlation This function computes two-time correlation Original code : author: Yugang Zhang @@ -947,28 +789,21 @@ def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list results : namedtuple For parameter definition, see the docstring for the `lazy_two_time()` function in this module - """ - gen = lazy_two_time( - FD, - num_lev, - num_buf, - ring_mask, - two_time_internal_state=None, - bad_frame_list=bad_frame_list, - imgsum=imgsum, - norm=norm, - ) + """ + gen = lazy_two_time(FD, num_lev, num_buf, ring_mask, + two_time_internal_state= None, + bad_frame_list=bad_frame_list, imgsum=imgsum, norm = norm ) for result in gen: pass return two_time_state_to_results(result) - - -def lazy_two_time( - FD, num_levels, num_bufs, labels, two_time_internal_state=None, bad_frame_list=None, imgsum=None, norm=None -): - # def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, - # two_time_internal_state=None): - """Generator implementation of two-time correlation + + +def lazy_two_time(FD, num_levels, num_bufs, labels, + two_time_internal_state=None, bad_frame_list=None, imgsum= None, norm = None ): + +#def lazy_two_time(labels, images, num_frames, num_bufs, 
num_levels=1, +# two_time_internal_state=None): + """ Generator implementation of two-time correlation If you do not want multi-tau correlation, set num_levels to 1 and num_bufs to the number of images you wish to correlate Multi-tau correlation uses a scheme to achieve long-time correlations @@ -978,14 +813,14 @@ def lazy_two_time( ** see comments on multi_tau_auto_corr Parameters ---------- - FD: the handler of compressed data + FD: the handler of compressed data num_levels : int, optional how many generations of downsampling to perform, i.e., the depth of the binomial tree of averaged frames default is one num_bufs : int, must be even maximum lag step to compute in each generation of - downsampling + downsampling labels : array labeled array of the same shape as the image stack; each ROI is represented by a distinct label (i.e., integer) @@ -1022,59 +857,51 @@ def lazy_two_time( and aging in collodial gels studied by x-ray photon correlation spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. """ - - num_frames = FD.end - FD.beg + + num_frames = FD.end - FD.beg if two_time_internal_state is None: - two_time_internal_state = _init_state_two_time(num_levels, num_bufs, labels, num_frames) + two_time_internal_state = _init_state_two_time(num_levels, num_bufs,labels, num_frames) # create a shorthand reference to the results and state named tuple - s = two_time_internal_state - qind, pixelist = roi.extract_label_indices(labels) + s = two_time_internal_state + qind, pixelist = roi.extract_label_indices( labels ) # iterate over the images to compute multi-tau correlation - fra_pix = np.zeros_like(pixelist, dtype=np.float64) - timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) - timg[pixelist] = np.arange(1, len(pixelist) + 1) + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) if bad_frame_list is None: - bad_frame_list = [] - - for i in tqdm(range(FD.beg, FD.end)): + bad_frame_list=[] + + for i in tqdm(range( FD.beg , FD.end )): if i in bad_frame_list: - fra_pix[:] = np.nan + fra_pix[:]= np.nan else: - (p, v) = FD.rdrawframe(i) - w = np.where(timg[p])[0] - pxlist = timg[p[w]] - 1 + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 if imgsum is None: if norm is None: - fra_pix[pxlist] = v[w] - else: - fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + fra_pix[ pxlist] = v[w] + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 else: if norm is None: - fra_pix[pxlist] = v[w] / imgsum[i] + fra_pix[ pxlist] = v[w] / imgsum[i] else: - fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] - - level = 0 + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + + level = 0 # increment buffer - s.cur[0] = (1 + s.cur[0]) % num_bufs + s.cur[0] = (1 + s.cur[0]) % num_bufs s.count_level[0] = 1 + s.count_level[0] # get the current image time - s = s._replace(current_img_time=(s.current_img_time + 1)) - # Put the ROI pixels into the ring buffer. - s.buf[0, s.cur[0] - 1] = fra_pix - fra_pix[:] = 0 - _two_time_process( - s.buf, - s.g2, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - s.lag_steps, - s.current_img_time, - level=0, - buf_no=s.cur[0] - 1, - ) + s = s._replace(current_img_time=(s.current_img_time + 1)) + # Put the ROI pixels into the ring buffer. 
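The loop that begins here accumulates, frame by frame and level by level, what a dense brute-force evaluation would give for a single ROI: C(t1, t2) = <I(t1) I(t2)> / (<I(t1)> <I(t2)>), with <...> averaging over the ROI pixels. A reference sketch on synthetic counts (not pyCHX code, and only practical for short series; the buffer write described in the comment above resumes right after this aside):

import numpy as np

rng = np.random.default_rng(1)
I = rng.poisson(5.0, size=(20, 50)).astype(float)   # (frames, ROI pixels)
mean_t = I.mean(axis=1)                             # <I(t)> per frame
C = (I @ I.T) / I.shape[1] / np.outer(mean_t, mean_t)
print(C.shape)   # (20, 20); diagonal ~ 1 + 1/<I> for Poisson counts

`auto_two_Arrayc` later in this file applies the same np.dot normalization per ROI, using the frames-by-pixels array produced by Get_Pixel_Arrayc.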
+ s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + _two_time_process(s.buf, s.g2, s.label_array, num_bufs, + s.num_pixels, s.img_per_level, s.lag_steps, + s.current_img_time, + level=0, buf_no=s.cur[0] - 1) # time frame for each level s.time_ind[0].append(s.current_img_time) # check whether the number of levels is one, otherwise @@ -1089,14 +916,14 @@ def lazy_two_time( else: prev = 1 + (s.cur[level - 1] - 2) % num_bufs s.cur[level] = 1 + s.cur[level] % num_bufs - s.count_level[level] = 1 + s.count_level[level] - s.buf[level, s.cur[level] - 1] = ( - s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] - ) / 2 + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1] )/2 t1_idx = (s.count_level[level] - 1) * 2 - current_img_time = ((s.time_ind[level - 1])[t1_idx] + (s.time_ind[level - 1])[t1_idx + 1]) / 2.0 + current_img_time = ((s.time_ind[level - 1])[t1_idx] + + (s.time_ind[level - 1])[t1_idx + 1])/2. # time frame for each level s.time_ind[level].append(current_img_time) # make the track_level zero once that level is processed @@ -1105,23 +932,15 @@ def lazy_two_time( # for multi-tau levels greater than one # Again, this is modifying things in place. See comment # on previous call above. - _two_time_process( - s.buf, - s.g2, - s.label_array, - num_bufs, - s.num_pixels, - s.img_per_level, - s.lag_steps, - current_img_time, - level=level, - buf_no=s.cur[level] - 1, - ) + _two_time_process(s.buf, s.g2, s.label_array, num_bufs, + s.num_pixels, s.img_per_level, s.lag_steps, + current_img_time, + level=level, buf_no=s.cur[level]-1) level += 1 # Checking whether there is next level for processing processing = level < num_levels - # print (s.g2[1,:,1] ) + #print (s.g2[1,:,1] ) yield s @@ -1139,13 +958,15 @@ def two_time_state_to_results(state): """ for q in range(np.max(state.label_array)): x0 = (state.g2)[q, :, :] - (state.g2)[q, :, :] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0)) + (state.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T - + np.diag(np.diag(x0))) return results(state.g2, state.lag_steps, state) - -def _two_time_process( - buf, g2, label_array, num_bufs, num_pixels, img_per_level, lag_steps, current_img_time, level, buf_no -): + + +def _two_time_process(buf, g2, label_array, num_bufs, num_pixels, + img_per_level, lag_steps, current_img_time, + level, buf_no): """ Parameters ---------- @@ -1182,36 +1003,41 @@ def _two_time_process( if level == 0: i_min = 0 else: - i_min = num_bufs // 2 + i_min = num_bufs//2 for i in range(i_min, min(img_per_level[level], num_bufs)): - t_index = level * num_bufs / 2 + i + t_index = level*num_bufs/2 + i delay_no = (buf_no - i) % num_bufs past_img = buf[level, delay_no] - future_img = buf[level, buf_no] - - # print( np.sum( past_img ), np.sum( future_img )) - + future_img = buf[level, buf_no] + + #print( np.sum( past_img ), np.sum( future_img )) + # get the matrix of correlation function without normalizations - tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] + tmp_binned = (np.bincount(label_array, + weights=past_img*future_img)[1:]) # get the matrix of past intensity normalizations - pi_binned = np.bincount(label_array, weights=past_img)[1:] + pi_binned = (np.bincount(label_array, + weights=past_img)[1:]) # get the matrix of future intensity normalizations - fi_binned = np.bincount(label_array, weights=future_img)[1:] - - tind1 = current_img_time - 1 - tind2 = current_img_time - lag_steps[int(t_index)] - 1 - 
# print( current_img_time ) + fi_binned = (np.bincount(label_array, + weights=future_img)[1:]) + tind1 = (current_img_time - 1) + tind2 = (current_img_time - lag_steps[int(t_index)] - 1) + #print( current_img_time ) + if not isinstance(current_img_time, int): - nshift = 2 ** (level - 1) - for i in range(-nshift + 1, nshift + 1): - g2[:, int(tind1 + i), int(tind2 + i)] = (tmp_binned / (pi_binned * fi_binned)) * num_pixels + nshift = 2**(level-1) + for i in range(-nshift+1, nshift+1): + g2[:, int(tind1+i), + int(tind2+i)] = (tmp_binned/(pi_binned * + fi_binned))*num_pixels else: - g2[:, int(tind1), int(tind2)] = tmp_binned / (pi_binned * fi_binned) * num_pixels - - # print( num_pixels ) + g2[:, int(tind1), int(tind2)] = tmp_binned/(pi_binned * fi_binned)*num_pixels + + #print( num_pixels ) def _init_state_two_time(num_levels, num_bufs, labels, num_frames): @@ -1232,19 +1058,9 @@ def _init_state_two_time(num_levels, num_bufs, labels, num_frames): `lazy_two_time` requires so that it can be used to pick up processing after it was interrupted """ - ( - label_array, - pixel_list, - num_rois, - num_pixels, - lag_steps, - buf, - img_per_level, - track_level, - cur, - norm, - lev_len, - ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + (label_array, pixel_list, num_rois, num_pixels, lag_steps, + buf, img_per_level, track_level, cur, norm, + lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) # to count images in each level count_level = np.zeros(num_levels, dtype=np.int64) @@ -1275,7 +1091,6 @@ def _init_state_two_time(num_levels, num_bufs, labels, num_frames): lev_len, ) - def one_time_from_two_time(two_time_corr): """ This will provide the one-time correlation data from two-time @@ -1295,148 +1110,143 @@ def one_time_from_two_time(two_time_corr): one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2])) for g in two_time_corr: for j in range(two_time_corr.shape[2]): - one_time_corr[:, j] = np.trace(g, offset=j) / two_time_corr.shape[2] + one_time_corr[:, j] = np.trace(g, offset=j)/two_time_corr.shape[2] return one_time_corr - - -def cal_c12c(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None, imgsum=None, norm=None): - """calculation two_time correlation by using a multi-tau algorithm""" - - # noframes = FD.end - good_start # number of frames, not "no frames" - + + +def cal_c12c( FD, ring_mask, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None ): + '''calculation two_time correlation by using a multi-tau algorithm''' + + #noframes = FD.end - good_start # number of frames, not "no frames" + FD.beg = max(FD.beg, good_start) - noframes = FD.end - FD.beg # number of frames, not "no frames" - # num_buf = 8 # number of buffers + noframes = FD.end - FD.beg # number of frames, not "no frames" + #num_buf = 8 # number of buffers if num_lev is None: - num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 - print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) - if bad_frame_list is not None: - if len(bad_frame_list) != 0: - print("Bad frame involved and will be precessed!") - noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) - print("%s frames will be processed..." 
% (noframes))
-
-    c12, lag_steps, state = multi_tau_two_time_auto_corr(
-        num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm
-    )
-
-    print("Two Time Calculation is DONE!")
+    num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
+    print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev))
+    if bad_frame_list is not None:
+        if len(bad_frame_list)!=0:
+            print ('Bad frame involved and will be processed!')
+            noframes -= len(np.where(np.in1d( bad_frame_list,
+                                             range(good_start, FD.end)))[0])
+            print ('%s frames will be processed...'%(noframes))
+
+    c12, lag_steps, state = multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list,
+                                                         imgsum=imgsum, norm = norm )
+
+    print( 'Two Time Calculation is DONE!')
     m, n, n = c12.shape
-    # print( m,n,n)
-    c12_ = np.zeros([n, n, m])
-    for i in range(m):
-        c12_[:, :, i] = c12[i]
+    #print( m,n,n)
+    c12_ = np.zeros( [n,n,m] )
+    for i in range( m):
+        c12_[:,:,i ] = c12[i]
     return c12_, lag_steps


-def cal_g2c(
-    FD,
-    ring_mask,
-    bad_frame_list=None,
-    good_start=0,
-    num_buf=8,
-    num_lev=None,
-    imgsum=None,
-    norm=None,
-    cal_error=False,
-):
-    """calculation g2 by using a multi-tau algorithm"""
-
-    # noframes = FD.end - good_start # number of frames, not "no frames"
+def cal_g2c( FD, ring_mask,
+            bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None,cal_error=False ):
+    '''calculate g2 using a multi-tau algorithm'''
+
+    #noframes = FD.end - good_start # number of frames, not "no frames"
+
     FD.beg = max(FD.beg, good_start)
-    noframes = FD.end - FD.beg  # number of frames, not "no frames"
-    # num_buf = 8  # number of buffers
+    noframes = FD.end - FD.beg # number of frames, not "no frames"
+    #num_buf = 8 # number of buffers
     if num_lev is None:
-        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
-    print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev))
-    if bad_frame_list is not None:
-        if len(bad_frame_list) != 0:
-            print("Bad frame involved and will be precessed!")
-            noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0])
-
-            print("%s frames will be processed..." 
% (noframes))
+    num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
+    print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev))
+    if bad_frame_list is not None:
+        if len(bad_frame_list)!=0:
+            print ('Bad frame involved and will be processed!')
+            noframes -= len(np.where(np.in1d( bad_frame_list,
+                                range(good_start, FD.end)))[0])
+
+    print ('%s frames will be processed...'%(noframes))
     if cal_error:
-        g2, lag_steps, s = multi_tau_auto_corr(
-            num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm, cal_error=cal_error
-        )
-
-        g2 = np.zeros_like(s.G)
-        g2_err = np.zeros_like(g2)
-        qind, pixelist = extract_label_indices(ring_mask)
+        g2, lag_steps, s = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list,
+                                 imgsum=imgsum, norm = norm,cal_error=cal_error )
+
+        g2 = np.zeros_like( s.G )
+        g2_err = np.zeros_like(g2)
+        qind, pixelist = extract_label_indices(ring_mask)
         noqs = len(np.unique(qind))
-        nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
-        Ntau, Nq = s.G.shape
+        nopr = np.bincount(qind, minlength=(noqs+1))[1:]
+        Ntau, Nq = s.G.shape
         g_max = 1e30
-        for qi in range(1, 1 + Nq):
-            pixelist_qi = np.where(qind == qi)[0]
-            s_Gall_qi = s.G_all[:, pixelist_qi]
-            s_Pall_qi = s.past_intensity_all[:, pixelist_qi]
-            s_Fall_qi = s.future_intensity_all[:, pixelist_qi]
-            avgGi = np.average(s_Gall_qi, axis=1)
-            devGi = np.std(s_Gall_qi, axis=1)
-            avgPi = np.average(s_Pall_qi, axis=1)
-            devPi = np.std(s_Pall_qi, axis=1)
-            avgFi = np.average(s_Fall_qi, axis=1)
-            devFi = np.std(s_Fall_qi, axis=1)
-
+        for qi in range(1,1+Nq):
+            pixelist_qi = np.where( qind == qi)[0]
+            s_Gall_qi = s.G_all[:,pixelist_qi]
+            s_Pall_qi = s.past_intensity_all[:,pixelist_qi]
+            s_Fall_qi = s.future_intensity_all[:,pixelist_qi]
+            avgGi = (np.average( s_Gall_qi, axis=1))
+            devGi = (np.std( s_Gall_qi, axis=1))
+            avgPi = (np.average( s_Pall_qi, axis=1))
+            devPi = (np.std( s_Pall_qi, axis=1))
+            avgFi = (np.average( s_Fall_qi, axis=1))
+            devFi = (np.std( s_Fall_qi, axis=1))
+
            if len(np.where(avgPi == 0)[0]) != 0:
                g_max1 = np.where(avgPi == 0)[0][0]
            else:
-                g_max1 = avgPi.shape[0]
+                g_max1 = avgPi.shape[0]
            if len(np.where(avgFi == 0)[0]) != 0:
                g_max2 = np.where(avgFi == 0)[0][0]
            else:
-                g_max2 = avgFi.shape[0]
-            g_max = min(g_max1, g_max2)
-            # print(g_max)
-            # g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] *
+                g_max2 = avgFi.shape[0]
+            g_max = min( g_max1, g_max2)
+            #print(g_max)
+            #g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] *
            #                s.future_intensity[:g_max]))
-            g2[:g_max, qi - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max])
-            g2_err[:g_max, qi - 1] = np.sqrt(
-                (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2
-                + (avgGi[:g_max] / (avgFi[:g_max] ** 2 * avgPi[:g_max])) ** 2 * devFi[:g_max] ** 2
-                + (avgGi[:g_max] / (avgFi[:g_max] * avgPi[:g_max] ** 2)) ** 2 * devPi[:g_max] ** 2
-            )
-
-        print("G2 with error bar calculation DONE!")
-        return g2[:g_max, :], lag_steps[:g_max], g2_err[:g_max, :] / np.sqrt(nopr), s
-    else:
-        g2, lag_steps = multi_tau_auto_corr(
-            num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm, cal_error=cal_error
-        )
-
-        print("G2 calculation DONE!")
+            g2[:g_max,qi-1] = avgGi[:g_max]/( avgPi[:g_max] * avgFi[:g_max] )
+            g2_err[:g_max,qi-1] = np.sqrt(
+                ( 1/ ( avgFi[:g_max] * avgPi[:g_max] ))**2 * devGi[:g_max] ** 2 +
+                ( avgGi[:g_max]/ ( avgFi[:g_max]**2 * avgPi[:g_max] ))**2 * devFi[:g_max] ** 2 +
+                ( avgGi[:g_max]/ ( avgFi[:g_max] * avgPi[:g_max]**2 ))**2 * devPi[:g_max] ** 2
+            )
+
+        print( 'G2 with error bar 
calculation DONE!') + return g2[:g_max,:], lag_steps[:g_max], g2_err[:g_max,:]/np.sqrt(nopr), s + else: + g2, lag_steps = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm,cal_error=cal_error ) + + print( 'G2 calculation DONE!') return g2, lag_steps -def get_pixelist_interp_iq(qp, iq, ring_mask, center): - qind, pixelist = roi.extract_label_indices(ring_mask) - # pixely = pixelist%FD.md['nrows'] -center[1] - # pixelx = pixelist//FD.md['nrows'] - center[0] - - pixely = pixelist % ring_mask.shape[1] - center[1] - pixelx = pixelist // ring_mask.shape[1] - center[0] - - r = np.hypot(pixelx, pixely) # leave as float. - # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 - return np.interp(r, qp, iq) - +def get_pixelist_interp_iq( qp, iq, ring_mask, center): + + qind, pixelist = roi.extract_label_indices( ring_mask ) + #pixely = pixelist%FD.md['nrows'] -center[1] + #pixelx = pixelist//FD.md['nrows'] - center[0] + + pixely = pixelist%ring_mask.shape[1] -center[1] + pixelx = pixelist//ring_mask.shape[1] - center[0] + + r= np.hypot(pixelx, pixely) #leave as float. + #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return np.interp( r, qp, iq ) + + class Get_Pixel_Arrayc_todo(object): - """ - a class to get intested pixels from a images sequence, - load ROI of all images into memory + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory get_data: to get a 2-D array, shape as (len(images), len(pixellist)) - - One example: + + One example: data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() - """ - - def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, norm_inten=None, qind=None): - """ + ''' + + def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, + norm_inten = None, qind=None): + ''' indexable: a images sequences pixelist: 1-D array, interest pixel list norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity @@ -1444,428 +1254,423 @@ def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, nor norm_inten: if True, each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame qind: the index of each ROI in one frame, i.e., q if norm_inten is True: qind has to be given - - """ + + ''' if beg is None: self.beg = FD.beg if end is None: self.end = FD.end - # if self.beg ==0: + #if self.beg ==0: # self.length = self.end - self.beg - # else: + #else: # self.length = self.end - self.beg + 1 - - self.length = self.end - self.beg - + + self.length = self.end - self.beg + self.FD = FD - self.pixelist = pixelist - self.norm = norm + self.pixelist = pixelist + self.norm = norm self.imgsum = imgsum - self.norm_inten = norm_inten + self.norm_inten= norm_inten self.qind = qind if self.norm_inten is not None: if self.qind is None: - print("Please give qind.") - - def get_data(self): - """ + print('Please give qind.') + + def get_data(self ): + ''' To get intested pixels array Return: 2-D array, shape as (len(images), len(pixellist)) - """ - - data_array = np.zeros( - [self.length, len(self.pixelist)], dtype=np.float64 - ) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 - # fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros(self.FD.md["ncols"] * self.FD.md["nrows"], dtype=np.int32) - timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) - + ''' + + data_array = np.zeros([ self.length,len(self.pixelist)], 
dtype=np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + if self.norm_inten is not None: - # Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) - Mean_Int_Qind = np.ones( - len(self.qind), dtype=np.float64 - ) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 - noqs = len(np.unique(self.qind)) - nopr = np.bincount(self.qind - 1) - noprs = np.concatenate([np.array([0]), np.cumsum(nopr)]) - qind_ = np.zeros_like(self.qind) + #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + noqs = len(np.unique( self.qind )) + nopr = np.bincount(self.qind-1) + noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) + qind_ = np.zeros_like( self.qind ) for j in range(noqs): - qind_[noprs[j] : noprs[j + 1]] = np.where(self.qind == j + 1)[0] - - n = 0 - for i in tqdm(range(self.beg, self.end)): - (p, v) = self.FD.rdrawframe(i) - w = np.where(timg[p])[0] - pxlist = timg[p[w]] - 1 - # np.bincount( qind[pxlist], weight= - - if self.mean_int_sets is not None: # for each frame will normalize each ROI by it's averaged value + qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] + + n=0 + for i in tqdm(range( self.beg , self.end )): + (p,v) = self.FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + #np.bincount( qind[pxlist], weight= + + + if self.mean_int_sets is not None:#for each frame will normalize each ROI by it's averaged value for j in range(noqs): - # if i ==100: + #if i ==100: # if j==0: - # print( self.mean_int_sets[i][j] ) + # print( self.mean_int_sets[i][j] ) # print( qind_[ noprs[j]: noprs[j+1] ] ) - Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] - norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] - - # if i==100: + Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] + + #if i==100: # print( i, Mean_Int_Qind[ self.qind== 11 ]) - # print('Do norm_mean_int here') - # if i ==10: + #print('Do norm_mean_int here') + #if i ==10: # print( norm_Mean_Int_Qind ) else: - norm_Mean_Int_Qind = 1.0 - if self.imgsum is not None: - norm_imgsum = self.imgsum[i] + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] else: - norm_imgsum = 1.0 + norm_imgsum = 1.0 if self.norm is not None: - norm_avgimg_roi = self.norm[pxlist] + norm_avgimg_roi = self.norm[pxlist] else: - norm_avgimg_roi = 1.0 - - norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi - # if i==100: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + #if i==100: # print(norm_Mean_Int_Qind[:100]) - data_array[n][pxlist] = v[w] / norms - n += 1 - - return data_array - - + data_array[n][ pxlist] = v[w]/ norms + n +=1 + + return data_array + + + + + class Get_Pixel_Arrayc(object): - """ - a class to get intested pixels from a images sequence, - load ROI of all images into memory + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory get_data: to get a 2-D array, shape as (len(images), len(pixellist)) - - One example: + + 
One example: data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() - """ - - def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, mean_int_sets=None, qind=None): - """ + ''' + + def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, + mean_int_sets = None, qind=None ): + ''' indexable: a images sequences pixelist: 1-D array, interest pixel list norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] mean_int_sets: each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame qind: the index of each ROI in one frame, i.e., q - if mean_int_sets is not None: qind has to be not None - - """ + if mean_int_sets is not None: qind has to be not None + + ''' if beg is None: self.beg = FD.beg if end is None: self.end = FD.end - # if self.beg ==0: + #if self.beg ==0: # self.length = self.end - self.beg - # else: + #else: # self.length = self.end - self.beg + 1 - - self.length = self.end - self.beg - + + self.length = self.end - self.beg + self.FD = FD - self.pixelist = pixelist - self.norm = norm + self.pixelist = pixelist + self.norm = norm self.imgsum = imgsum - self.mean_int_sets = mean_int_sets + self.mean_int_sets= mean_int_sets self.qind = qind if self.mean_int_sets is not None: if self.qind is None: - print("Please give qind.") - - def get_data(self): - """ + print('Please give qind.') + + def get_data(self ): + ''' To get intested pixels array Return: 2-D array, shape as (len(images), len(pixellist)) - """ - - data_array = np.zeros([self.length, len(self.pixelist)], dtype=np.float64) - # fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros(self.FD.md["ncols"] * self.FD.md["nrows"], dtype=np.int32) - timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) - + ''' + + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + if self.mean_int_sets is not None: - # Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) - Mean_Int_Qind = np.ones(len(self.qind), dtype=np.float64) - noqs = len(np.unique(self.qind)) - nopr = np.bincount(self.qind - 1) - noprs = np.concatenate([np.array([0]), np.cumsum(nopr)]) - qind_ = np.zeros_like(self.qind) + #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) + noqs = len(np.unique( self.qind )) + nopr = np.bincount(self.qind-1) + noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) + qind_ = np.zeros_like( self.qind ) for j in range(noqs): - qind_[noprs[j] : noprs[j + 1]] = np.where(self.qind == j + 1)[0] - - n = 0 - for i in tqdm(range(self.beg, self.end)): - (p, v) = self.FD.rdrawframe(i) - w = np.where(timg[p])[0] - pxlist = timg[p[w]] - 1 - - if self.mean_int_sets is not None: # for normalization of each averaged ROI of each frame + qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] + + n=0 + for i in tqdm(range( self.beg , self.end )): + (p,v) = self.FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + + if self.mean_int_sets is not None:#for normalization of each averaged ROI of each frame 
for j in range(noqs): - # if i ==100: + #if i ==100: # if j==0: - # print( self.mean_int_sets[i][j] ) + # print( self.mean_int_sets[i][j] ) # print( qind_[ noprs[j]: noprs[j+1] ] ) - Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] - norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] - - # if i==100: + Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] + + #if i==100: # print( i, Mean_Int_Qind[ self.qind== 11 ]) - # print('Do norm_mean_int here') - # if i ==10: + #print('Do norm_mean_int here') + #if i ==10: # print( norm_Mean_Int_Qind ) else: - norm_Mean_Int_Qind = 1.0 - if self.imgsum is not None: - norm_imgsum = self.imgsum[i] + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] else: - norm_imgsum = 1.0 + norm_imgsum = 1.0 if self.norm is not None: - if len((self.norm).shape) > 1: - norm_avgimg_roi = self.norm[i][pxlist] - # print('here') - - else: - norm_avgimg_roi = self.norm[pxlist] + if len( (self.norm).shape )>1: + norm_avgimg_roi = self.norm[i][pxlist] + #print('here') + + else: + norm_avgimg_roi = self.norm[pxlist] else: - norm_avgimg_roi = 1.0 - - norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi - # if i==100: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + #if i==100: # print(norm_Mean_Int_Qind[:100]) - data_array[n][pxlist] = v[w] / norms - n += 1 - - return data_array + data_array[n][ pxlist] = v[w]/ norms + n +=1 + + return data_array -def auto_two_Arrayc(data_pixel, rois, index=None): - """ +def auto_two_Arrayc( data_pixel, rois, index=None): + + ''' Dec 16, 2015, Y.G.@CHX - a numpy operation method to get two-time correlation function - + a numpy operation method to get two-time correlation function + Parameters: data: images sequence, shape as [img[0], img[1], imgs_length] rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs Options: - - data_pixel: if not None, + + data_pixel: if not None, 2-D array, shape as (len(images), len(qind)), - use function Get_Pixel_Array( ).get_data( ) to get - - + use function Get_Pixel_Array( ).get_data( ) to get + + Return: g12: a 3-D array, shape as ( imgs_length, imgs_length, q) - - One example: - g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) - """ - - qind, pixelist = roi.extract_label_indices(rois) - noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs + 1))[1:] - noframes = data_pixel.shape[0] + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] if index is None: - index = np.arange(1, noqs + 1) + index = np.arange( 1, noqs + 1 ) else: try: len(index) - index = np.array(index) + index = np.array( index ) except TypeError: - index = np.array([index]) - # print( index ) - qlist = np.arange(1, noqs + 1)[index - 1] - # print( qlist ) + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) try: - g12b = np.zeros([noframes, noframes, len(qlist)]) + g12b = np.zeros( [noframes, noframes, len(qlist) ] ) DO = True except: - print( - "The array is too large. The Sever can't handle such big array. 
Will calulate different Q sequencely"
-        )
-        """TO be done here """
-        DO = False
-
+        print("The array is too large. The server can't handle such big array. Will calculate different Q sequentially")
+        '''TO be done here '''
+        DO = False
+
     if DO:
         i = 0
-        for qi in tqdm(qlist):
-            # print (qi-1)
-            pixelist_qi = np.where(qind == qi)[0]
-            # print (pixelist_qi.shape, data_pixel[qi].shape)
-            data_pixel_qi = data_pixel[:, pixelist_qi]
-            sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes)
-            sum2 = sum1.T
-            # print( qi, qlist, )
-            # print( g12b[:,:,qi -1 ] )
-            g12b[:, :, i] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1]
-            i += 1
+        for qi in tqdm(qlist ):
+            #print (qi-1)
+            pixelist_qi = np.where( qind == qi)[0]
+            #print (pixelist_qi.shape, data_pixel[qi].shape)
+            data_pixel_qi = data_pixel[:,pixelist_qi]
+            sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes )
+            sum2 = sum1.T
+            #print( qi, qlist, )
+            #print( g12b[:,:,qi -1 ] )
+            g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1]
+            i +=1
     return g12b

-def auto_two_Arrayc_ExplicitNorm(data_pixel, rois, norm=None, index=None):
-    """
+def auto_two_Arrayc_ExplicitNorm( data_pixel, rois, norm=None, index=None):
+
+    '''
     Dec 16, 2015, Y.G.@CHX
     a numpy operation method to get two-time correlation function by giving explict normalization

     Parameters:
         data: images sequence, shape as [img[0], img[1], imgs_length]
         rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs
         norm: if not None, shoud be the shape as data_pixel, will normalize two time by this norm
               if None, will return two time without normalization

     Options:

-    data_pixel: if not None,
+
+    data_pixel: if not None,
                 2-D array, shape as (len(images), len(qind)),
-                use function Get_Pixel_Array( ).get_data( ) to get
-
-
+                use function Get_Pixel_Array( ).get_data( ) to get
+
+
     Return:
         g12: a 3-D array, shape as ( imgs_length, imgs_length, q)

-    One example:
-        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel )
-    """
-
-    qind, pixelist = roi.extract_label_indices(rois)
-    noqs = len(np.unique(qind))
-    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
-    noframes = data_pixel.shape[0]
+
+    One example:
+        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel )
+    '''
+
+    qind, pixelist = roi.extract_label_indices( rois )
+    noqs = len( np.unique(qind) )
+    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
+    noframes = data_pixel.shape[0]
     if index is None:
-        index = np.arange(1, noqs + 1)
+        index = np.arange( 1, noqs + 1 )
     else:
         try:
             len(index)
-            index = np.array(index)
+            index = np.array( index )
         except TypeError:
-            index = np.array([index])
-    # print( index )
-    qlist = np.arange(1, noqs + 1)[index - 1]
-    # print( qlist )
+            index = np.array( [index] )
+    #print( index )
+    qlist = np.arange( 1, noqs + 1 )[ index -1 ]
+    #print( qlist )
     try:
-        g12b = np.zeros([noframes, noframes, len(qlist)])
+        g12b = np.zeros( [noframes, noframes, len(qlist) ] )
         DO = True
     except:
-        print(
-            "The array is too large. The Sever can't handle such big array. 
Will calulate different Q sequencely") + '''TO be done here ''' + DO = False if DO: i = 0 - for qi in tqdm(qlist): - pixelist_qi = np.where(qind == qi)[0] - data_pixel_qi = data_pixel[:, pixelist_qi] + for qi in tqdm(qlist ): + pixelist_qi = np.where( qind == qi)[0] + data_pixel_qi = data_pixel[:,pixelist_qi] if norm is not None: - norm1 = norm[:, pixelist_qi] - sum1 = (np.average(norm1, axis=1)).reshape(1, noframes) - sum2 = sum1.T + norm1 = norm[:,pixelist_qi] + sum1 = (np.average( norm1, axis=1)).reshape( 1, noframes ) + sum2 = sum1.T else: - sum1 = 1 - sum2 = 1 - g12b[:, :, i] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] - i += 1 + sum1=1 + sum2=1 + g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2/ nopr[qi -1] + i +=1 return g12b - - -def two_time_norm(data_pixel, rois, index=None): - """ + + +def two_time_norm( data_pixel, rois, index=None): + + ''' Dec 16, 2015, Y.G.@CHX - a numpy operation method to get two-time correlation function - + a numpy operation method to get two-time correlation function + Parameters: data: images sequence, shape as [img[0], img[1], imgs_length] rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs - + Options: - - data_pixel: if not None, + + data_pixel: if not None, 2-D array, shape as (len(images), len(qind)), - use function Get_Pixel_Array( ).get_data( ) to get - - + use function Get_Pixel_Array( ).get_data( ) to get + + Return: g12: a 3-D array, shape as ( imgs_length, imgs_length, q) - - One example: - g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) - """ - - qind, pixelist = roi.extract_label_indices(rois) - noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs + 1))[1:] - noframes = data_pixel.shape[0] + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] if index is None: - index = np.arange(1, noqs + 1) + index = np.arange( 1, noqs + 1 ) else: try: len(index) - index = np.array(index) + index = np.array( index ) except TypeError: - index = np.array([index]) - # print( index ) - qlist = np.arange(1, noqs + 1)[index - 1] - # print( qlist ) + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) try: - norm = np.zeros(len(qlist)) + norm = np.zeros( len(qlist) ) DO = True except: - print( - "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" - ) - """TO be done here """ - DO = False - + print("The array is too large. The Sever can't handle such big array. 
Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + if DO: i = 0 - for qi in tqdm(qlist): - # print (qi-1) - pixelist_qi = np.where(qind == qi)[0] - # print (pixelist_qi.shape, data_pixel[qi].shape) - data_pixel_qi = data_pixel[:, pixelist_qi] - sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes) - norm[i] = np.average(sum1) - # sum2 = sum1.T - # print( qi, qlist, ) - # print( g12b[:,:,qi -1 ] ) - # g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] - i += 1 + for qi in tqdm(qlist ): + #print (qi-1) + pixelist_qi = np.where( qind == qi)[0] + #print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:,pixelist_qi] + sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes ) + norm[i] = np.average(sum1 ) + #sum2 = sum1.T + #print( qi, qlist, ) + #print( g12b[:,:,qi -1 ] ) + #g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] + i +=1 return norm + + -def check_normalization(frame_num, q_list, imgsa, data_pixel): - """check the ROI intensity before and after normalization +def check_normalization( frame_num, q_list, imgsa, data_pixel ): + '''check the ROI intensity before and after normalization Input: frame_num: integer, the number of frame to be checked q_list: list of integer, the list of q to be checked imgsa: the raw data data_pixel: the normalized data, caculated by fucntion Get_Pixel_Arrayc - Plot the intensities - """ - fig, ax = plt.subplots(2) - n = 0 + Plot the intensities + ''' + fig,ax=plt.subplots(2) + n=0 for q in q_list: - norm_data = data_pixel[frame_num][qind == q] - raw_data = np.ravel(np.array(imgsa[frame_num]))[pixelist[qind == q]] - # print(raw_data.mean()) - plot1D(raw_data, ax=ax[0], legend="q=%s" % (q), m=markers[n], title="fra=%s_raw_data" % (frame_num)) - - # plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], + norm_data = data_pixel[frame_num][qind==q] + raw_data = np.ravel( np.array(imgsa[frame_num]) )[pixelist[qind==q]] + #print(raw_data.mean()) + plot1D( raw_data,ax=ax[0], legend='q=%s'%(q), m=markers[n], + title='fra=%s_raw_data'%(frame_num)) + + #plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) - # print( mean_int_sets_[frame_num][q-1] ) - plot1D( - norm_data, - ax=ax[1], - legend="q=%s" % (q), - m=markers[n], - xlabel="pixel", - title="fra=%s_norm_data" % (frame_num), - ) - n += 1 + #print( mean_int_sets_[frame_num][q-1] ) + plot1D( norm_data, ax=ax[1], legend='q=%s'%(q), m=markers[n], + xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + n +=1 + diff --git a/pyCHX/chx_generic_functions.py b/pyCHX/chx_generic_functions.py index 0f5c994..d0a88e0 100644 --- a/pyCHX/chx_generic_functions.py +++ b/pyCHX/chx_generic_functions.py @@ -1,50 +1,29 @@ -import copy -import datetime -from os import listdir -from shutil import copyfile +from pyCHX.chx_libs import * +#from tqdm import * +from pyCHX.chx_libs import ( colors, markers ) +from scipy.special import erf +from skimage.filters import prewitt +from skimage.draw import line_aa, line, polygon, ellipse, disk + +from modest_image import imshow import matplotlib.cm as mcm -import numpy as np -import PIL -import pytz -import scipy from matplotlib import cm -from modest_image import imshow -from scipy.special import erf -from skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta, twotheta_to_q -from skimage.draw import disk, ellipse, line, 
line_aa, polygon -from skimage.filters import prewitt +import copy, scipy +import PIL +from shutil import copyfile +import datetime, pytz +from skbeam.core.utils import radial_grid, angle_grid, radius_to_twotheta, twotheta_to_q +from os import listdir +import numpy as np + + +markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H', + 'h', '*', 'd', + '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] +markers = np.array( markers *100 ) + -# from tqdm import * -from pyCHX.chx_libs import * -from pyCHX.chx_libs import colors, markers - -markers = [ - "o", - "D", - "v", - "^", - "<", - ">", - "p", - "s", - "H", - "h", - "*", - "d", - "8", - "1", - "3", - "2", - "4", - "+", - "x", - "_", - "|", - ",", - "1", -] -markers = np.array(markers * 100) flatten_nestlist = lambda l: [item for sublist in l for item in sublist] @@ -54,45 +33,46 @@ """ -def get_frames_from_dscan(uid, detector="eiger4m_single_image"): - """Get frames from a dscan by giving uid and detector""" +def get_frames_from_dscan( uid, detector = 'eiger4m_single_image' ): + '''Get frames from a dscan by giving uid and detector ''' hdr = db[uid] - return db.get_images(hdr, detector) + return db.get_images(hdr, detector ) -def get_roi_intensity(img, roi_mask): +def get_roi_intensity( img, roi_mask): qind, pixelist = roi.extract_label_indices(roi_mask) noqs = len(np.unique(qind)) avgs = np.zeros(noqs) - for i in tqdm(range(1, 1 + noqs)): - avgs[i - 1] = np.average(img[roi_mask == i]) + for i in tqdm( range(1,1+noqs)): + avgs[i-1] = ( np.average( img[roi_mask==i] ) ) return avgs def generate_h5_list(inDir, filename): - """YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + '''YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir Input: inDir: the input direction filename: the filename for output (have to lst as extension) Output: Save the all h5 filenames in a lst file - """ - fp_list = listdir(inDir) - if filename[-4:] != ".lst": - filename += ".lst" + ''' + fp_list = listdir( inDir ) + if filename[-4:] !='.lst': + filename += '.lst' for FP in fp_list: - FP_ = inDir + FP + FP_ = inDir+FP if os.path.isdir(FP_): - fp = listdir(FP_) + fp = listdir( FP_ ) for fp_ in fp: - if ".h5" in fp_: - append_txtfile(filename=filename, data=np.array([FP_ + "/" + fp_])) - print("The full path of all the .h5 in %s has been saved in %s." 
% (inDir, filename)) - print("You can use ./analysis/run_gui to visualize all the h5 file.") + if '.h5' in fp_: + append_txtfile( filename = filename, + data = np.array( [ FP_+'/'+fp_ ])) + print('The full path of all the .h5 in %s has been saved in %s.'%(inDir, filename)) + print( 'You can use ./analysis/run_gui to visualize all the h5 file.') -def fit_one_peak_curve(x, y, fit_range=None): - """YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape +def fit_one_peak_curve( x,y, fit_range=None ): + '''YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape Parameters: x: one-d array, x-axis data y: one-d array, y-axis data @@ -105,185 +85,163 @@ def fit_one_peak_curve(x, y, fit_range=None): xf: the x in the fit out: the fitting class resutled from lmfit - """ + ''' from lmfit.models import LinearModel, LorentzianModel - peak = LorentzianModel() background = LinearModel() model = peak + background - if fit_range is not None: - x1, x2 = fit_range - xf = x[x1:x2] + if fit_range != None: + x1,x2=fit_range + xf= x[x1:x2] yf = y[x1:x2] else: - xf = x - yf = y - model.set_param_hint("slope", value=5) - model.set_param_hint("intercept", value=0) - model.set_param_hint("center", value=0.005) - model.set_param_hint("amplitude", value=0.1) - model.set_param_hint("sigma", value=0.003) - # out=model.fit(yf, x=xf)#, method='nelder') - out = model.fit(yf, x=xf, method="leastsq") - cen = out.params["center"].value - cen_std = out.params["center"].stderr - wid = out.params["sigma"].value * 2 - wid_std = out.params["sigma"].stderr * 2 - return cen, cen_std, wid, wid_std, xf, out - - -def plot_xy_with_fit( - x, - y, - xf, - out, - cen, - cen_std, - wid, - wid_std, - xlim=[1e-3, 0.01], - xlabel="q (" r"$\AA^{-1}$)", - ylabel="I(q)", - filename=None, -): - """YG Dev@Aug 10, 2019 to plot x,y with fit, - currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid""" - - yf2 = out.model.eval(params=out.params, x=xf) - fig, ax = plt.subplots() - plot1D(x=x, y=y, ax=ax, m="o", ls="", c="k", legend="data") - plot1D(x=xf, y=yf2, ax=ax, m="", ls="-", c="r", legend="fit", logy=True) - ax.set_xlim(xlim) - # ax.set_ylim( 0.1, 4) - # ax.set_title(uid+'--t=%.2f'%tt) - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) - txts = r"peak" + r" = %.5f +/- %.5f " % (cen, cen_std) - ax.text(x=0.02, y=0.2, s=txts, fontsize=14, transform=ax.transAxes) - txts = r"wid" + r" = %.4f +/- %.4f" % (wid, wid_std) - # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' - ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) + xf = x + yf = y + model.set_param_hint('slope', value=5 ) + model.set_param_hint('intercept', value=0 ) + model.set_param_hint('center', value=0.005 ) + model.set_param_hint('amplitude', value= 0.1 ) + model.set_param_hint('sigma', value=0.003 ) + #out=model.fit(yf, x=xf)#, method='nelder') + out=model.fit(yf, x=xf, method= 'leastsq' ) + cen = out.params['center'].value + cen_std = out.params['center'].stderr + wid = out.params['sigma'].value *2 + wid_std = out.params['sigma'].stderr *2 + return cen, cen_std, wid, wid_std , xf, out + + +def plot_xy_with_fit( x, y, xf, out, + cen, cen_std,wid, wid_std, + xlim=[1e-3,0.01],xlabel= 'q ('r'$\AA^{-1}$)', + ylabel='I(q)', filename=None): + '''YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid ''' + + yf2=out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots( ) + plot1D(x=x,y=y,ax=ax,m='o', 
ls='',c='k', legend='data') + plot1D(x=xf,y=yf2,ax=ax,m='', ls='-',c='r', legend='fit',logy=True) + ax.set_xlim( xlim ) + #ax.set_ylim( 0.1, 4) + #ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel( xlabel ) + ax.set_ylabel(ylabel ) + txts = r'peak' + r' = %.5f +/- %.5f '%( cen, cen_std ) + ax.text(x =0.02, y=.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r'wid' + r' = %.4f +/- %.4f'%( wid, wid_std) + #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x =0.02, y=.1, s=txts, fontsize=14, transform=ax.transAxes) plt.tight_layout() - if filename is not None: - plt.savefig(filename) + if filename != None: + plt.savefig( filename ) return ax -def get_touched_qwidth(qcenters): - """YG Dev@CHX April 2019, get touched qwidth by giving qcenters""" + + + +def get_touched_qwidth( qcenters ): + '''YG Dev@CHX April 2019, get touched qwidth by giving qcenters + ''' qwX = np.zeros_like(qcenters) - qW = qcenters[1:] - qcenters[:-1] + qW= qcenters[1:] - qcenters[:-1] qwX[0] = qW[0] - for i in range(1, len(qcenters) - 1): - # print(i) - qwX[i] = min(qW[i - 1], qW[i]) + for i in range(1,len(qcenters)-1): + #print(i) + qwX[i] = min( qW[i-1], qW[i] ) qwX[-1] = qW[-1] - qwX *= 0.9999 + qwX *=0.9999 return qwX -def append_txtfile(filename, data, fmt="%s", *argv, **kwargs): - """YG. Dev May 10, 2109 append data to a file + +def append_txtfile( filename, data, fmt='%s', *argv,**kwargs ): + '''YG. Dev May 10, 2109 append data to a file Create an empty file if the file dose not exist, otherwise, will append the data to it Input: fp: filename data: the data to be append fmt: the parameter defined in np.savetxt - """ + ''' from numpy import savetxt - - exists = os.path.isfile(filename) + exists = os.path.isfile( filename) if not exists: - np.savetxt( - filename, - [], - fmt="%s", - ) - print("create new file") - - f = open(filename, "a") - savetxt(f, data, fmt=fmt, *argv, **kwargs) - f.close() + np.savetxt( filename, [ ] , fmt='%s', ) + print('create new file') + f=open( filename, 'a') + savetxt( f, data, fmt = fmt , *argv,**kwargs ) + f.close() -def get_roi_mask_qval_qwid_by_shift( - new_cen, new_mask, old_cen, old_roi_mask, setup_pargs, geometry, limit_qnum=None -): - """YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask""" - center = setup_pargs["center"] - roi_mask1 = shift_mask( - new_cen=center, new_mask=new_mask, old_cen=old_cen, old_roi_mask=old_roi_mask, limit_qnum=limit_qnum - ) +def get_roi_mask_qval_qwid_by_shift( new_cen, new_mask, old_cen,old_roi_mask, + setup_pargs, geometry, + limit_qnum= None): + '''YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask''' + center=setup_pargs['center'] + roi_mask1 = shift_mask( new_cen=center, new_mask=new_mask, old_cen=old_cen, + old_roi_mask=old_roi_mask, limit_qnum= limit_qnum) qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( - new_mask=new_mask, setup_pargs=setup_pargs, old_roi_mask=old_roi_mask, old_cen=old_cen, geometry=geometry - ) - w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1, new_mask) - # print(w,w1) - qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k in w1} - qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k in w1} - qval_dict = {} - qwid_dict = {} - for i, k in enumerate(list(qval_dictx.keys())): + new_mask=new_mask, setup_pargs=setup_pargs, + old_roi_mask=old_roi_mask, old_cen=old_cen, geometry = geometry ) + w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1,new_mask) + #print(w,w1) + qval_dictx 
= { k:v for (k,v) in list(qval_dict_.items()) if k in w1 } + qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k in w1 } + qval_dict={} + qwid_dict={} + for i, k in enumerate( list(qval_dictx.keys())): qval_dict[i] = qval_dictx[k] qwid_dict[i] = qwid_dictx[k] return roi_mask1, qval_dict, qwid_dict -def get_zero_nozero_qind_from_roi_mask(roi_mask, mask): - """YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number""" - qind, pixelist = roi.extract_label_indices(roi_mask * mask) +def get_zero_nozero_qind_from_roi_mask(roi_mask,mask): + '''YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number''' + qind, pixelist = roi.extract_label_indices(roi_mask*mask) noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs + 1))[1:] - w = np.where(nopr == 0)[0] - w1 = np.where(nopr != 0)[0] + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + w=np.where(nopr==0)[0] + w1=np.where(nopr!=0)[0] return w, w1 -def get_masked_qval_qwid_dict_using_Rmax(new_mask, setup_pargs, old_roi_mask, old_cen, geometry): - """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method""" - cy, cx = setup_pargs["center"] - my, mx = new_mask.shape - Rmax = int( - np.ceil(max(np.hypot(cx, cy), np.hypot(cx - mx, cy - my), np.hypot(cx, cy - my), np.hypot(cx - mx, cy))) - ) - Fmask = np.zeros([Rmax * 2, Rmax * 2], dtype=int) - Fmask[Rmax - cy : Rmax - cy + my, Rmax - cx : Rmax - cx + mx] = new_mask - roi_mask1 = shift_mask( - new_cen=[Rmax, Rmax], - new_mask=np.ones_like(Fmask), - old_cen=old_cen, - old_roi_mask=old_roi_mask, - limit_qnum=None, - ) - setup_pargs_ = { - "center": [Rmax, Rmax], - "dpix": setup_pargs["dpix"], - "Ldet": setup_pargs["Ldet"], - "lambda_": setup_pargs["lambda_"], - } - qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict(roi_mask1, Fmask, setup_pargs_, geometry) - # w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) - return qval_dict1, qwid_dict1 # ,w +def get_masked_qval_qwid_dict_using_Rmax( new_mask, setup_pargs, old_roi_mask, old_cen, geometry ): + '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method ''' + cy,cx= setup_pargs['center'] + my,mx=new_mask.shape + Rmax = int(np.ceil(max( np.hypot(cx,cy),np.hypot(cx-mx,cy-my),np.hypot(cx,cy-my),np.hypot(cx-mx,cy) ))) + Fmask = np.zeros([Rmax*2,Rmax*2],dtype=int) + Fmask[ Rmax-cy : Rmax-cy+my, Rmax-cx: Rmax-cx + mx]=new_mask + roi_mask1 = shift_mask( new_cen=[Rmax,Rmax], new_mask=np.ones_like(Fmask), old_cen=old_cen, + old_roi_mask=old_roi_mask, limit_qnum= None) + setup_pargs_={ 'center':[Rmax,Rmax], 'dpix': setup_pargs['dpix'], 'Ldet': setup_pargs['Ldet'], + 'lambda_': setup_pargs['lambda_'], } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict( roi_mask1, Fmask, setup_pargs_, geometry ) + #w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1#,w -def get_masked_qval_qwid_dict(roi_mask, mask, setup_pargs, geometry): - """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask""" - qval_dict_, qwid_dict_ = get_qval_qwid_dict(roi_mask, setup_pargs, geometry=geometry) - w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask, mask) - qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k not in w} - qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k not in w} - qval_dict = {} - qwid_dict = {} - for i, k in enumerate(list(qval_dictx.keys())): + +def get_masked_qval_qwid_dict( roi_mask, mask, setup_pargs, geometry ): + '''YG Dev April 
22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask ''' + + qval_dict_, qwid_dict_ = get_qval_qwid_dict( roi_mask, setup_pargs, geometry= geometry) + w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask,mask) + qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k not in w } + qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k not in w } + qval_dict={} + qwid_dict={} + for i, k in enumerate( list(qval_dictx.keys())): qval_dict[i] = qval_dictx[k] qwid_dict[i] = qwid_dictx[k] return qval_dict, qwid_dict -def get_qval_qwid_dict(roi_mask, setup_pargs, geometry="saxs"): - """YG Dev April 6, 2019 +def get_qval_qwid_dict( roi_mask, setup_pargs, geometry='saxs'): + '''YG Dev April 6, 2019 Get qval_dict and qwid_dict by giving roi_mask, setup_pargs Input: roi_mask: integer type 2D array @@ -308,67 +266,68 @@ def get_qval_qwid_dict(roi_mask, setup_pargs, geometry="saxs"): TODOLIST: to make GiSAXS work - """ + ''' - origin = setup_pargs["center"] # [::-1] + origin = setup_pargs['center']#[::-1] shape = roi_mask.shape qp_map = radial_grid(origin, shape) - phi_map = np.degrees(angle_grid(origin, shape)) - two_theta = radius_to_twotheta(setup_pargs["Ldet"], setup_pargs["dpix"] * qp_map) - q_map = utils.twotheta_to_q(two_theta, setup_pargs["lambda_"]) + phi_map = np.degrees( angle_grid(origin, shape) ) + two_theta = radius_to_twotheta( setup_pargs['Ldet'], setup_pargs['dpix'] * qp_map ) + q_map = utils.twotheta_to_q(two_theta, setup_pargs['lambda_']) qind, pixelist = roi.extract_label_indices(roi_mask) Qval = np.unique(qind) qval_dict_ = {} qwid_dict_ = {} - for j, i in enumerate(Qval): - qval = q_map[roi_mask == i] - # print( qval ) - if geometry == "saxs": - qval_dict_[j] = [(qval.max() + qval.min()) / 2] # np.mean(qval) - qwid_dict_[j] = [(qval.max() - qval.min())] - - elif geometry == "ang_saxs": - aval = phi_map[roi_mask == i] - # print(j,i,qval, aval) + for j, i in enumerate( Qval): + qval = q_map[ roi_mask == i ] + #print( qval ) + if geometry=='saxs': + qval_dict_[j] = [( qval.max() + qval.min() )/2] # np.mean(qval) + qwid_dict_[j] = [( qval.max() - qval.min() ) ] + + elif geometry=='ang_saxs': + aval = phi_map[ roi_mask == i ] + #print(j,i,qval, aval) qval_dict_[j] = np.zeros(2) qwid_dict_[j] = np.zeros(2) - qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) - qwid_dict_[j][0] = qval.max() - qval.min() + qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) + qwid_dict_[j][0] = ( qval.max() - qval.min() ) - if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): - qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) - qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) - # print('here -- %s'%j) + if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) + #print('here -- %s'%j) else: - qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) - qwid_dict_[j][1] = abs(aval.max() - aval.min()) + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() ) - elif geometry == "flow_saxs": - sx, sy = roi_mask.shape - cx, cy = origin - aval = (phi_map[cx:])[roi_mask[cx:] == i] - if len(aval) == 0: - aval = (phi_map[:cx])[roi_mask[:cx] == i] + 180 + elif geometry=='flow_saxs': + sx,sy = roi_mask.shape + cx,cy = origin + aval = (phi_map[cx:])[ roi_mask[cx:] == i ] + if len(aval)==0: + aval = (phi_map[:cx])[ roi_mask[:cx] == i ] + 180 
qval_dict_[j] = np.zeros(2) qwid_dict_[j] = np.zeros(2) - qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) - qwid_dict_[j][0] = qval.max() - qval.min() - # print(aval) - if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): - qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) - qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) - # print('here -- %s'%j) + qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) + qwid_dict_[j][0] = ( qval.max() - qval.min() ) + #print(aval) + if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) + #print('here -- %s'%j) else: - qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) - qwid_dict_[j][1] = abs(aval.max() - aval.min()) + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() ) return qval_dict_, qwid_dict_ -def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): - """Get normalization of a time series by SavitzkyGolay filter + +def get_SG_norm( FD, pixelist, bins=1, mask=None, window_size= 11, order= 5 ): + '''Get normalization of a time series by SavitzkyGolay filter Input: FD: file handler for a compressed data pixelist: pixel list for a roi_mask @@ -378,65 +337,64 @@ def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details Return: norm: shape as ( length of FD, length of pixelist ) - """ - if mask is None: + ''' + if mask == None: mask = 1 beg = FD.beg end = FD.end - N = end - beg + N = end-beg BEG = beg - if bins == 1: + if bins==1: END = end NB = N - MOD = 0 + MOD=0 else: - END = N // bins - MOD = N % bins + END = N//bins + MOD = N%bins NB = END - norm = np.zeros([end, len(pixelist)]) - for i in tqdm(range(NB)): + norm = np.zeros( [ end, len(pixelist) ] ) + for i in tqdm( range( NB ) ): if bins == 1: img = FD.rdframe(i + BEG) else: - for j in range(bins): - ct = i * bins + j + BEG - # print(ct) - if j == 0: - img = FD.rdframe(ct) + for j in range( bins): + ct = i * bins + j + BEG + #print(ct) + if j==0: + img = FD.rdframe( ct ) n = 1.0 else: - (p, v) = FD.rdrawframe(ct) - np.ravel(img)[p] += v - # img += FD.rdframe( ct ) + (p,v) = FD.rdrawframe(ct) + np.ravel( img )[p] += v + #img += FD.rdframe( ct ) n += 1 - img /= n - avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + img /= n + avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask normi = np.ravel(avg_imgf)[pixelist] - if bins == 1: - norm[i + beg] = normi + if bins==1: + norm[i+beg] = normi else: - norm[i * bins + beg : (i + 1) * bins + beg] = normi + norm[ i*bins+beg: (i+1)*bins+beg ] = normi if MOD: for j in range(MOD): - ct = (1 + i) * bins + j + BEG - if j == 0: - img = FD.rdframe(ct) + ct = (1+i) * bins + j + BEG + if j==0: + img = FD.rdframe( ct ) n = 1.0 else: - (p, v) = FD.rdrawframe(ct) - np.ravel(img)[p] += v + (p,v) = FD.rdrawframe(ct) + np.ravel( img )[p] += v n += 1 - img /= n - # print(ct,n) - img = FD.rdframe(ct) - avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + img /= n + #print(ct,n) + img = FD.rdframe( ct ) + avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask normi = np.ravel(avg_imgf)[pixelist] - norm[(i + 1) * bins + beg : (i + 2) * bins + beg] = normi + norm[ (i+1)*bins + beg: (i+2)*bins + beg ] = 
normi return norm - -def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): - """Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask +def shift_mask( new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None ): + '''Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask Input: new_cen: [x,y] in uint of pixel new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask @@ -446,42 +404,29 @@ def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): Output: the shifted/croped roi_mask - """ - nsx, nsy = new_mask.shape - down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] - x1, x2, y1, y2 = [old_cen[0] - down, old_cen[0] + up, old_cen[1] - left, old_cen[1] + right] - nroi_mask_ = old_roi_mask[x1:x2, y1:y2] * new_mask - nroi_mask = np.zeros_like(nroi_mask_) + ''' + nsx,nsy = new_mask.shape + down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] + x1,x2,y1,y2 = [ old_cen[0] - down, old_cen[0] + up , old_cen[1] - left, old_cen[1] + right ] + nroi_mask_ = old_roi_mask[ x1:x2, y1:y2 ] * new_mask + nroi_mask = np.zeros_like( nroi_mask_ ) qind, pixelist = roi.extract_label_indices(nroi_mask_) qu = np.unique(qind) - # noqs = len( qu ) - # nopr = np.bincount(qind, minlength=(noqs+1))[1:] - # qm = nopr>0 + #noqs = len( qu ) + #nopr = np.bincount(qind, minlength=(noqs+1))[1:] + #qm = nopr>0 for j, qv in enumerate(qu): - nroi_mask[nroi_mask_ == qv] = j + 1 - if limit_qnum is not None: - nroi_mask[nroi_mask > limit_qnum] = 0 + nroi_mask[nroi_mask_ == qv] = j +1 + if limit_qnum != None: + nroi_mask[ nroi_mask > limit_qnum ]=0 return nroi_mask -def plot_q_g2fitpara_general( - g2_dict, - g2_fitpara, - geometry="saxs", - ylim=None, - plot_all_range=True, - plot_index_range=None, - show_text=True, - return_fig=False, - show_fit=True, - ylabel="g2", - qth_interest=None, - max_plotnum_fig=1600, - qphi_analysis=False, - *argv, - **kwargs, -): - """ +def plot_q_g2fitpara_general( g2_dict, g2_fitpara, geometry ='saxs', ylim = None, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, ylabel='g2', qth_interest = None, max_plotnum_fig=1600,qphi_analysis=False, + *argv,**kwargs): + ''' Mar 29,2019, Y.G.@CHX plot q~fit parameters @@ -499,165 +444,141 @@ def plot_q_g2fitpara_general( Otherwise, power is variable. 
show_fit:, bool, if False, not show the fit - """ + ''' - if "uid" in kwargs.keys(): - uid_ = kwargs["uid"] + if 'uid' in kwargs.keys(): + uid_ = kwargs['uid'] else: - uid_ = "uid" - if "path" in kwargs.keys(): - path = kwargs["path"] + uid_ = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] else: - path = "" + path = '' data_dir = path - if ylabel == "g2": - ylabel = "g_2" - if ylabel == "g4": - ylabel = "g_4" + if ylabel=='g2': + ylabel='g_2' + if ylabel=='g4': + ylabel='g_4' - if geometry == "saxs": + if geometry =='saxs': if qphi_analysis: - geometry = "ang_saxs" - - qval_dict_, fit_res_ = g2_dict, g2_fitpara - - ( - qr_label, - qz_label, - num_qz, - num_qr, - num_short, - num_long, - short_label, - long_label, - short_ulabel, - long_ulabel, - ind_long, - master_plot, - mastp, - ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) - fps = [] + geometry = 'ang_saxs' + - # print(qr_label, qz_label, short_ulabel, long_ulabel) - # $print( num_short, num_long ) - beta, relaxation_rate, baseline, alpha = ( - g2_fitpara["beta"], - g2_fitpara["relaxation_rate"], - g2_fitpara["baseline"], - g2_fitpara["alpha"], - ) + qval_dict_, fit_res_ = g2_dict, g2_fitpara + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) fps = [] - for s_ind in range(num_short): - ind_long_i = ind_long[s_ind] - num_long_i = len(ind_long_i) - betai, relaxation_ratei, baselinei, alphai = ( - beta[ind_long_i], - relaxation_rate[ind_long_i], - baseline[ind_long_i], - alpha[ind_long_i], - ) + + #print(qr_label, qz_label, short_ulabel, long_ulabel) + #$print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( g2_fitpara['beta'], + g2_fitpara['relaxation_rate'], + g2_fitpara['baseline'], + g2_fitpara['alpha'] ) + + fps=[] + for s_ind in range( num_short ): + ind_long_i = ind_long[ s_ind ] + num_long_i = len( ind_long_i ) + betai, relaxation_ratei, baselinei, alphai = (beta[ind_long_i], relaxation_rate[ind_long_i], + baseline[ind_long_i], alpha[ind_long_i] ) qi = long_ulabel - # print(s_ind, qi, np.array( betai) ) + #print(s_ind, qi, np.array( betai) ) if RUN_GUI: fig = Figure(figsize=(10, 12)) else: - # fig = plt.figure( ) - if num_long_i <= 4: - if master_plot != "qz": + #fig = plt.figure( ) + if num_long_i <=4: + if master_plot != 'qz': fig = plt.figure(figsize=(8, 6)) else: - if num_short > 1: + if num_short>1: fig = plt.figure(figsize=(8, 4)) else: fig = plt.figure(figsize=(10, 6)) - # print('Here') + #print('Here') elif num_long_i > max_plotnum_fig: - num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 - fig = [plt.figure(figsize=figsize) for i in range(num_fig)] - # print( figsize ) + num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 + fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] + #print( figsize ) else: - # print('Here') - if master_plot != "qz": + #print('Here') + if master_plot != 'qz': fig = plt.figure(figsize=figsize) else: fig = plt.figure(figsize=(10, 10)) - if master_plot == "qz": - if geometry == "ang_saxs": - title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" - elif geometry == "gi_saxs": - title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + if master_plot == 'qz': + if geometry=='ang_saxs': + title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' + elif geometry=='gi_saxs': + title_short = r'$Q_z= $' + 
'%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' else: - title_short = "" - else: # qr - if geometry == "ang_saxs" or geometry == "gi_saxs": - title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + title_short = '' + else: #qr + if geometry=='ang_saxs' or geometry=='gi_saxs': + title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' else: - title_short = "" - # print(geometry) - # filename ='' - til = "%s:--->%s" % (uid_, title_short) - if num_long_i <= 4: - plt.title(til, fontsize=14, y=1.15) + title_short='' + #print(geometry) + #filename ='' + til = '%s:--->%s'%(uid_, title_short ) + if num_long_i <=4: + plt.title( til,fontsize= 14, y =1.15) else: - plt.title(til, fontsize=20, y=1.06) - # print( num_long ) - if num_long != 1: - # print( 'here') - plt.axis("off") - # sy = min(num_long_i,4) - sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + plt.title( til,fontsize=20, y =1.06) + #print( num_long ) + if num_long!=1: + #print( 'here') + plt.axis('off') + #sy = min(num_long_i,4) + sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) else: - sy = 1 - sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + sy =1 + sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) temp = sy sy = sx sx = temp - if sx == 1: - if sy == 1: - plt.axis("on") - ax1 = fig.add_subplot(4, 1, 1) - ax2 = fig.add_subplot(4, 1, 2) - ax3 = fig.add_subplot(4, 1, 3) - ax4 = fig.add_subplot(4, 1, 4) - plot1D(x=qi, y=betai, m="o", ls="--", c="k", ax=ax1, legend=r"$\beta$", title="") - plot1D(x=qi, y=alphai, m="o", ls="--", c="r", ax=ax2, legend=r"$\alpha$", title="") - plot1D(x=qi, y=baselinei, m="o", ls="--", c="g", ax=ax3, legend=r"$baseline$", title="") - plot1D(x=qi, y=relaxation_ratei, m="o", c="b", ls="--", ax=ax4, legend=r"$\gamma$ $(s^{-1})$", title="") - - ax4.set_ylabel(r"$\gamma$ $(s^{-1})$") + if sx==1: + if sy==1: + plt.axis('on') + ax1 = fig.add_subplot( 4,1,1 ) + ax2 = fig.add_subplot( 4,1,2 ) + ax3 = fig.add_subplot( 4,1,3 ) + ax4 = fig.add_subplot( 4,1,4 ) + plot1D(x=qi, y=betai, m='o', ls='--', c='k', ax=ax1, legend=r'$\beta$', title='') + plot1D(x=qi, y=alphai, m='o', ls='--',c='r', ax=ax2, legend=r'$\alpha$', title='') + plot1D(x=qi, y=baselinei, m='o', ls='--', c='g', ax=ax3, legend=r'$baseline$', title='') + plot1D(x=qi, y=relaxation_ratei, m='o', c='b', ls='--', ax=ax4, legend= r'$\gamma$ $(s^{-1})$' , title='') + + ax4.set_ylabel( r'$\gamma$ $(s^{-1})$' ) ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) - ax3.set_ylabel(r"$baseline") - ax2.set_ylabel(r"$\alpha$") - ax1.set_ylabel(r"$\beta$") + ax3.set_ylabel( r'$baseline' ) + ax2.set_ylabel( r'$\alpha$' ) + ax1.set_ylabel( r'$\beta$' ) fig.tight_layout() - fp = data_dir + uid_ + "g2_q_fit_para_%s.png" % short_ulabel[s_ind] - fig.savefig(fp, dpi=fig.dpi) + fp = data_dir + uid_ + 'g2_q_fit_para_%s.png'%short_ulabel[s_ind] + fig.savefig( fp , dpi=fig.dpi) fps.append(fp) - outputfile = data_dir + "%s_g2_q_fitpara_plot" % uid_ + ".png" - # print(uid) - combine_images(fps, outputfile, outsize=[2000, 2400]) - - -def plot_q_rate_general( - qval_dict, - rate, - geometry="saxs", - ylim=None, - logq=True, - lograte=True, - plot_all_range=True, - plot_index_range=None, - show_text=True, - return_fig=False, - show_fit=True, - *argv, - **kwargs, -): - """ + outputfile = data_dir + '%s_g2_q_fitpara_plot'%uid_ + '.png' + #print(uid) + combine_images( fps, outputfile, outsize= [ 2000,2400 ] ) + + + + + +def plot_q_rate_general( qval_dict, rate, geometry 
='saxs', ylim = None, logq=True, lograte=True, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, + *argv,**kwargs): + ''' Mar 29,2019, Y.G.@CHX plot q~rate in log-log scale @@ -675,88 +596,64 @@ def plot_q_rate_general( Otherwise, power is variable. show_fit:, bool, if False, not show the fit - """ + ''' - if "uid" in kwargs.keys(): - uid = kwargs["uid"] + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] else: - uid = "uid" - if "path" in kwargs.keys(): - path = kwargs["path"] + uid = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] else: - path = "" - ( - qr_label, - qz_label, - num_qz, - num_qr, - num_short, - num_long, - short_label, - long_label, - short_ulabel, - long_ulabel, - ind_long, - master_plot, - mastp, - ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) - - fig, ax = plt.subplots() - plt.title(r"$Q$" "-Rate-%s" % (uid), fontsize=20, y=1.06) + path = '' + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig,ax = plt.subplots() + plt.title(r'$Q$''-Rate-%s'%(uid),fontsize=20, y =1.06) Nqz = num_short - if Nqz != 1: - ls = "--" + if Nqz!=1: + ls = '--' else: - ls = "" - # print(Nqz) - for i in range(Nqz): - ind_long_i = ind_long[i] - y = np.array(rate)[ind_long_i] - x = long_label[ind_long_i] - # print(i, x, y, D0 ) - if Nqz != 1: - label = r"$q_z=%.5f$" % short_ulabel[i] + ls='' + #print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + #print(i, x, y, D0 ) + if Nqz!=1: + label=r'$q_z=%.5f$'%short_ulabel[i] else: - label = "" - ax.loglog(x, y, marker="o", ls=ls, label=label) - if Nqz != 1: - legend = ax.legend(loc="best") - - if plot_index_range is not None: - d1, d2 = plot_index_range - d2 = min(len(x) - 1, d2) - ax.set_xlim((x**power)[d1], (x**power)[d2]) - ax.set_ylim(y[d1], y[d2]) - - if ylim is not None: - ax.set_ylim(ylim) - - ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$) (log)") - ax.set_xlabel("$q$" r"($\AA$) (log)") - fp = path + "%s_Q_Rate_loglog" % (uid) + ".png" - fig.savefig(fp, dpi=fig.dpi) + label='' + ax.loglog(x, y, marker = 'o', ls =ls, label=label) + if Nqz!=1:legend = ax.legend(loc='best') + + if plot_index_range != None: + d1,d2 = plot_index_range + d2 = min( len(x)-1, d2 ) + ax.set_xlim( (x**power)[d1], (x**power)[d2] ) + ax.set_ylim( y[d1],y[d2]) + + if ylim != None: + ax.set_ylim( ylim ) + + ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$) (log)") + ax.set_xlabel("$q$"r'($\AA$) (log)') + fp = path + '%s_Q_Rate_loglog'%(uid) + '.png' + fig.savefig( fp, dpi=fig.dpi) fig.tight_layout() if return_fig: - return fig, ax - - -def plot_xy_x2( - x, - y, - x2=None, - pargs=None, - loglog=False, - logy=True, - fig_ax=None, - xlabel="q (" r"$\AA^{-1}$)", - xlabel2="q (pixel)", - title="_q_Iq", - ylabel="I(q)", - save=True, - *argv, - **kwargs, -): - """YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + return fig,ax + + + +def plot_xy_x2( x, y, x2=None, pargs=None, loglog=False, logy=True, fig_ax=None, + xlabel= 'q ('r'$\AA^{-1}$)', xlabel2='q (pixel)', title= '_q_Iq', + ylabel = 'I(q)',save=True, *argv,**kwargs): + '''YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) This funciton is primary for plot q-Iq Input: @@ -768,100 +665,111 @@ def plot_xy_x2( save: if True, save 
the plot in the path defined in pargs kwargs: could include xlim (in unit of index), ylim (in unit of real value) - """ - if fig_ax is None: + ''' + if fig_ax == None: fig, ax1 = plt.subplots() else: - fig, ax1 = fig_ax - if pargs is not None: - uid = pargs["uid"] - path = pargs["path"] + fig,ax1=fig_ax + if pargs != None: + uid = pargs['uid'] + path = pargs['path'] else: - uid = "XXX" - path = "" + uid='XXX' + path='' if loglog: - ax1.loglog(x, y, "-o") + ax1.loglog( x,y, '-o') elif logy: - ax1.semilogy(x, y, "-o") + ax1.semilogy( x,y, '-o') else: - ax1.plot(x, y, "-o") - ax1.set_xlabel(xlabel) - ax1.set_ylabel(ylabel) - title = ax1.set_title("%s--" % uid + title) - Nx = len(x) - if "xlim" in kwargs.keys(): - xlim = kwargs["xlim"] - if xlim[1] > Nx: - xlim[1] = Nx - 1 + ax1.plot( x,y, '-o') + ax1.set_xlabel( xlabel ) + ax1.set_ylabel( ylabel ) + title = ax1.set_title( '%s--'%uid + title) + Nx= len(x) + if 'xlim' in kwargs.keys(): + xlim = kwargs['xlim'] + if xlim[1]>Nx: + xlim[1]=Nx-1 else: - xlim = [0, Nx] - if "ylim" in kwargs.keys(): - ylim = kwargs["ylim"] + xlim=[ 0, Nx] + if 'ylim' in kwargs.keys(): + ylim = kwargs['ylim'] else: - ylim = [y.min(), y.max()] - lx1, lx2 = xlim - ax1.set_xlim([x[lx1], x[lx2]]) - ax1.set_ylim(ylim) - if x2 is not None: + ylim=[y.min(), y.max()] + lx1,lx2=xlim + ax1.set_xlim( [ x[lx1], x[lx2] ] ) + ax1.set_ylim( ylim ) + if x2 != None: ax2 = ax1.twiny() - ax2.set_xlabel(xlabel2) - ax2.set_ylabel(ylabel) - ax2.set_xlim([x2[lx1], x2[lx2]]) + ax2.set_xlabel( xlabel2 ) + ax2.set_ylabel( ylabel ) + ax2.set_xlim( [ x2[lx1], x2[lx2] ] ) title.set_y(1.1) fig.subplots_adjust(top=0.85) if save: - path = pargs["path"] - fp = path + "%s_q_Iq" % uid + ".png" - fig.savefig(fp, dpi=fig.dpi) + path = pargs['path'] + fp = path + '%s_q_Iq'%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) -def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0): - """save oavs as png""" - tifs = list(db[uid].data("OAV_image"))[0] + + +def save_oavs_tifs( uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1,threshold = 0 ): + '''save oavs as png''' + tifs = list( db[uid].data( 'OAV_image') )[0] try: - pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) + pixel_scalebar=np.ceil(scalebar_size/md['OAV resolution um_pixel']) except: - pixel_scalebar = None - print("No OAVS resolution is available.") + pixel_scalebar=None + print('No OAVS resolution is available.') - text_string = "%s $\mu$m" % scalebar_size + text_string='%s $\mu$m'%scalebar_size h = db[uid] - oavs = tifs - - oav_period = h["descriptors"][0]["configuration"]["OAV"]["data"]["OAV_cam_acquire_period"] - oav_expt = h["descriptors"][0]["configuration"]["OAV"]["data"]["OAV_cam_acquire_time"] - oav_times = [] + oavs=tifs + + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list + detectors = sorted(get_detectors(h)) + for d in range(len(detectors)): + try: + oav_period=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_period'] + oav_expt=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_time'] + except: + pass + oav_times=[] for i in range(len(oavs)): - oav_times.append(oav_expt + i * oav_period) - fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) + oav_times.append(oav_expt+i*oav_period) + fig=plt.subplots(int(np.ceil(len(oavs)/3)),3,figsize=(3*5.08,int(np.ceil(len(oavs)/3))*4)) for m in range(len(oavs)): - 
plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) - # plt.subplots(figsize=(5.2,4)) + plt.subplot(int(np.ceil(len(oavs)/3)),3,m+1) + #plt.subplots(figsize=(5.2,4)) img = oavs[m] try: - ind = np.flipud(img * scale)[:, :, 2] < threshold + ind = np.flipud(img*scale)[:,:,2] < threshold except: - ind = np.flipud(img * scale) < threshold - rgb_cont_img = np.copy(np.flipud(img)) - # rgb_cont_img[ind,0]=1000 - if brightness_scale != 1: - rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) - - plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") - plt.axis("equal") - cross = [685, 440, 50] # definintion of direct beam: x, y, size - plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") - plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") - if pixel_scalebar is not None: - plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. - plt.text(1000, 50, text_string, fontsize=14, color="r") - plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") - plt.axis("off") - plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) - - -def shift_mask_old(mask, shiftx, shifty): - """YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel + ind = np.flipud(img*scale) < threshold + rgb_cont_img=np.copy(np.flipud(img)) + #rgb_cont_img[ind,0]=1000 + if brightness_scale !=1: + rgb_cont_img=scale_rgb(rgb_cont_img,scale=brightness_scale) + + plt.imshow(rgb_cont_img,interpolation='none',resample=True, cmap = 'gray') + plt.axis('equal') + cross=[685,440,50] # definintion of direct beam: x, y, size + plt.plot([cross[0]-cross[2]/2,cross[0]+cross[2]/2],[cross[1],cross[1]],'r-') + plt.plot([cross[0],cross[0]],[cross[1]-cross[2]/2,cross[1]+cross[2]/2],'r-') + if pixel_scalebar != None: + plt.plot([1100,1100+pixel_scalebar],[150,150],'r-',Linewidth=5) # scale bar. + plt.text(1000,50,text_string,fontsize=14,color='r') + plt.text(600,50,str(oav_times[m])[:5]+' [s]',fontsize=14,color='r') + plt.axis('off') + plt.savefig( data_dir + 'uid=%s_OVA_images.png'%uid) + + + + + +def shift_mask_old( mask, shiftx, shifty): + '''YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel Input: mask: int-type array, shiftx: int scalar, shift value in x direction with unit in pixel @@ -869,110 +777,102 @@ def shift_mask_old(mask, shiftx, shifty): Output: maskn: int-type array, shifted mask - """ - qind, pixelist = roi.extract_label_indices(mask) + ''' + qind, pixelist = roi.extract_label_indices( mask ) dims = mask.shape - imgwidthy = dims[1] # dimension in y, but in plot being x - imgwidthx = dims[0] # dimension in x, but in plot being y - pixely = pixelist % imgwidthy - pixelx = pixelist // imgwidthy - pixelyn = pixely + shiftx - pixelxn = pixelx + shifty - w = (pixelyn < imgwidthy) & (pixelyn >= 0) & (pixelxn < imgwidthx) & (pixelxn >= 0) + imgwidthy = dims[1] #dimension in y, but in plot being x + imgwidthx = dims[0] #dimension in x, but in plot being y + pixely = pixelist%imgwidthy + pixelx = pixelist//imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy ) & (pixelyn >= 0 ) & (pixelxn < imgwidthx ) & (pixelxn >= 0 ) pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] - maskn = np.zeros_like(mask) + maskn = np.zeros_like( mask ) maskn.ravel()[pixelist_new] = qind[w] return maskn def get_current_time(): - """get current time in a fomart of year/month/date/hour(24)/min/sec/, - e.g. 
2009-01-05 22:14:39
    '''
    loc_dt = datetime.datetime.now(pytz.timezone('US/Eastern'))
    fmt = "%Y-%m-%d %H:%M:%S"
    return loc_dt.strftime(fmt)


def evalue_array( array, verbose = True ):
    '''Y.G., Dev Nov 1, 2018 Get the min, max, avg, std of an array '''
    _min, _max, avg, std = np.min( array), np.max( array), np.average( array ), np.std( array )
    if verbose:
        print( 'The min, max, avg, std of this array are: %s %s %s %s, respectively.'%(_min, _max, avg, std ) )
    return _min, _max, avg, std


def find_good_xpcs_uids( fuids, Nlim=100, det = [ '4m', '1m', '500'] ):
    '''Y.G., Dev Nov 1, 2018 Find good XPCS series
    Input:
        fuids: list, a list of full uids
        Nlim: integer, the smallest number of images for a run to be considered an XPCS series
        det: list, a list of detectors (each entry can be a short substring of the full detector name)
    Return:
        the list of XPCS uids

    '''
    guids = []
    for i, uid in enumerate(fuids):
        if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count':
            head = db[uid]['start']
            for dec in head['detectors']:
                for dt in det:
                    if dt in dec:
                        if 'number of images' in head:
                            if float(head['number of images'] ) >= Nlim:
                                #print(i, uid)
                                guids.append(uid)
    G = np.unique( guids )
    print('Found %s uids for XPCS series.'%len(G) )
    return G
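# A minimal usage sketch for find_good_xpcs_uids (added for illustration; not
# part of the original patch). It assumes the databroker instance `db` used
# throughout this module, and that `fuids` holds full uids from a time-range
# query; the query keywords here are illustrative:
#
#     hdrs = db(start_time='2024-04-01', stop_time='2024-04-02')
#     fuids = [h['start']['uid'] for h in hdrs]
#     good_uids = find_good_xpcs_uids(fuids, Nlim=100, det=['4m', '1m', '500'])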
def create_fullImg_with_box( shape, box_nx = 9 , box_ny = 8, ):
    '''Y.G. 2018/10/26 Divide an image into multiple touching boxes
    Input
        shape: the shape of the image
        box_nx: the number of boxes in x
        box_ny: the number of boxes in y
    Return:
        roi_mask, (* mask )
    '''
    #shape = mask.shape
    Wrow, Wcol = int( np.ceil( shape[0]/box_nx )), int(np.ceil(shape[1]/box_ny) )
    #print(Wrow, Wcol)
    roi_mask = np.zeros( shape, dtype=np.int32 )
    for i in range( box_nx ):
        for j in range(box_ny):
            roi_mask[ i*Wrow: (i+1)*Wrow , j*Wcol: (j+1)*Wcol ] = i * box_ny + j + 1
    #roi_mask *= mask
    return roi_mask


def get_refl_y0( inc_ang, inc_y0, Ldet, pixel_size, ):
    ''' Get the reflected beam center y
    Input:
        inc_ang: incident angle in degree
        inc_y0: incident beam y center in pixel
        Ldet: sample to detector distance in meter
        pixel_size: pixel size in meter
    Return:
        reflected beam center y in pixel
    '''
    return Ldet * np.tan( np.radians(inc_ang)) * 2 / pixel_size + inc_y0


def lin2log_g2(lin_tau, lin_g2, num_points=False):
    """
    Lutz developed at Aug,2018
    function to resample g2 with linear time steps into logarithmic ones
    takes arrays of taus and g2 values, returns a pair of arrays of taus and g2 values with logarithmically sampled taus
    num_points=False -> determine the number of logarithmically sampled time points automatically (8 pts./decade)
    num_points=18 -> use 18 logarithmically spaced time points
    """
    # prep taus and g2s: remove nan and the first data point at tau=0
    rem = lin_tau==0
    #print('lin_tau: '+str(lin_tau.size))
    #print('lin_g2: '+str(lin_g2.size))
    lin_tau[rem]=np.nan
    #lin_tau[0]=np.nan;#lin_g2[0]=np.nan
    lin_g2 = lin_g2[np.isfinite(lin_tau)]
    lin_tau = lin_tau[np.isfinite(lin_tau)]
    #print('from lin-to-log-g2_sampling: ',lin_tau)
    if num_points == False:
        # automatically decide how many log-points (8/decade)
        dec=int(np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8))
    else:
        dec=int(num_points)
    log_tau=np.logspace(np.log10(lin_tau[0]),np.log10(lin_tau.max()),dec)
    # re-sample the correlation function: average the linear g2 over each
    # logarithmic bin; fall back to interpolation where a bin holds no points
    log_g2=[]
    for i in range(log_tau.size-1):
        y=[i,log_tau[i]-(log_tau[i+1]-log_tau[i])/2,log_tau[i]+(log_tau[i+1]-log_tau[i])/2]
        #x=lin_tau[lin_tau>y[1]]
        x1=lin_tau>y[1]; x2=lin_tau<y[2]; x=x1*x2
        #print(np.average(lin_g2[x]))
        if np.isfinite(np.average(lin_g2[x])):
            log_g2.append(np.average(lin_g2[x]))
        else:
            log_g2.append(np.interp(log_tau[i],lin_tau,lin_g2))
        if i == log_tau.size-2:
            #print(log_tau[i+1])
            y=[i+1,log_tau[i+1]-(log_tau[i+1]-log_tau[i])/2,log_tau[i+1]]
            x1=lin_tau>y[1]; x2=lin_tau<y[2]; x=x1*x2
            if np.isfinite(np.average(lin_g2[x])):
                log_g2.append(np.average(lin_g2[x]))
            else:
                log_g2.append(np.interp(log_tau[i+1],lin_tau,lin_g2))
    return [log_tau, log_g2]
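# A short usage sketch for lin2log_g2 (added for illustration; not part of the
# original patch). `taus` and `g2` stand for a linearly sampled delay axis and
# a g2 array as produced elsewhere in this pipeline; one q-bin is resampled:
#
#     log_tau, log_g2 = lin2log_g2(taus.copy(), g2[:, 0].copy())
#
# Note the .copy(): lin2log_g2 writes np.nan into its tau input in place.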
    def is_positive(num):
        return True if num > 0 else False
    # Normalize values first:
    ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift  # roots are at Y=0
    positive = is_positive(ym[0])
@@ -1155,77 +1034,83 @@ def is_positive(num):
            list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1]))
            positive = not positive
    if len(list_of_roots) >= 2:
        FWHM=abs(list_of_roots[-1] - list_of_roots[0])
        CEN=list_of_roots[0]+0.5*(list_of_roots[1]-list_of_roots[0])
        ps.fwhm=FWHM
        ps.cen=CEN
        yf=ym
        #return {
        #    'fwhm': abs(list_of_roots[-1] - list_of_roots[0]),
        #    'x_range': list_of_roots,
        #}
    else:    # ok, maybe it's a step function..
        #print('no peak...trying step function...')
        ym = ym + shift
        def err_func(x, x0, k=2, A=1, base=0 ):  #### erf fit from Yugang
            return base - A * erf(k*(x-x0))
        mod = Model( err_func )
        ### estimate starting values:
        x0=np.mean(x)
        #k=0.1*(np.max(x)-np.min(x))
        pars = mod.make_params( x0=x0, k=2, A = 1., base = 0. )
        result = mod.fit(ym, pars, x = x )
        CEN=result.best_values['x0']
        FWHM = result.best_values['k']
        A = result.best_values['A']
        b = result.best_values['base']
        yf_ = err_func(x, CEN, k=FWHM, A=A, base=b )  #result.best_fit
        yf = (yf_ ) * (np.max(y) - np.min(y)) + np.min(y)
        #(y - np.min(y)) / (np.max(y) - np.min(y)) - shift
    ps.cen = CEN
    ps.fwhm = FWHM

    if replot:
        ### re-plot results:
        if logplot=='on':
            fig, ax = plt.subplots()  #plt.figure()
            ax.semilogy([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK')
            #ax.hold(True)  # ax.hold was removed in modern matplotlib
            ax.semilogy([CEN,CEN],[np.min(y),np.max(y)],'r-.',label='CEN')
            ax.semilogy([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM')
            ax.semilogy(x,y,'bo-')
            #plt.xlabel(field);plt.ylabel(intensity_field)
            ax.legend()
            #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9)
            #plt.show()
        else:
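            # (comment added for clarity) the branch below draws the same
            # diagnostic plot on a linear scale: PEAK/CEN/COM markers plus
            # the fitted profile yf over the raw data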
            #plt.close(999)
            fig, ax = plt.subplots()  #plt.figure()
            ax.plot([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK')
            #ax.hold(True)
            ax.plot([CEN,CEN],[np.min(y),np.max(y)],'m-.',label='CEN')
            ax.plot([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM')
            ax.plot(x,y,'bo--')
            ax.plot(x,yf,'r-', label='Fit')
            #plt.xlabel(field);plt.ylabel(intensity_field)
            ax.legend()
            #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9)
            #plt.show()

    ### assign values of interest as function attributes:
    ps.peak=PEAK
    ps.com=COM
    return ps.cen


def create_seg_ring( ring_edges, ang_edges, mask, setup_pargs ):
    '''YG Dev April 6, 2018
    Create a segmented ring mask
    Input:
        ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ]
        ang_edges: edges of angles (in degree), e.g., [ [20,40], [50, 60], ]
        mask: bool type 2D array
        setup_pargs: dict, should at least contain the beam center
                     (same keys as used by get_qval_qwid_dict above)
    Output:
        roi_mask: segmented ring mask: two-D array
        qval_dict: dict, key as q-number, val: q val

    '''
    roi_mask_qr, qr, qr_edge = get_ring_mask(mask, inner_radius= None, outer_radius = None,
                width = None, num_rings = None, edges= np.array( ring_edges), unit='pixel',
                pargs= setup_pargs)

    roi_mask_ang, ang_center, ang_edge = get_angular_mask( mask, inner_angle= None,
                outer_angle = None, width = None, edges = np.array( ang_edges ),
                num_angles = None, center = setup_pargs['center'],  # beam center from setup_pargs
                flow_geometry= False )

    roi_mask, good_ind = combine_two_roi_mask( roi_mask_qr, roi_mask_ang, pixel_num_thres=100)
    qval_dict_ = get_qval_dict( qr_center = qr, qz_center = ang_center, one_qz_multi_qr=False)
    qval_dict = { i:qval_dict_[k] for (i,k) in enumerate( good_ind) }
    return roi_mask, qval_dict
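# A hedged usage sketch for create_seg_ring (added for illustration; not part
# of the original patch). `mask` and `setup_pargs` follow the conventions of
# this module; the edge values are the ones quoted in the docstring above:
#
#     ring_edges = [[320, 340], [450, 460]]   # radial edges, in pixel
#     ang_edges = [[20, 40], [50, 60]]        # angular edges, in degree
#     roi_mask, qval_dict = create_seg_ring(ring_edges, ang_edges, mask, setup_pargs)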
5K
+    '''
+    bad = np.zeros( img_shape, dtype=bool )
+    if show_progress:
+        for i in tqdm(bad_frame_list[ bad_frame_list>=FD.beg]):
+            p,v = FD.rdrawframe(i)
+            w = np.where( v > threshold)[0]
+            bad.ravel()[ p[w] ] = 1
 #             x,y = np.where( imgsa[i] > threshold)
 #             bad[x[0],y[0]] = 1
     else:
-        for i in bad_frame_list[bad_frame_list >= FD.beg]:
-            p, v = FD.rdrawframe(i)
-            w = np.where(v > threshold)[0]
-            bad.ravel()[p[w]] = 1
+        for i in bad_frame_list[ bad_frame_list>=FD.beg]:
+            p,v = FD.rdrawframe(i)
+            w = np.where( v > threshold)[0]
+            bad.ravel()[ p[w] ] = 1
     return ~bad
 
 
-def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=15):
-    """DEV by Yugang@CHX, June 6, 2019
+def get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold=15 ):
+    '''DEV by Yugang@CHX, June 6, 2019
     Get circular average of a time series using a dynamic mask, in which pixel values above a threshold are set to zero.
     Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number
@@ -1314,55 +1189,58 @@ def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=1
     qp_saxs: q in pixel
     iq_saxs: intensity
     q_saxs: q in A-1
-    """
+    '''
     beg = FD.beg
     end = FD.end
     shape = FD.rdframe(beg).shape
-    Nimg_ = FD.end - FD.beg
-    # Nimg_ = 100
-    Nimg = Nimg_ // bin_number
-    time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bin_number)) + beg
-    for n in tqdm(range(Nimg)):
-        t1, t2 = time_edge[n]
-        # print(t1,t2)
-        if bin_number == 1:
+    Nimg_ = FD.end-FD.beg
+    #Nimg_ = 100
+    Nimg = Nimg_//bin_number
+    time_edge = np.array(create_time_slice( N= Nimg_,
+                slice_num= Nimg, slice_width= bin_number )) + beg
+    for n in tqdm( range(Nimg) ):
+        t1,t2 = time_edge[n]
+        #print(t1,t2)
+        if bin_number==1:
            avg_imgi = FD.rdframe(t1)
        else:
-            avg_imgi = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False)
-        badpi = find_bad_pixels_FD(
-            np.arange(t1, t2), FD, img_shape=avg_imgi.shape, threshold=threshold, show_progress=False
-        )
-        img = avg_imgi * mask * badpi
-        qp_saxsi, iq_saxsi, q_saxsi = get_circular_average(img, mask * badpi, save=False, pargs=setup_pargs)
-        # print( img.max())
-        if t1 == FD.beg:
-            qp_saxs, iq_saxs, q_saxs = np.zeros_like(qp_saxsi), np.zeros_like(iq_saxsi), np.zeros_like(q_saxsi)
+            avg_imgi = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1,
+                        plot_ = False,show_progress= False)
+        badpi = find_bad_pixels_FD( np.arange(t1,t2) , FD,
+                img_shape = avg_imgi.shape, threshold= threshold, show_progress=False )
+        img = avg_imgi* mask * badpi
+        qp_saxsi, iq_saxsi, q_saxsi = get_circular_average( img,
+                                    mask * badpi, save= False,
+                                    pargs=setup_pargs )
+        #print( img.max())
+        if t1==FD.beg:
+            qp_saxs, iq_saxs, q_saxs = np.zeros_like( qp_saxsi ), np.zeros_like( iq_saxsi ), np.zeros_like( q_saxsi )
         qp_saxs += qp_saxsi
        iq_saxs += iq_saxsi
-        q_saxs += q_saxsi
+        q_saxs  += q_saxsi
     qp_saxs /= Nimg
     iq_saxs /= Nimg
     q_saxs /= Nimg
     return qp_saxs, iq_saxs, q_saxs
 
+def get_waxs_beam_center( gamma, origin = [432, 363], Ldet = 1495, pixel_size = 75 * 1e-3 ):
+    '''YG Feb 10, 2018
+    Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma
+    Input:
+    gamma: angle in degree
+    Ldet: sample to detector distance, 1495 mm for CHX WAXS
+    origin: beam center for gamma = 0, (python x,y coordinate in pixel)
+    pixel size: 75 * 1e-3 mm for Eiger 1M
+    output:
+    beam center: for the target gamma, in pixel
+    '''
+    return [ int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ]
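
A quick numeric check of the geometry helper above (a usage sketch with illustrative values, not part of the patch; it assumes get_waxs_beam_center and numpy are available as in this module):

    # rotate the WAXS arm to gamma = 10 degrees, starting from the gamma = 0 center [432, 363]:
    cx, cy = get_waxs_beam_center( 10, origin=[432, 363], Ldet=1495, pixel_size=75 * 1e-3 )
    # tan(10 deg) * 1495 mm / (0.075 mm/pixel) ~ 3515 pixels, so [cx, cy] ~ [3946, 363]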
-def get_waxs_beam_center(gamma, origin=[432, 363], Ldet=1495, pixel_size=75 * 1e-3):
-    """YG Feb 10, 2018
-    Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma
-    Input:
-    gamma: angle in degree
-    Ldet: sample to detector distance, 1495 mm for CHX WAXS
-    origin: beam center for gamma = 0, (python x,y coordinate in pixel)
-    pixel size: 75 * 1e-3 mm for Eiger 1M
-    output:
-    beam center: for the target gamma, in pixel
-    """
-    return [int(origin[0] + np.tan(np.radians(gamma)) * Ldet / pixel_size), origin[1]]
 
 
-def get_img_from_iq(qp, iq, img_shape, center):
-    """YG Jan 24, 2018
+def get_img_from_iq( qp, iq, img_shape, center):
+    '''YG Jan 24, 2018
     Get image from circular average
     Input:
     qp: q in pixel unit
@@ -1371,120 +1249,121 @@ def get_img_from_iq(qp, iq, img_shape, center):
     center: [center_y, center_x] e.g., [120, 200]
     Output:
     img: recovered image
-    """
-    pixelist = np.arange(img_shape[0] * img_shape[1])
-    pixely = pixelist % img_shape[1] - center[1]
-    pixelx = pixelist // img_shape[1] - center[0]
-    r = np.hypot(pixelx, pixely)  # leave as float.
-    # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5
-    return (np.interp(r, qp, iq)).reshape(img_shape)
+    '''
+    pixelist = np.arange( img_shape[0] * img_shape[1] )
+    pixely = pixelist%img_shape[1] -center[1]
+    pixelx = pixelist//img_shape[1] - center[0]
+    r= np.hypot(pixelx, pixely) #leave as float.
+    #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5
+    return (np.interp( r, qp, iq )).reshape( img_shape )
 
 
-def average_array_withNan(array, axis=0, mask=None):
-    """YG. Jan 23, 2018
+def average_array_withNan( array, axis=0, mask=None):
+    '''YG. Jan 23, 2018
     Average array involving np.nan along axis
 
     Input:
     array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND
     axis: the average axis
     mask: bool, same shape as array, if None, will mask all the nan values
     Output:
     avg: averaged array along axis
-    """
+    '''
     shape = array.shape
-    if mask is None:
+    if mask == None:
         mask = np.isnan(array)
-    # mask = np.ma.masked_invalid(array).mask
+    #mask = np.ma.masked_invalid(array).mask
     array_ = np.ma.masked_array(array, mask=mask)
     try:
-        sums = np.array(np.ma.sum(array_[:, :], axis=axis))
+        sums = np.array( np.ma.sum( array_[:,:], axis= axis ) )
     except:
-        sums = np.array(np.ma.sum(array_[:], axis=axis))
+        sums = np.array( np.ma.sum( array_[:], axis= axis ) )
 
-    cts = np.sum(~mask, axis=axis)
-    # print(cts)
-    return sums / cts
+    cts = np.sum(~mask,axis=axis)
+    #print(cts)
+    return sums/cts
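
A minimal sketch of the NaN-aware averaging above (synthetic numbers, illustration only): each output element is the sum of the unmasked entries divided by their count, i.e. the same result as np.nanmean:

    import numpy as np
    a = np.array([[1.0, 2.0], [3.0, np.nan]])
    # average_array_withNan(a, axis=0) masks the NaN, giving [(1+3)/2, 2/1] = [2.0, 2.0],
    # which matches np.nanmean(a, axis=0)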
 
 
-def deviation_array_withNan(array, axis=0, mask=None):
-    """YG. Jan 23, 2018
+def deviation_array_withNan( array, axis=0, mask=None):
+    '''YG. Jan 23, 2018
     Get the deviation of array involving np.nan along axis
 
     Input:
     array: ND array
     axis: the average axis
     mask: bool, same shape as array, if None, will mask all the nan values
     Output:
     dev: the deviation of array along axis
-    """
-    avg2 = average_array_withNan(array**2, axis=axis, mask=mask)
-    avg = average_array_withNan(array, axis=axis, mask=mask)
-    return np.sqrt(avg2 - avg**2)
+    '''
+    avg2 = average_array_withNan( array**2, axis = axis, mask = mask )
+    avg = average_array_withNan( array, axis = axis, mask = mask )
+    return np.sqrt( avg2 - avg**2 )
 
 
-def refine_roi_mask(roi_mask, pixel_num_thres=10):
-    """YG Dev Jan20,2018
+def refine_roi_mask( roi_mask, pixel_num_thres=10):
+    '''YG Dev Jan20,2018
     remove bad roi whose pixel number is lower than pixel_num_thres
     roi_mask: array,
     pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask,
                 i.e., if the pixel number in one roi of the combined mask is smaller than pixel_num_thres,
                 that roi will be considered as a bad one and be removed.
-    """
-    new_mask = np.zeros_like(roi_mask)
+    '''
+    new_mask = np.zeros_like( roi_mask )
     qind, pixelist = roi.extract_label_indices(roi_mask)
     noqs = len(np.unique(qind))
-    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
-    good_ind = np.where(nopr >= pixel_num_thres)[0] + 1
+    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
+    good_ind = np.where( nopr >= pixel_num_thres)[0] +1
     l = len(good_ind)
-    new_ind = np.arange(1, l + 1)
-    for i, gi in enumerate(good_ind):
-        new_mask.ravel()[np.where(roi_mask.ravel() == gi)[0]] = new_ind[i]
-    return new_mask, good_ind - 1
-
-
-def shrink_image_stack(imgs, bins):
-    """shrink imgs by bins
-    imgs: shape as [Nimg, imx, imy]"""
+    new_ind = np.arange( 1, l+1 )
+    for i, gi in enumerate( good_ind ):
+        new_mask.ravel()[
+            np.where( roi_mask.ravel() == gi)[0] ] = new_ind[i]
+    return new_mask, good_ind -1
+
+def shrink_image_stack( imgs, bins):
+    '''shrink imgs by bins
+    imgs: shape as [Nimg, imx, imy] '''
     Nimg, imx, imy = imgs.shape
     bx, by = bins
-    imgsk = np.zeros([Nimg, imx // bx, imy // by])
+    imgsk = np.zeros( [Nimg, imx//bx, imy//by] )
     N = len(imgs)
     for i in range(N):
-        imgsk[i] = shrink_image(imgs[i], bins)
+        imgsk[i] = shrink_image(imgs[i], bins )
     return imgsk
 
-
-def shrink_image(img, bins):
-    """YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y
+def shrink_image(img, bins ):
+    '''YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y
     input:
     img: 2d array,
     bins: integer list, eg. 
[2,2] output: imgb: binned img - """ - m, n = img.shape + ''' + m,n = img.shape bx, by = bins - Nx, Ny = m // bx, n // by - # print(Nx*bx, Ny*by) - return img[: Nx * bx, : Ny * by].reshape(Nx, bx, Ny, by).mean(axis=(1, 3)) + Nx, Ny = m//bx, n//by + #print(Nx*bx, Ny*by) + return img[:Nx*bx, :Ny*by].reshape( Nx,bx, Ny, by).mean(axis=(1,3) ) -def get_diff_fv(g2_fit_paras, qval_dict, ang_init=137.2): - """YG@CHX Nov 9,2017 - Get flow velocity and diff from g2_fit_paras""" +def get_diff_fv( g2_fit_paras, qval_dict, ang_init=137.2): + '''YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras ''' g2_fit_para_ = g2_fit_paras.copy() - qr = np.array([qval_dict[k][0] for k in sorted(qval_dict.keys())]) - qang = np.array([qval_dict[k][1] for k in sorted(qval_dict.keys())]) - # x=g2_fit_para_.pop( 'relaxation_rate' ) - # x=g2_fit_para_.pop( 'flow_velocity' ) - g2_fit_para_["diff"] = g2_fit_paras["relaxation_rate"] / qr**2 - cos_part = np.abs(np.cos(np.radians(qang - ang_init))) - g2_fit_para_["fv"] = g2_fit_paras["flow_velocity"] / cos_part / qr + qr = np.array( [qval_dict[k][0] for k in sorted( qval_dict.keys())] ) + qang = np.array( [qval_dict[k][1] for k in sorted( qval_dict.keys())] ) + #x=g2_fit_para_.pop( 'relaxation_rate' ) + #x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_['diff'] = g2_fit_paras[ 'relaxation_rate' ]/qr**2 + cos_part = np.abs( np.cos( np.radians( qang - ang_init)) ) + g2_fit_para_['fv'] = g2_fit_paras[ 'flow_velocity' ]/cos_part/qr return g2_fit_para_ + + # function to get indices of local extrema (=indices of speckle echo maximum amplitudes): -def get_echos(dat_arr, min_distance=10): +def get_echos(dat_arr,min_distance=10): """ getting local maxima and minima from 1D data -> e.g. speckle echos strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima @@ -1493,20 +1372,19 @@ def get_echos(dat_arr, min_distance=10): by LW 10/23/2018 """ from skimage.feature import peak_local_max - - max_ind = peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) - min_ind = [] + max_ind=peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) + min_ind=[] for i in range(len(max_ind[:-1])): - min_ind.append(max_ind[i + 1][0] + np.argmin(dat_arr[max_ind[i + 1][0] : max_ind[i][0]])) - # unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: - mmax_ind = [] + min_ind.append(max_ind[i+1][0]+np.argmin(dat_arr[max_ind[i+1][0]:max_ind[i][0]])) + #unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind=[] for l in max_ind: mmax_ind.append(l[0]) - # return [mmax_ind,min_ind] - return [list(reversed(mmax_ind)), list(reversed(min_ind))] + #return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)),list(reversed(min_ind))] -def pad_length(arr, pad_val=np.nan): +def pad_length(arr,pad_val=np.nan): """ arr: 2D matrix pad_val: values being padded @@ -1516,77 +1394,76 @@ def pad_length(arr, pad_val=np.nan): update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) 
on arrays with inhomogenous size by LW 12/30/2017 """ - max_len = [] + max_len=[] for i in range(len(arr)): max_len.append([len(arr[i])]) - max_len = np.max(max_len) + max_len=np.max(max_len) for l in range(len(arr)): - arr[l] = np.pad(arr[l] * 1.0, (0, max_len - np.size(arr[l])), mode="constant", constant_values=pad_val) + arr[l]=np.pad(arr[l]*1.,(0,max_len-np.size(arr[l])),mode='constant',constant_values=pad_val) return arr + def save_array_to_tiff(array, output, verbose=True): - """Y.G. Nov 1, 2017 + '''Y.G. Nov 1, 2017 Save array to a tif file - """ + ''' img = PIL.Image.fromarray(array) - img.save(output) + img.save( output ) if verbose: - print("The data is save to: %s." % (output)) + print( 'The data is save to: %s.'%( output )) + def load_pilatus(filename): - """Y.G. Nov 1, 2017 + '''Y.G. Nov 1, 2017 Load a pilatus 2D image - """ - return np.array(PIL.Image.open(filename).convert("I")) + ''' + return np.array( PIL.Image.open(filename).convert('I') ) - -def ls_dir(inDir, have_list=[], exclude_list=[]): - """Y.G. Aug 1, 2019 +def ls_dir(inDir, have_list=[], exclude_list=[] ): + '''Y.G. Aug 1, 2019 List all filenames in a filefolder inDir: fullpath of the inDir have_string: only retrun filename containing the string exclude_string: only retrun filename not containing the string - """ + ''' from os import listdir from os.path import isfile, join - tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) tifs_ = [] for tif in tifs: - flag = 1 + flag=1 for string in have_list: if string not in tif: - flag *= 0 - for string in exclude_list: + flag *=0 + for string in exclude_list: if string in tif: - flag *= 0 + flag *=0 if flag: - tifs_.append(tif) + tifs_.append( tif ) - return np.array(tifs_) + return np.array( tifs_ ) def ls_dir2(inDir, string=None): - """Y.G. Nov 1, 2017 + '''Y.G. Nov 1, 2017 List all filenames in a filefolder (not include hidden files and subfolders) inDir: fullpath of the inDir string: if not None, only retrun filename containing the string - """ + ''' from os import listdir from os.path import isfile, join - - if string is None: - tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + if string == None: + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) else: - tifs = np.array([f for f in listdir(inDir) if (isfile(join(inDir, f))) & (string in f)]) + tifs = np.array( [f for f in listdir(inDir) if (isfile(join(inDir, f)))&(string in f) ] ) return tifs - -def re_filename(old_filename, new_filename, inDir=None, verbose=True): - """Y.G. Nov 28, 2017 +def re_filename( old_filename, new_filename, inDir=None, verbose=True ): + '''Y.G. Nov 28, 2017 Rename old_filename with new_filename in a inDir inDir: fullpath of the inDir, if None, the filename should have the fullpath old_filename/ new_filename: string @@ -1595,31 +1472,30 @@ def re_filename(old_filename, new_filename, inDir=None, verbose=True): 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' ) - """ - if inDir is not None: - os.rename(inDir + old_filename, inDir + new_filename) + ''' + if inDir != None: + os.rename(inDir + old_filename, inDir+new_filename) else: - os.rename(old_filename, new_filename) - print("The file: %s is changed to: %s." 
% (old_filename, new_filename)) + os.rename( old_filename, new_filename) + print('The file: %s is changed to: %s.'%(old_filename, new_filename)) -def re_filename_dir(old_pattern, new_pattern, inDir, verbose=True): - """Y.G. Nov 28, 2017 +def re_filename_dir( old_pattern, new_pattern, inDir,verbose=True ): + '''Y.G. Nov 28, 2017 Rename all filenames with old_pattern with new_pattern in a inDir inDir: fullpath of the inDir, if None, the filename should have the fullpath old_pattern, new_pattern an example, re_filename_dir('20_', '17_', inDir ) - """ + ''' fps = ls_dir(inDir) for fp in fps: if old_pattern in fp: old_filename = fp new_filename = fp.replace(old_pattern, new_pattern) - re_filename(old_filename, new_filename, inDir, verbose=verbose) - + re_filename( old_filename, new_filename, inDir,verbose= verbose ) -def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, silent=True, qprecision=5): +def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent=True, qprecision=5): """ function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) @@ -1637,54 +1513,40 @@ def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, s """ import collections from collections import OrderedDict - qdict = collections.OrderedDict(sorted(qdict.items())) - qs = [] - phis = [] + qs=[] + phis=[] for i in qdict.keys(): qs.append(qdict[i][0]) - phis.append(qdict[i][1]) - qslist = list(OrderedDict.fromkeys(qs)) - qslist = np.unique(np.round(qslist, qprecision)) - phislist = list(OrderedDict.fromkeys(phis)) - qslist = list(np.sort(qslist)) - phislist = list(np.sort(phislist)) + phis.append(qdict[i][1]) + qslist=list(OrderedDict.fromkeys(qs)) + qslist = np.unique( np.round(qslist, qprecision ) ) + phislist=list(OrderedDict.fromkeys(phis)) + qslist=list(np.sort(qslist)) + phislist=list(np.sort(phislist)) if q_nr: - qinterest = qslist[q] - qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] + qinterest=qslist[q] + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] else: - qinterest = q - qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] # new + qinterest=q + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new if phi_nr: - phiinterest = phislist[phi] - phiindices = [i for i, x in enumerate(phis) if x == phiinterest] + phiinterest=phislist[phi] + phiindices = [i for i,x in enumerate(phis) if x == phiinterest] else: - phiinterest = phi - phiindices = [i for i, x in enumerate(phis) if np.abs(x - phiinterest) < p_thresh] # new - ret_list = [ - list(set(qindices).intersection(phiindices))[0], - qinterest, - phiinterest, - qslist, - phislist, - ] # -> this is the original + phiinterest=phi + phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new + ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] #-> this is the original if silent == False: - print("list of available Qs:") + print('list of available Qs:') print(qslist) - print("list of available phis:") + print('list of available phis:') print(phislist) - print("Roi number for Q= " + str(ret_list[1]) + " and phi= " + str(ret_list[2]) + ": " + str(ret_list[0])) + print('Roi number for Q= '+str(ret_list[1])+' and phi= '+str(ret_list[2])+': '+str(ret_list[0])) return ret_list - -def get_fit_by_two_linear( - x, - y, - mid_xpoint1, - 
mid_xpoint2=None,
-    xrange=None,
-):
-    """YG Oct 16,2017 Fit a curve with two linear functions; the curve is split by mid_xpoint,
+def get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2=None, xrange=None, ):
+    '''YG Oct 16,2017 Fit a curve with two linear functions; the curve is split by mid_xpoint,
     namely, fit the curve in two regions defined by (xmin, mid_xpoint1) and (mid_xpoint2, xmax)
     Input:
     x: 1D np.array
     y: 1D np.array
     mid_xpoint: float, the middle point of x
     xrange: [x1,x2]
     Output:
     D1, gmfit1, D2, gmfit2 :
     fit parameter (slope, background) of linear fit1
@@ -1698,111 +1560,106 @@ def get_fit_by_two_linear(
     fit parameter (slope, background) of linear fit2
     convenient fit class; gmfit2(x) gives the y value
 
-    """
-    if xrange is None:
-        x1, x2 = min(x), max(x)
-    x1, x2 = xrange
-    if mid_xpoint2 is None:
-        mid_xpoint2 = mid_xpoint1
-    D1, gmfit1 = linear_fit(x, y, xrange=[x1, mid_xpoint1])
-    D2, gmfit2 = linear_fit(x, y, xrange=[mid_xpoint2, x2])
+    '''
+    if xrange == None:
+        x1,x2 = min(x), max(x)
+    else:
+        x1,x2 = xrange
+    if mid_xpoint2 == None:
+        mid_xpoint2= mid_xpoint1
+    D1, gmfit1 = linear_fit( x,y, xrange= [ x1,mid_xpoint1 ])
+    D2, gmfit2 = linear_fit( x,y, xrange= [mid_xpoint2, x2 ])
     return D1, gmfit1, D2, gmfit2
 
-def get_cross_point(x, gmfit1, gmfit2):
-    """YG Oct 16,2017
+def get_cross_point( x, gmfit1, gmfit2 ):
+    '''YG Oct 16,2017
     Get the cross point of two curves
-    """
+    '''
     y1 = gmfit1(x)
     y2 = gmfit2(x)
-    return x[np.argmin(np.abs(y1 - y2))]
+    return x[np.argmin( np.abs(y1-y2) )]
 
-def get_curve_turning_points(
-    x,
-    y,
-    mid_xpoint1,
-    mid_xpoint2=None,
-    xrange=None,
-):
-    """YG Oct 16,2017
+def get_curve_turning_points( x, y, mid_xpoint1, mid_xpoint2=None, xrange=None, ):
+    '''YG Oct 16,2017
     Get a turning point of a curve by doing a two-linear fit
-    """
-    D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x, y, mid_xpoint1, mid_xpoint2, xrange)
-    return get_cross_point(x, gmfit1, gmfit2)
+    '''
+    D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2, xrange )
+    return get_cross_point( x, gmfit1, gmfit2 )
 
-def plot_fit_two_linear_fit(x, y, gmfit1, gmfit2, ax=None):
-    """YG Oct 16,2017 Plot data with the two fitted linear functions"""
-    if ax is None:
-        fig, ax = plt.subplots()
-    plot1D(x=x, y=y, ax=ax, c="k", legend="data", m="o", ls="")  # logx=True, logy=True )
-    plot1D(x=x, y=gmfit1(x), ax=ax, c="r", m="", ls="-", legend="fit1")
-    plot1D(x=x, y=gmfit2(x), ax=ax, c="b", m="", ls="-", legend="fit2")
+def plot_fit_two_linear_fit(x,y, gmfit1, gmfit2, ax=None ):
+    '''YG Oct 16,2017 Plot data with the two fitted linear functions
+    '''
+    if ax == None:
+        fig, ax =plt.subplots()
+    plot1D( x = x, y = y, ax =ax, c='k', legend='data', m='o', ls='')#logx=True, logy=True )
+    plot1D( x = x, y = gmfit1(x), ax =ax, c='r', m='', ls='-',legend='fit1' )
+    plot1D( x = x, y = gmfit2(x), ax =ax, c='b', m='', ls='-',legend='fit2' )
     return ax
 
-def linear_fit(x, y, xrange=None):
-    """YG Oct 16,2017 copied from XPCS_SAXS
+def linear_fit( x,y, xrange=None):
+    '''YG Oct 16,2017 copied from XPCS_SAXS
     a linear fit
-    """
-    if xrange is not None:
+    '''
+    if xrange != None:
         xmin, xmax = xrange
-        x1, x2 = find_index(x, xmin, tolerance=None), find_index(x, xmax, tolerance=None)
+        x1,x2 = find_index( x,xmin,tolerance= None),find_index( x,xmax,tolerance= None)
         x_ = x[x1:x2]
         y_ = y[x1:x2]
     else:
-        x_ = x
-        y_ = y
+        x_=x
+        y_=y
     D0 = np.polyfit(x_, y_, 1)
     gmfit = np.poly1d(D0)
     return D0, gmfit
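
A short sketch of how these fitting helpers chain together (synthetic data, illustration only; the helper names are the ones defined just above):

    import numpy as np
    x = np.arange(0, 100, 1.0)
    y = np.where(x < 50, 2 * x, 100 + 0.5 * (x - 50))            # slope changes at x = 50
    D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x, y, mid_xpoint1=40, mid_xpoint2=60)
    xc = get_cross_point(x, gmfit1, gmfit2)                      # xc is close to 50
    # get_curve_turning_points(x, y, 40, 60) wraps the two calls above into one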
 
 
-def find_index(x, x0, tolerance=None):
-    """YG Oct 16,2017 copied from SAXS
-    find index of x0 in x
-    #find the position of P in a list (plist) with tolerance
-    """
-    N = len(x)
-    i = 0
-    position = None
-    if tolerance is None:
-        tolerance = (x[1] - x[0]) / 2.0
-    if x0 > max(x):
-        position = len(x) - 1
-    elif x0 < min(x):
-        position = 0
-    else:
-        for item in x:
-            if abs(item - x0) <= tolerance:
-                position = i
-                break
-            i += 1
-    return position
+def find_index( x,x0,tolerance= None):
+    '''YG Oct 16,2017 copied from SAXS
+    find index of x0 in x
+    #find the position of P in a list (plist) with tolerance
+    '''
+    N=len(x)
+    i=0
+    position = None
+    if tolerance == None:
+        tolerance = (x[1]-x[0])/2.
+    if x0 > max(x):
+        position= len(x) -1
+    elif x0 < min(x):
+        position= 0
+    else:
+        for item in x:
+            if abs(item-x0) <= tolerance:
+                position = i
+                break
+            i += 1
+    return position
 
             try:
                 els = line.split()
-                if good_cols is None:
-                    temp = np.array(els, dtype=float)
+                if good_cols == None:
+                    temp = np.array( els, dtype=float )
                 else:
-                    temp = np.array([els[j] for j in good_cols], dtype=float)
-                data = np.vstack((data, temp))
+                    temp= np.array( [els[j] for j in good_cols], dtype=float )
+                data=np.vstack( (data,temp))
             except:
                 pass
-    if labels is None:
+    if labels == None:
         labels = np.arange(data.shape[1])
-    df = pds.DataFrame(data, index=np.arange(data.shape[0]), columns=labels)
+    df = pds.DataFrame( data, index= np.arange(data.shape[0]), columns= labels )
     return df
 
 
-def get_print_uids(start_time, stop_time, return_all_info=False):
-    """Update Feb 20, 2018 also return full uids
+def get_print_uids( start_time, stop_time, return_all_info=False):
+    '''Update Feb 20, 2018 also return full uids
     YG. Oct 3, 2017@CHX
     Get full uids and print uid plus Measurement contents by giving start_time, stop_time
-    """
-    hdrs = list(db(start_time=start_time, stop_time=stop_time))
-    fuids = np.zeros(len(hdrs), dtype=object)
-    uids = np.zeros(len(hdrs), dtype=object)
-    sids = np.zeros(len(hdrs), dtype=object)
-    n = 0
-    all_info = np.zeros(len(hdrs), dtype=object)
-    for i in range(len(hdrs)):
-        fuid = hdrs[-i - 1]["start"]["uid"]  # reverse order
-        uid = fuid[:6]  # reverse order
-        sid = hdrs[-i - 1]["start"]["scan_id"]
-        fuids[n] = fuid
-        uids[n] = uid
-        sids[n] = sid
-        date = time.ctime(hdrs[-i - 1]["start"]["time"])
+    '''
+    hdrs = list( db(start_time= start_time, stop_time = stop_time) )
+    fuids = np.zeros( len(hdrs),dtype=object)
+    uids = np.zeros( len(hdrs),dtype=object)
+    sids = np.zeros( len(hdrs), dtype=object)
+    n=0
+    all_info = np.zeros( len(hdrs), dtype=object)
+    for i in range(len(hdrs)):
+        fuid = hdrs[-i-1]['start']['uid'] #reverse order
+        uid = fuid[:6] #reverse order
+        sid = hdrs[-i-1]['start']['scan_id']
+        fuids[n]=fuid
+        uids[n]=uid
+        sids[n]=sid
+        date = time.ctime(hdrs[-i-1]['start']['time'])
         try:
-            m = hdrs[-i - 1]["start"]["Measurement"]
+            m = hdrs[-i-1]['start']['Measurement']
         except:
-            m = ""
-        info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid)
-        print(info)
+            m=''
+        info = "%3d: uid = '%s' ##%s #%s: %s-- %s "%(i,uid,date,sid,m, fuid)
+        print( info )
         if return_all_info:
-            all_info[n] = info
-        n += 1
+            all_info[n]=info
+        n +=1
     if not return_all_info:
         return fuids, uids, sids
     else:
         return fuids, uids, sids, all_info
 
-def get_last_uids(n=-1):
-    """YG Sep 26, 2017
-    A convenient function to copy uid to jupyter for analysis"""
-    uid = db[n]["start"]["uid"][:8]
-    sid = db[n]["start"]["scan_id"]
-    m = db[n]["start"]["Measurement"]
-    return " uid = '%s' #(scan num: %s (Measurement: %s " % (uid, sid, m)
+def get_last_uids( n=-1 ):
+    '''YG Sep 26, 2017
+    A convenient function to copy uid to jupyter for analysis'''
+    uid = db[n]['start']['uid'][:8]
+    sid = db[n]['start']['scan_id']
+    m = db[n]['start']['Measurement']
+    return " uid = '%s' #(scan num: %s (Measurement: %s "%(uid,sid,m)
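
A usage sketch for the two lookup helpers above (the time window, uid and scan number are illustrative; db is the databroker instance this module already assumes):

    # list all runs in a time window and keep their uids for batch analysis:
    fuids, uids, sids = get_print_uids( '2017-2-24 12:00:00', '2017-2-24 14:00:00' )
    # copy-paste helper for the most recent run:
    print( get_last_uids(-1) )  # e.g. " uid = 'abcd1234' #(scan num: 1234 (Measurement: test "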
 
 
-def get_base_all_filenames(inDir, base_filename_cut_length=-7):
-    """YG Sep 26, 2017
+def get_base_all_filenames( inDir, base_filename_cut_length = -7 ):
+    '''YG Sep 26, 2017
     Get base filenames and all their related filenames
     Input:
     inDir, str, input data dir
@@ -2037,13 +1886,12 @@ def get_base_all_filenames(inDir, base_filename_cut_length=-7):
     Output:
     dict: keys, base filename; values, all related filenames
-    """
+    '''
     from os import listdir
     from os.path import isfile, join
-
-    tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))])
+    tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] )
     tifsc = list(tifs.copy())
-    utifs = np.sort(np.unique(np.array([f[:base_filename_cut_length] for f in tifs])))[::-1]
+    utifs = np.sort( np.unique( np.array([ f[:base_filename_cut_length] for f in tifs] ) ) )[::-1]
     files = {}
     for uf in utifs:
         files[uf] = []
@@ -2051,15 +1899,15 @@
         reName = []
         for i in range(len(tifsc)):
             if uf in tifsc[i]:
-                files[uf].append(tifsc[i])
+                files[uf].append( tifsc[i] )
                 reName.append(tifsc[i])
         for fn in reName:
             tifsc.remove(fn)
     return files
 
 
-def create_ring_mask(shape, r1, r2, center, mask=None):
-    """YG. Sep 20, 2017 Develop@CHX
+def create_ring_mask( shape, r1, r2, center, mask=None):
+    '''YG. Sep 20, 2017 Develop@CHX
     Create 2D ring mask
     input:
     shape: two integer number list, mask shape, e.g., [100,100]
@@ -2068,34 +1916,32 @@ def create_ring_mask(shape, r1, r2, center, mask=None):
     center: two integer number list, [cx,cy], ring center, e.g., [30,50]
     output:
     2D numpy array, 0,1 type
-    """
-
-    m = np.zeros(shape, dtype=bool)
-    rr, cc = disk((center[1], center[0]), r2, shape=shape)
-    m[rr, cc] = 1
-    rr, cc = disk((center[1], center[0]), r1, shape=shape)
-    m[rr, cc] = 0
-    if mask is not None:
+    '''
+
+    m = np.zeros( shape, dtype= bool)
+    rr,cc = disk((center[1], center[0]), r2, shape=shape )
+    m[rr,cc] = 1
+    rr,cc = disk((center[1], center[0]), r1,shape=shape )
+    m[rr,cc] = 0
+    if mask != None:
         m += mask
     return m
 
-
 def get_image_edge(img):
-    """
+    '''
     Y.G. Developed at Sep 8, 2017 @CHX
     Get sharp edges of an image
     img: two-D array, e.g., a roi mask
-    """
-    edg_ = prewitt(img / 1.0)
+    '''
+    edg_ = prewitt(img/1.0)
     edg = np.zeros_like(edg_)
     w = np.where(edg_ > 1e-10)
     edg[w] = img[w]
-    edg[np.where(edg == 0)] = 1
+    edg[np.where(edg==0)] = 1
     return edg
 
-def get_image_with_roi(img, roi_mask, scale_factor=2):
-    """
+def get_image_with_roi( img, roi_mask, scale_factor = 2):
+    '''
     Y.G. Developed at Sep 8, 2017 @CHX
     Get image with edges of roi_mask by doing
         i) get edges of roi_mask by function get_image_edge
@@ -2103,22 +1949,24 @@ def get_image_with_roi(img, roi_mask, scale_factor=2):
     img: two-D array for image
     roi_mask: two-D array for ROI
     scale_factor: scaling factor of ROI in image
-    """
-    edg = get_image_edge(roi_mask)
+    '''
+    edg = get_image_edge( roi_mask )
     img_ = img.copy()
     w = np.where(roi_mask)
-    img_[w] = img[w] * scale_factor
+    img_[w] = img[w] * scale_factor
     return img_ * edg
 
-def get_today_date():
-    from time import gmtime, strftime
 
-    return strftime("%m-%d-%Y", gmtime())
 
+def get_today_date( ):
+    from time import gmtime, strftime
+    return strftime("%m-%d-%Y", gmtime() )
 
-def move_beamstop(mask, xshift, yshift):
-    """Y.G. 
Developed at July 18, 2017 @CHX
     Create a new mask by shifting the old one with xshift, yshift
     Input
     ---
     mask: 2D numpy array,
     xshift, integer, shift value along x direction
     yshift, integer, shift value along y direction
     Output
     ---
     mask, 2D numpy array,
-    """
+    '''
     m = np.ones_like(mask)
-    W, H = mask.shape
-    w = np.where(mask == 0)
-    nx, ny = w[0] + int(yshift), w[1] + int(xshift)
-    gw = np.where((nx >= 0) & (nx < W) & (ny >= 0) & (ny < H))
-    nx = nx[gw]
-    ny = ny[gw]
-    m[nx, ny] = 0
+    W,H = mask.shape
+    w = np.where(mask==0)
+    nx, ny = w[0]+ int(yshift), w[1]+ int(xshift )
+    gw = np.where( (nx >= 0) & (nx < W) & (ny >= 0) & (ny < H) )
+    nx = nx[gw]
+    ny = ny[gw]
+    m[nx,ny] = 0
     return m
 
 
+def filter_roi_mask( filter_dict, roi_mask, avg_img, filter_type='ylim' ):
+    rm = roi_mask.copy()
+    rf = np.ravel(rm)
+    for k in list(filter_dict.keys()):
-        pixel = roi.roi_pixel_values(avg_img, roi_mask, [k])[0][0]
-        # print( np.max(pixel), np.min(pixel) )
-        if filter_type == "ylim":
-            xmin, xmax = filter_dict[k]
-            badp = np.where((pixel >= xmax) | (pixel <= xmin))[0]
+        pixel = roi.roi_pixel_values(avg_img, roi_mask, [k] )[0][0]
+        #print( np.max(pixel), np.min(pixel) )
+        if filter_type == 'ylim':
+            xmin,xmax = filter_dict[k]
+            badp =np.where( (pixel>= xmax) | ( pixel <= xmin) )[0]
         else:
             badp = filter_dict[k]
-        if len(badp) != 0:
-            pls = np.where([rf == k])[1]
-            rf[pls[badp]] = 0
+        if len(badp)!=0:
+            pls = np.where([rf==k])[1]
+            rf[ pls[badp] ] = 0
     return rm
 
 
 ##
-# Dev at March 31 for create Eiger chip mask
-def create_chip_edges_mask(det="1M"):
-    """Create a chip edge mask for Eiger detector"""
-    if det == "1M":
+#Dev at March 31 for create Eiger chip mask
+def create_chip_edges_mask( det='1M' ):
+    ''' Create a chip edge mask for Eiger detector
+
+    '''
+    if det == '1M':
         shape = [1065, 1030]
         w = 4
-        mask = np.ones(shape, dtype=np.int32)
-        cx = [1030 // 4 * i for i in range(1, 4)]
-        # cy = [ 1065//4 *i for i in range(1,4) ]
-        cy = [808, 257]
-        # print (cx, cy )
+        mask = np.ones( shape , dtype = np.int32)
+        cx = [ 1030//4 *i for i in range(1,4) ]
+        #cy = [ 1065//4 *i for i in range(1,4) ]
+        cy = [808, 257 ]
+        #print (cx, cy )
         for c in cx:
-            mask[:, c - w // 2 : c + w // 2] = 0
+            mask[:, c-w//2:c+w//2 ] = 0
         for c in cy:
-            mask[c - w // 2 : c + w // 2, :] = 0
+            mask[ c-w//2:c+w//2, : ] = 0
     return mask
 
+def create_ellipse_donut( cx, cy , wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0):
+    Nmax = np.max( np.unique( roi_mask ) )
+    rr1, cc1 = ellipse( cy,cx, wy_inner, wx_inner )
+    rr2, cc2 = ellipse( cy, cx, wy_inner + gap, wx_inner +gap )
+    rr3, cc3 = ellipse( cy, cx, wy_outer,wx_outer )
+    roi_mask[rr3,cc3] = 2 + Nmax
+    roi_mask[rr2,cc2] = 0
+    roi_mask[rr1,cc1] = 1 + Nmax
+    return roi_mask
 
-def create_ellipse_donut(cx, cy, wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0):
-    Nmax = np.max(np.unique(roi_mask))
-    rr1, cc1 = ellipse(cy, cx, wy_inner, wx_inner)
-    rr2, cc2 = ellipse(cy, cx, wy_inner + gap, wx_inner + gap)
-    rr3, cc3 = ellipse(cy, cx, wy_outer, wx_outer)
-    roi_mask[rr3, cc3] = 2 + Nmax
-    roi_mask[rr2, cc2] = 0
-    roi_mask[rr1, cc1] = 1 + Nmax
+def create_box( cx, cy, wx, wy, roi_mask):
+    Nmax = np.max( np.unique( roi_mask ) )
+    for i, [cx_,cy_] in enumerate(list( zip( cx,cy ))): #create boxes
+        x = np.array( [ cx_-wx, cx_+wx, cx_+wx, cx_-wx])
+        y = np.array( [ cy_-wy, cy_-wy, cy_+wy, cy_+wy])
+        rr, cc = polygon( y,x)
+        roi_mask[rr,cc] = i +1 + Nmax
     return roi_mask
 
 
-def create_box(cx, cy, wx, wy, roi_mask):
-    Nmax = np.max(np.unique(roi_mask))
-    for i, [cx_, cy_] in enumerate(list(zip(cx, cy))):  # create boxes
-        x = np.array([cx_ - wx, cx_ + wx, cx_ + wx, cx_ - wx])
-        y = np.array([cy_ - wy, cy_ - wy, cy_ + wy, cy_ + wy])
-        rr, cc = polygon(y, x)
-        roi_mask[rr, cc] = i + 1 + Nmax
-    return roi_mask
-
-
-def create_folder(base_folder, sub_folder):
-    """
+def create_folder( base_folder, sub_folder ):
+    '''
     Create a subfolder under base folder
     Input:
     base_folder: full path of the base folder
     sub_folder: sub folder name to be created
     Return:
     Created full path 
of the created folder - """ + ''' - data_dir0 = os.path.join(base_folder, sub_folder) + data_dir0 = os.path.join( base_folder, sub_folder ) ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) - print("Results from this analysis will be stashed in the directory %s" % data_dir0) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) return data_dir0 -def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): - """ + + + +def create_user_folder( CYCLE, username=None, default_dir= '/XF11ID/analysis/' ): + ''' Crate a folder for saving user data analysis result Input: CYCLE: run cycle username: if None, get username from the jupyter username Return: Created folder name - """ - if username != "Default": - if username is None: + ''' + if username !='Default': + if username == None: username = getpass.getuser() - data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") + data_dir0 = os.path.join(default_dir, CYCLE, username, 'Results/') else: - data_dir0 = os.path.join(default_dir, CYCLE + "/") + data_dir0 = os.path.join(default_dir, CYCLE +'/') ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) - print("Results from this analysis will be stashed in the directory %s" % data_dir0) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) return data_dir0 + + + + ################################## #########For dose analysis ####### ################################## -def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): - """ +def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): + ''' Calculate the frame number to be correlated by giving a X-ray exposure dose Paramters: @@ -2396,12 +2238,12 @@ def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): exp_time = 1.34, dead_time = 2) --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) - """ - return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att) + ''' + return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att ) -def get_multi_tau_lag_steps(fra_max, num_bufs=8): - """ +def get_multi_tau_lag_steps( fra_max, num_bufs = 8 ): + ''' Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max Parameters: fra_max: integer, the maximun frame number @@ -2412,14 +2254,16 @@ def get_multi_tau_lag_steps(fra_max, num_bufs=8): e.g., get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) - """ - num_levels = int(np.log(fra_max / (num_bufs - 1)) / np.log(2) + 1) + 1 + ''' + num_levels = int(np.log( fra_max/(num_bufs-1))/np.log(2) +1) +1 tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) return lag_steps[lag_steps < fra_max] -def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True, num_bufs=8): - """ + +def get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = True, + num_bufs = 8): + ''' Get taus for dose dependent analysis Parameters: fra_max_list: a list, a lsit of largest available frame number @@ -2436,30 +2280,30 @@ def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) } - """ + ''' tausd = {} for n in fra_max_list: - if max_fra_num is not None: + if max_fra_num != None: L = max_fra_num else: L = np.infty - if n > L: - warnings.warn( - 
"Warning: the dose value is too large, and please" - "check the maxium dose in this data set and give a smaller dose value." - "We will use the maxium dose of the data." - ) + if n>L: + warnings.warn("Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." + "We will use the maxium dose of the data.") n = L if log_taus: - lag_steps = get_multi_tau_lag_steps(n, num_bufs) + lag_steps = get_multi_tau_lag_steps(n, num_bufs) else: - lag_steps = np.arange(n) + lag_steps = np.arange( n ) tausd[n] = lag_steps * acq_time return tausd -def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * 10 * (-5)): - """Y.G. Dec 31, 2016, check lost metadata + + +def check_lost_metadata(md, Nimg=None, inc_x0 =None, inc_y0= None, pixelsize=7.5*10*(-5) ): + '''Y.G. Dec 31, 2016, check lost metadata Parameter: md: dict, meta data dictionay @@ -2473,55 +2317,56 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * timeperframe: acquisition time is sec center: list, [x,y], incident beam center in pixel Will also update md - """ + ''' mdn = md.copy() - if "number of images" not in list(md.keys()): - md["number of images"] = Nimg - if "x_pixel_size" not in list(md.keys()): - md["x_pixel_size"] = 7.5000004e-05 - dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + if 'number of images' not in list(md.keys()): + md['number of images'] = Nimg + if 'x_pixel_size' not in list(md.keys()): + md['x_pixel_size'] = 7.5000004e-05 + dpix = md['x_pixel_size'] * 1000. #in mm, eiger 4m is 0.075 mm try: - lambda_ = md["wavelength"] + lambda_ =md['wavelength'] except: - lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + lambda_ =md['incident_wavelength'] # wavelegth of the X-rays in Angstroms try: - Ldet = md["det_distance"] - if Ldet <= 1000: - Ldet *= 1000 - md["det_distance"] = Ldet + Ldet = md['det_distance'] + if Ldet<=1000: + Ldet *=1000 + md['det_distance'] = Ldet except: - Ldet = md["detector_distance"] - if Ldet <= 1000: - Ldet *= 1000 - md["detector_distance"] = Ldet + Ldet = md['detector_distance'] + if Ldet<=1000: + Ldet *=1000 + md['detector_distance'] = Ldet + - try: # try exp time from detector - exposuretime = md["count_time"] # exposure time in sec + try:#try exp time from detector + exposuretime= md['count_time'] #exposure time in sec except: - exposuretime = md["cam_acquire_time"] # exposure time in sec - try: # try acq time from detector - acquisition_period = md["frame_time"] + exposuretime= md['cam_acquire_time'] #exposure time in sec + try:#try acq time from detector + acquisition_period = md['frame_time'] except: try: - acquisition_period = md["acquire period"] + acquisition_period = md['acquire period'] except: - uid = md["uid"] - acquisition_period = float(db[uid]["start"]["acquire period"]) + uid = md['uid'] + acquisition_period = float( db[uid]['start']['acquire period'] ) timeperframe = acquisition_period - if inc_x0 is not None: - mdn["beam_center_x"] = inc_y0 - print("Beam_center_x has been changed to %s. (no change in raw metadata): " % inc_y0) - if inc_y0 is not None: - mdn["beam_center_y"] = inc_x0 - print("Beam_center_y has been changed to %s. (no change in raw metadata): " % inc_x0) - center = [int(mdn["beam_center_x"]), int(mdn["beam_center_y"])] # beam center [y,x] for python image - center = [center[1], center[0]] + if inc_x0 != None: + mdn['beam_center_x']= inc_y0 + print( 'Beam_center_x has been changed to %s. 
(no change in raw metadata): '%inc_y0) + if inc_y0 != None: + mdn['beam_center_y']= inc_x0 + print( 'Beam_center_y has been changed to %s. (no change in raw metadata): '%inc_x0) + center = [ int(mdn['beam_center_x']),int( mdn['beam_center_y'] ) ] #beam center [y,x] for python image + center=[center[1], center[0]] return dpix, lambda_, Ldet, exposuretime, timeperframe, center -def combine_images(filenames, outputfile, outsize=(2000, 2400)): - """Y.G. Dec 31, 2016 +def combine_images( filenames, outputfile, outsize=(2000, 2400)): + '''Y.G. Dec 31, 2016 Combine images together to one image using PIL.Image Input: filenames: list, the images names to be combined @@ -2529,44 +2374,45 @@ def combine_images(filenames, outputfile, outsize=(2000, 2400)): outsize: the combined image size Output: save a combined image file - """ - N = len(filenames) - # nx = np.int( np.ceil( np.sqrt(N)) ) - # ny = np.int( np.ceil( N / float(nx) ) ) - - ny = int(np.ceil(np.sqrt(N))) - nx = int(np.ceil(N / float(ny))) - - # print(nx,ny) - result = Image.new("RGB", outsize, color=(255, 255, 255, 0)) - basewidth = int(outsize[0] / nx) - hsize = int(outsize[1] / ny) + ''' + N = len( filenames) + #nx = np.int( np.ceil( np.sqrt(N)) ) + #ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int( np.ceil( np.sqrt(N)) ) + nx = int( np.ceil( N / float(ny) ) ) + + #print(nx,ny) + result = Image.new("RGB", outsize, color=(255,255,255,0)) + basewidth = int( outsize[0]/nx ) + hsize = int( outsize[1]/ny ) for index, file in enumerate(filenames): path = os.path.expanduser(file) img = Image.open(path) bands = img.split() - ratio = img.size[1] / img.size[0] # h/w + ratio = img.size[1]/ img.size[0] #h/w if hsize > basewidth * ratio: basewidth_ = basewidth - hsize_ = int(basewidth * ratio) + hsize_ = int( basewidth * ratio ) else: - basewidth_ = int(hsize / ratio) - hsize_ = hsize - # print( index, file, basewidth, hsize ) - size = (basewidth_, hsize_) - bands = [b.resize(size, Image.LINEAR) for b in bands] - img = Image.merge("RGBA", bands) + basewidth_ = int( hsize/ratio ) + hsize_ = hsize + #print( index, file, basewidth, hsize ) + size = (basewidth_,hsize_) + bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] + img = Image.merge('RGBA', bands) x = index % nx * basewidth y = index // nx * hsize w, h = img.size - # print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) - result.paste(img, (x, y, x + w, y + h)) - result.save(outputfile, quality=100, optimize=True) - print("The combined image is saved as: %s" % outputfile) + #print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h )) + result.save( outputfile,quality=100, optimize=True ) + print( 'The combined image is saved as: %s'%outputfile) -def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz=True, one_qz_multi_qr=True): - """Y.G. Dec 27, 2016 +def get_qval_dict( qr_center, qz_center=None, qval_dict = None, multi_qr_for_one_qz= True, + one_qz_multi_qr = True): + '''Y.G. 
Dec 27, 2016 Map the roi label array with qr or (qr,qz) or (q//, q|-) values Parameters: qr_center: list, a list of qr @@ -2581,385 +2427,314 @@ def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz Return: qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) - """ + ''' - if qval_dict is None: + if qval_dict == None: qval_dict = {} maxN = 0 else: - maxN = np.max(list(qval_dict.keys())) + 1 + maxN = np.max( list( qval_dict.keys() ) ) +1 - if qz_center is not None: + if qz_center != None: if multi_qr_for_one_qz: if one_qz_multi_qr: - for qzind in range(len(qz_center)): - for qrind in range(len(qr_center)): - qval_dict[maxN + qzind * len(qr_center) + qrind] = np.array( - [qr_center[qrind], qz_center[qzind]] - ) + for qzind in range( len( qz_center)): + for qrind in range( len( qr_center)): + qval_dict[ maxN + qzind* len( qr_center) + qrind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) else: - for qrind in range(len(qr_center)): - for qzind in range(len(qz_center)): - qval_dict[maxN + qrind * len(qz_center) + qzind] = np.array( - [qr_center[qrind], qz_center[qzind]] - ) + for qrind in range( len( qr_center)): + for qzind in range( len( qz_center)): + qval_dict[ maxN + qrind* len( qz_center) + qzind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) + else: - for i, [qr, qz] in enumerate(zip(qr_center, qz_center)): - qval_dict[maxN + i] = np.array([qr, qz]) + for i, [qr, qz] in enumerate(zip( qr_center, qz_center)): + qval_dict[ maxN + i ] = np.array( [ qr, qz ] ) else: - for qrind in range(len(qr_center)): - qval_dict[maxN + qrind] = np.array([qr_center[qrind]]) + for qrind in range( len( qr_center)): + qval_dict[ maxN + qrind ] = np.array( [ qr_center[qrind] ] ) return qval_dict -def update_qval_dict(qval_dict1, qval_dict2): - """Y.G. Dec 31, 2016 +def update_qval_dict( qval_dict1, qval_dict2 ): + ''' Y.G. Dec 31, 2016 Update qval_dict1 with qval_dict2 Input: qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) Output: qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) - """ - maxN = np.max(list(qval_dict1.keys())) + 1 + ''' + maxN = np.max( list( qval_dict1.keys() ) ) +1 qval_dict = {} - qval_dict.update(qval_dict1) - for k in list(qval_dict2.keys()): - qval_dict[k + maxN] = qval_dict2[k] + qval_dict.update( qval_dict1 ) + for k in list( qval_dict2.keys() ): + qval_dict[k + maxN ] = qval_dict2[k] return qval_dict - -def update_roi_mask(roi_mask1, roi_mask2): - """Y.G. Dec 31, 2016 +def update_roi_mask( roi_mask1, roi_mask2 ): + ''' Y.G. Dec 31, 2016 Update qval_dict1 with qval_dict2 Input: roi_mask1, 2d-array, label array, same shape as xpcs frame, roi_mask2, 2d-array, label array, same shape as xpcs frame, Output: roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 - """ + ''' roi_mask = roi_mask1.copy() - w = np.where(roi_mask2) - roi_mask[w] = roi_mask2[w] + np.max(roi_mask) + w= np.where( roi_mask2 ) + roi_mask[w] = roi_mask2[w] + np.max( roi_mask ) return roi_mask -def check_bad_uids(uids, mask, img_choice_N=10, bad_uids_index=None): - """Y.G. Dec 22, 2016 - Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. 
- Parameters: - uids: list, a list of uid - mask: array, bool type numpy.array - img_choice_N: random select number of the uid - bad_uids_index: a list of known bad uid list, default is None - Return: - guids: list, good uids - buids, list, bad uids - """ +def check_bad_uids(uids, mask, img_choice_N = 10, bad_uids_index = None ): + '''Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + ''' import random - buids = [] - guids = list(uids) - # print( guids ) - if bad_uids_index is None: + guids = list( uids ) + #print( guids ) + if bad_uids_index == None: bad_uids_index = [] for i, uid in enumerate(uids): - # print( i, uid ) + #print( i, uid ) if i not in bad_uids_index: - detector = get_detector(db[uid]) - imgs = load_data(uid, detector) - img_samp_index = random.sample(range(len(imgs)), img_choice_N) - imgsa = apply_mask(imgs, mask) - avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uid) + detector = get_detector( db[uid ] ) + imgs = load_data( uid, detector ) + img_samp_index = random.sample( range(len(imgs)), img_choice_N) + imgsa = apply_mask( imgs, mask ) + avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uid) if avg_img.max() == 0: - buids.append(uid) - guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) - print("The bad uid is: %s" % uid) + buids.append( uid ) + guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) + print( 'The bad uid is: %s'%uid ) else: - guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) - buids.append(uid) - print("The bad uid is: %s" % uid) - print("The total and bad uids number are %s and %s, repsectively." % (len(uids), len(buids))) + guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) + buids.append( uid ) + print( 'The bad uid is: %s'%uid ) + print( 'The total and bad uids number are %s and %s, repsectively.'%( len(uids), len(buids) ) ) return guids, buids -def find_uids(start_time, stop_time): - """Y.G. Dec 22, 2016 - A wrap funciton to find uids by giving start and end time - Return: - sids: list, scan id - uids: list, uid with 8 character length - fuids: list, uid with full length - """ - hdrs = db(start_time=start_time, stop_time=stop_time) +def find_uids(start_time, stop_time ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = db(start_time= start_time, stop_time = stop_time) try: - print("Totally %s uids are found." 
% (len(list(hdrs)))) + print ('Totally %s uids are found.'%(len(list(hdrs)))) except: pass - sids = [] - uids = [] - fuids = [] + sids=[] + uids=[] + fuids=[] for hdr in hdrs: - s = get_sid_filenames(hdr) - # print (s[1][:8]) - sids.append(s[0]) - uids.append(s[1][:8]) - fuids.append(s[1]) - sids = sids[::-1] - uids = uids[::-1] - fuids = fuids[::-1] + s= get_sid_filenames( hdr) + #print (s[1][:8]) + sids.append( s[0] ) + uids.append( s[1][:8] ) + fuids.append( s[1] ) + sids=sids[::-1] + uids=uids[::-1] + fuids=fuids[::-1] return np.array(sids), np.array(uids), np.array(fuids) -def ployfit(y, x=None, order=20): - """ +def ployfit( y, x=None, order = 20 ): + ''' fit data (one-d array) by a ploynominal function return the fitted one-d array - """ - if x is None: + ''' + if x == None: x = range(len(y)) pol = np.polyfit(x, y, order) return np.polyval(pol, x) - -def check_bad_data_points( - data, - fit=True, - polyfit_order=30, - legend_size=12, - plot=True, - scale=1.0, - good_start=None, - good_end=None, - path=None, - return_ylim=False, -): - """ +def check_bad_data_points( data, fit=True, polyfit_order = 30, legend_size = 12, + plot=True, scale=1.0, good_start=None, good_end=None, path=None, return_ylim=False ): + ''' data: 1D array scale: the scale of deviation fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve - """ - if good_start is None: - good_start = 0 - if good_end is None: - good_end = len(data) + ''' + if good_start == None: + good_start=0 + if good_end == None: + good_end = len( data ) bd1 = [i for i in range(0, good_start)] - bd3 = [i for i in range(good_end, len(data))] + bd3 = [i for i in range(good_end,len( data ) )] d_ = data[good_start:good_end] if fit: - pfit = ployfit(d_, order=polyfit_order) + pfit = ployfit( d_, order = polyfit_order) d = d_ - pfit else: d = d_ pfit = np.ones_like(d) * data.mean() - ymin = d.mean() - scale * d.std() - ymax = d.mean() + scale * d.std() + ymin = d.mean()-scale *d.std() + ymax = d.mean()+scale *d.std() if plot: - fig = plt.figure() - ax = fig.add_subplot(2, 1, 1) - plot1D(d_, ax=ax, color="k", legend="data", legend_size=legend_size) - plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title="Find Bad Points", legend_size=legend_size) - - ax2 = fig.add_subplot(2, 1, 2) - plot1D( - d, - ax=ax2, - legend="difference", - marker="s", - color="b", - ) - - # print('here') - plot1D( - x=[0, len(d_)], - y=[ymin, ymin], - ax=ax2, - ls="--", - lw=3, - marker="o", - color="r", - legend="low_thresh", - legend_size=legend_size, - ) - - plot1D( - x=[0, len(d_)], - y=[ymax, ymax], - ax=ax2, - ls="--", - lw=3, - marker="o", - color="r", - legend="high_thresh", - title="", - legend_size=legend_size, - ) - - if path is not None: - fp = path + "%s" % (uid) + "_find_bad_points" + ".png" - plt.savefig(fp, dpi=fig.dpi) - bd2 = list(np.where(np.abs(d - d.mean()) > scale * d.std())[0] + good_start) + fig = plt.figure( ) + ax = fig.add_subplot(2,1,1 ) + plot1D( d_, ax = ax, color='k', legend='data',legend_size=legend_size ) + plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title='Find Bad Points',legend_size=legend_size ) + + ax2 = fig.add_subplot(2,1,2 ) + plot1D( d, ax = ax2,legend='difference',marker='s', color='b', ) + + #print('here') + plot1D(x=[0,len(d_)], 
y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) + + plot1D(x=[0,len(d_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='',legend_size=legend_size ) + + if path != None: + fp = path + '%s'%( uid ) + '_find_bad_points' + '.png' + plt.savefig( fp, dpi=fig.dpi) + bd2= list( np.where( np.abs(d -d.mean()) > scale *d.std() )[0] + good_start ) if return_ylim: - return np.array(bd1 + bd2 + bd3), ymin, ymax, pfit + return np.array( bd1 + bd2 + bd3 ), ymin, ymax,pfit else: - return np.array(bd1 + bd2 + bd3), pfit - - -def get_bad_frame_list( - imgsum, - fit=True, - polyfit_order=30, - legend_size=12, - plot=True, - scale=1.0, - good_start=None, - good_end=None, - uid="uid", - path=None, - return_ylim=False, -): - """ + return np.array( bd1 + bd2 + bd3 ), pfit + + + + +def get_bad_frame_list( imgsum, fit=True, polyfit_order = 30,legend_size = 12, + plot=True, scale=1.0, good_start=None, good_end=None, uid='uid',path=None, + + return_ylim=False): + ''' imgsum: the sum intensity of a time series scale: the scale of deviation fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve - """ - if good_start is None: - good_start = 0 - if good_end is None: - good_end = len(imgsum) + ''' + if good_start == None: + good_start=0 + if good_end == None: + good_end = len( imgsum ) bd1 = [i for i in range(0, good_start)] - bd3 = [i for i in range(good_end, len(imgsum))] + bd3 = [i for i in range(good_end,len( imgsum ) )] imgsum_ = imgsum[good_start:good_end] if fit: - pfit = ployfit(imgsum_, order=polyfit_order) + pfit = ployfit( imgsum_, order = polyfit_order) data = imgsum_ - pfit else: data = imgsum_ pfit = np.ones_like(data) * data.mean() - ymin = data.mean() - scale * data.std() - ymax = data.mean() + scale * data.std() + ymin = data.mean()-scale *data.std() + ymax = data.mean()+scale *data.std() if plot: - fig = plt.figure() - ax = fig.add_subplot(2, 1, 1) - plot1D(imgsum_, ax=ax, color="k", legend="data", legend_size=legend_size) - plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title=uid + "_imgsum", legend_size=legend_size) - - ax2 = fig.add_subplot(2, 1, 2) - plot1D( - data, - ax=ax2, - legend="difference", - marker="s", - color="b", - ) - - # print('here') - plot1D( - x=[0, len(imgsum_)], - y=[ymin, ymin], - ax=ax2, - ls="--", - lw=3, - marker="o", - color="r", - legend="low_thresh", - legend_size=legend_size, - ) - - plot1D( - x=[0, len(imgsum_)], - y=[ymax, ymax], - ax=ax2, - ls="--", - lw=3, - marker="o", - color="r", - legend="high_thresh", - title="imgsum_to_find_bad_frame", - legend_size=legend_size, - ) - - if path is not None: - fp = path + "%s" % (uid) + "_imgsum_analysis" + ".png" - plt.savefig(fp, dpi=fig.dpi) - - bd2 = list(np.where(np.abs(data - data.mean()) > scale * data.std())[0] + good_start) + fig = plt.figure( ) + ax = fig.add_subplot(2,1,1 ) + plot1D( imgsum_, ax = ax, color='k', legend='data',legend_size=legend_size ) + plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title=uid + '_imgsum',legend_size=legend_size ) + + ax2 = fig.add_subplot(2,1,2 ) + plot1D( data, ax = ax2,legend='difference',marker='s', color='b', ) + + #print('here') + plot1D(x=[0,len(imgsum_)], y=[ymin,ymin], ax = ax2, 
ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) + + plot1D(x=[0,len(imgsum_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='imgsum_to_find_bad_frame',legend_size=legend_size ) + + if path != None: + fp = path + '%s'%( uid ) + '_imgsum_analysis' + '.png' + plt.savefig( fp, dpi=fig.dpi) + + + + bd2= list( np.where( np.abs(data -data.mean()) > scale *data.std() )[0] + good_start ) if return_ylim: - return np.array(bd1 + bd2 + bd3), ymin, ymax + return np.array( bd1 + bd2 + bd3 ), ymin, ymax else: - return np.array(bd1 + bd2 + bd3) - + return np.array( bd1 + bd2 + bd3 ) -def save_dict_csv(mydict, filename, mode="w"): +def save_dict_csv( mydict, filename, mode='w'): import csv - with open(filename, mode) as csv_file: spamwriter = csv.writer(csv_file) for key, value in mydict.items(): spamwriter.writerow([key, value]) -def read_dict_csv(filename): - import csv - with open(filename, "r") as csv_file: +def read_dict_csv( filename ): + import csv + with open(filename, 'r') as csv_file: reader = csv.reader(csv_file) mydict = dict(reader) return mydict -def find_bad_pixels(FD, bad_frame_list, uid="uid"): +def find_bad_pixels( FD, bad_frame_list, uid='uid'): bpx = [] - bpy = [] + bpy=[] for n in bad_frame_list: - if n >= FD.beg and n <= FD.end: + if n>= FD.beg and n<=FD.end: f = FD.rdframe(n) - w = np.where(f == f.max()) - if len(w[0]) == 1: - bpx.append(w[0][0]) - bpy.append(w[1][0]) + w = np.where( f == f.max()) + if len(w[0])==1: + bpx.append( w[0][0] ) + bpy.append( w[1][0] ) + + + return trans_data_to_pd( [bpx,bpy], label=[ uid+'_x', uid +'_y' ], dtype='list') + + + - return trans_data_to_pd([bpx, bpy], label=[uid + "_x", uid + "_y"], dtype="list") +def mask_exclude_badpixel( bp, mask, uid ): -def mask_exclude_badpixel(bp, mask, uid): - for i in range(len(bp)): - mask[int(bp[bp.columns[0]][i]), int(bp[bp.columns[1]][i])] = 0 + for i in range( len(bp)): + mask[ int( bp[bp.columns[0]][i] ), int( bp[bp.columns[1]][i] )]=0 return mask -def print_dict(dicts, keys=None): - """ + +def print_dict( dicts, keys=None): + ''' print keys: values in a dicts if keys is None: print all the keys - """ - if keys is None: - keys = list(dicts.keys()) + ''' + if keys == None: + keys = list( dicts.keys()) for k in keys: try: - print("%s--> %s" % (k, dicts[k])) + print('%s--> %s'%(k, dicts[k]) ) except: pass - -def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): - """ +def get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ): + ''' Jan 25, 2018 add default_dec opt Y.G. Dev Dec 8, 2016 @@ -2981,68 +2756,65 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): filename: the full path of the data start_time: the data acquisition starting time in a human readable manner And all the input metadata - """ + ''' - if "verbose" in kwargs.keys(): # added: option to suppress output - verbose = kwargs["verbose"] + if 'verbose' in kwargs.keys(): # added: option to suppress output + verbose= kwargs['verbose'] else: - verbose = True + verbose=True import time - header = db[uid] - md = {} + md ={} - md["suid"] = uid # short uid + md['suid'] = uid #short uid try: - md["filename"] = get_sid_filenames(header)[2][0] + md['filename'] = get_sid_filenames(header)[2][0] except: - md["filename"] = "N.A." + md['filename'] = 'N.A.' - devices = sorted(list(header.devices())) + devices = sorted( list(header.devices()) ) if len(devices) > 1: if verbose: # added: mute output - print( - "More than one device. 
This would have unintented consequences.Currently, only the device contains 'default_dec=%s'."
-                % default_dec
-            )
-            # raise ValueError("More than one device. This would have unintented consequences.")
+            print( "More than one device found. This could have unintended consequences; only the device whose name contains default_dec='%s' will be used."%default_dec)
+            #raise ValueError("More than one device. This would have unintented consequences.")
     dec = devices[0]
     for dec_ in devices:
         if default_dec in dec_:
             dec = dec_
-    # print(dec)
-    # detector_names = sorted( header.start['detectors'] )
-    detector_names = sorted(get_detectors(db[uid]))
-    # if len(detector_names) > 1:
+    #print(dec)
+    #detector_names = sorted( header.start['detectors'] )
+    detector_names = sorted( get_detectors(db[uid]) )
+    #if len(detector_names) > 1:
     #    raise ValueError("More than one det. This would have unintented consequences.")
     detector_name = detector_names[0]
-    # md['detector'] = detector_name
-    md["detector"] = get_detector(header)
-    # print( md['detector'] )
-    new_dict = header.config_data(dec)["primary"][0]
+    #md['detector'] = detector_name
+    md['detector'] = get_detector( header )
+    #print( md['detector'] )
+    new_dict = header.config_data(dec)['primary'][0]
     for key, val in new_dict.items():
-        newkey = key.replace(detector_name + "_", "")
+        newkey = key.replace(detector_name+"_", "")
         md[newkey] = val
     # for k,v in ev['descriptor']['configuration'][dec]['data'].items():
     #     md[ k[len(dec)+1:] ]= v
     try:
-        md.update(header.start["plan_args"].items())
-        md.pop("plan_args")
+        md.update(header.start['plan_args'].items())
+        md.pop('plan_args')
     except:
         pass
     md.update(header.start.items())
+
     # print(header.start.time)
-    md["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.start["time"]))
-    md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"]))
-    try:  # added: try to handle runs that don't contain image data
+    md['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(header.start['time']))
+    md['stop_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime( header.stop['time']))
+    try: # added: try to handle runs that don't contain image data
         if "primary" in header.v2:
             descriptor = header.v2["primary"].descriptors[0]
-            md["img_shape"] = descriptor["data_keys"][md["detector"]]["shape"][:2][::-1]
+            md['img_shape'] = descriptor['data_keys'][md['detector']]['shape'][:2][::-1]
     except:
         if verbose:
             print("couldn't find image shape...skip!")
@@ -3050,14 +2822,15 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs):
             pass
 
     md.update(kwargs)
-    # for k, v in sorted(md.items()):
-    #     ...
+    #for k, v in sorted(md.items()):
+    #    ...
     #     print(f'{k}: {v}')
     return md
 
 
-def get_max_countc(FD, labeled_array):
+
+def get_max_countc(FD, labeled_array ):
     """YG. 
2016, Nov 18 Compute the max intensity of ROIs in the compressed file (FD) @@ -3080,29 +2853,27 @@ def get_max_countc(FD, labeled_array): The labels for each element of the `mean_intensity` list """ - qind, pixelist = roi.extract_label_indices(labeled_array) - timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) - timg[pixelist] = np.arange(1, len(pixelist) + 1) + qind, pixelist = roi.extract_label_indices( labeled_array ) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) - if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]): + if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']): raise ValueError( - " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" - % (FD.md["ncols"], FD.md["nrows"], labeled_array.shape[0], labeled_array.shape[1]) - ) + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) ) - max_inten = 0 - for i in tqdm(range(FD.beg, FD.end, 1), desc="Get max intensity of ROIs in all frames"): + max_inten =0 + for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get max intensity of ROIs in all frames' ): try: - (p, v) = FD.rdrawframe(i) - w = np.where(timg[p])[0] - max_inten = max(max_inten, np.max(v[w])) + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + max_inten = max( max_inten, np.max(v[w]) ) except: pass return max_inten -def create_polygon_mask(image, xcorners, ycorners): - """ +def create_polygon_mask( image, xcorners, ycorners ): + ''' Give image and x/y coners to create a polygon mask image: 2d array xcorners, list, points of x coners @@ -3113,19 +2884,18 @@ def create_polygon_mask(image, xcorners, ycorners): Example: - """ - from skimage.draw import disk, line, line_aa, polygon - + ''' + from skimage.draw import line_aa, line, polygon, disk imy, imx = image.shape - bst_mask = np.zeros_like(image, dtype=bool) - rr, cc = polygon(ycorners, xcorners, shape=image.shape) - bst_mask[rr, cc] = 1 - # full_mask= ~bst_mask + bst_mask = np.zeros_like( image , dtype = bool) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) + bst_mask[rr,cc] =1 + #full_mask= ~bst_mask return bst_mask -def create_rectangle_mask(image, xcorners, ycorners): - """ +def create_rectangle_mask( image, xcorners, ycorners ): + ''' Give image and x/y coners to create a rectangle mask image: 2d array xcorners, list, points of x coners @@ -3136,19 +2906,18 @@ def create_rectangle_mask(image, xcorners, ycorners): Example: - """ - from skimage.draw import disk, line, line_aa, polygon - + ''' + from skimage.draw import line_aa, line, polygon, disk imy, imx = image.shape - bst_mask = np.zeros_like(image, dtype=bool) - rr, cc = polygon(ycorners, xcorners, shape=image.shape) - bst_mask[rr, cc] = 1 - # full_mask= ~bst_mask + bst_mask = np.zeros_like( image , dtype = bool) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) + bst_mask[rr,cc] =1 + #full_mask= ~bst_mask return bst_mask -def create_multi_rotated_rectangle_mask(image, center=None, length=100, width=50, angles=[0]): - """Developed at July 10, 2017 by Y.G.@CHX, NSLS2 +def create_multi_rotated_rectangle_mask( image, center=None, length=100, width=50, angles=[0] ): + ''' Developed at July 10, 2017 by Y.G.@CHX, NSLS2 Create multi rectangle-shaped mask by rotating a rectangle with a list of angles The original rectangle is defined by four corners, i.e., [ (center[1] - width//2, center[0]), @@ -3166,59 +2935,57 @@ 
def create_multi_rotated_rectangle_mask(image, center=None, length=100, width=50 Return: mask: 2D bool-type numpy array - """ + ''' - from skimage.draw import polygon + from skimage.draw import polygon from skimage.transform import rotate - - cx, cy = center + cx,cy = center imy, imx = image.shape - mask = np.zeros(image.shape, dtype=bool) - wy = length - wx = width - x = np.array([max(0, cx - wx // 2), min(imx, cx + wx // 2), min(imx, cx + wx // 2), max(0, cx - wx // 2)]) - y = np.array([cy, cy, min(imy, cy + wy), min(imy, cy + wy)]) - rr, cc = polygon(y, x, shape=image.shape) - mask[rr, cc] = 1 - mask_rot = np.zeros(image.shape, dtype=bool) + mask = np.zeros( image.shape, dtype = bool) + wy = length + wx = width + x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) + y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ]) + rr, cc = polygon( y,x, shape = image.shape) + mask[rr,cc] =1 + mask_rot= np.zeros( image.shape, dtype = bool) for angle in angles: - mask_rot += np.array(rotate(mask, angle, center=center), dtype=bool) # , preserve_range=True) - return ~mask_rot - + mask_rot += np.array( rotate( mask, angle, center= center ), dtype=bool) #, preserve_range=True) + return ~mask_rot -def create_wedge(image, center, radius, wcors, acute_angle=True): - """YG develop at June 18, 2017, @CHX - Create a wedge by a combination of disk and a triangle defined by center and wcors - wcors: [ [x1,x2,x3...], [y1,y2,y3..] - - """ - from skimage.draw import disk, line, line_aa, polygon +def create_wedge( image, center, radius, wcors, acute_angle=True) : + '''YG develop at June 18, 2017, @CHX + Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] 
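+
+    A minimal usage sketch of these mask helpers (all values below are
+    illustrative only; `img` is any 2D numpy array):
+        img = np.zeros([512, 512])
+        pmask = create_polygon_mask(img, xcorners=[100, 200, 200, 100],
+                                    ycorners=[100, 100, 200, 200])
+        rmask = create_multi_rotated_rectangle_mask(img, center=[256, 256],
+                                                    length=100, width=20, angles=[0, 90])
+        wmask = create_wedge(img, center=[256, 256], radius=200,
+                             wcors=[[100, 400], [50, 50]], acute_angle=True)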
+ ''' + from skimage.draw import line_aa, line, polygon, disk imy, imx = image.shape - cy, cx = center - x = [cx] + list(wcors[0]) - y = [cy] + list(wcors[1]) - - maskc = np.zeros_like(image, dtype=bool) - rr, cc = disk((cy, cx), radius, shape=image.shape) - maskc[rr, cc] = 1 - - maskp = np.zeros_like(image, dtype=bool) - x = np.array(x) - y = np.array(y) - print(x, y) - rr, cc = polygon(y, x, shape=image.shape) - maskp[rr, cc] = 1 + cy,cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like( image , dtype = bool) + rr, cc = disk((cy, cx), radius, shape = image.shape) + maskc[rr,cc] =1 + + maskp = np.zeros_like( image , dtype = bool) + x = np.array( x ) + y = np.array( y ) + print(x,y) + rr, cc = polygon( y,x, shape = image.shape) + maskp[rr,cc] =1 if acute_angle: - return maskc * maskp + return maskc*maskp else: - return maskc * ~maskp + return maskc*~maskp -def create_cross_mask( - image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, center_disk=True, center_radius=10 -): - """ + +def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, + center_disk = True, center_radius=10 + ): + ''' Give image and the beam center to create a cross-shaped mask wy_left: the width of left h-line wy_right: the width of rigth h-line @@ -3228,66 +2995,69 @@ def create_cross_mask( Return: the cross mask - """ - from skimage.draw import disk, line, line_aa, polygon + ''' + from skimage.draw import line_aa, line, polygon, disk imy, imx = image.shape - cx, cy = center - bst_mask = np.zeros_like(image, dtype=bool) + cx,cy = center + bst_mask = np.zeros_like( image , dtype = bool) ### - # for right part + #for right part wy = wy_right - x = np.array([cx, imx, imx, cx]) - y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) - rr, cc = polygon(y, x, shape=image.shape) - bst_mask[rr, cc] = 1 + x = np.array( [ cx, imx, imx, cx ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 ### - # for left part + #for left part wy = wy_left - x = np.array([0, cx, cx, 0]) - y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) - rr, cc = polygon(y, x, shape=image.shape) - bst_mask[rr, cc] = 1 + x = np.array( [0, cx, cx,0 ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 ### - # for up part + #for up part wx = wx_up - x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) - y = np.array([cy, cy, imy, imy]) - rr, cc = polygon(y, x, shape=image.shape) - bst_mask[rr, cc] = 1 + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ cy, cy, imy, imy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 ### - # for low part + #for low part wx = wx_down - x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) - y = np.array([0, 0, cy, cy]) - rr, cc = polygon(y, x, shape=image.shape) - bst_mask[rr, cc] = 1 + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ 0,0, cy, cy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + if center_radius!=0: + rr, cc = disk((cy, cx), center_radius, shape = bst_mask.shape) + bst_mask[rr,cc] =1 - if center_radius != 0: - rr, cc = disk((cy, cx), center_radius, shape=bst_mask.shape) - bst_mask[rr, cc] = 1 - full_mask = ~bst_mask + full_mask= ~bst_mask return full_mask -def generate_edge(centers, width): - """YG. 
10/14/2016 - give centers and width (number or list) to get edges""" - edges = np.zeros([len(centers), 2]) - edges[:, 0] = centers - width - edges[:, 1] = centers + width + + + +def generate_edge( centers, width): + '''YG. 10/14/2016 + give centers and width (number or list) to get edges''' + edges = np.zeros( [ len(centers),2]) + edges[:,0] = centers - width + edges[:,1] = centers + width return edges -def export_scan_scalar( - uid, x="dcm_b", y=["xray_eye1_stats1_total"], path="/XF11ID/analysis/2016_3/commissioning/Results/" -): - """YG. 10/17/2016 +def export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/' ): + '''YG. 10/17/2016 export uid data to a txt file uid: unique scan id x: the x-col @@ -3299,73 +3069,72 @@ def export_scan_scalar( A plot for the data: d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') - """ + ''' from databroker import DataBroker as db - - from pyCHX.chx_generic_functions import trans_data_to_pd + from pyCHX.chx_generic_functions import trans_data_to_pd hdr = db[uid] print(hdr.fields()) data = db[uid].table() xp = data[x] - datap = np.zeros([len(xp), len(y) + 1]) - datap[:, 0] = xp + datap = np.zeros( [len(xp), len(y)+1]) + datap[:,0] = xp for i, yi in enumerate(y): - datap[:, i + 1] = data[yi] + datap[:,i+1] = data[yi] - datap = trans_data_to_pd(datap, label=[x] + [yi for yi in y]) - datap.to_csv(path + "uid=%s.csv" % uid) + datap = trans_data_to_pd( datap, label=[x] + [yi for yi in y]) + datap.to_csv( path + 'uid=%s.csv'%uid) return datap -##### -# load data by databroker -def get_flatfield(uid, reverse=False): - import h5py +##### +#load data by databroker - detector = get_detector(db[uid]) +def get_flatfield( uid, reverse=False ): + import h5py + detector = get_detector( db[uid ] ) sud = get_sid_filenames(db[uid]) - master_path = "%s_master.h5" % (sud[2][0]) - print(master_path) - f = h5py.File(master_path, "r") - k = "entry/instrument/detector/detectorSpecific/" # data_collection_date' - d = np.array(f[k]["flatfield"]) + master_path = '%s_master.h5'%(sud[2][0]) + print( master_path) + f= h5py.File(master_path, 'r') + k= 'entry/instrument/detector/detectorSpecific/' #data_collection_date' + d= np.array( f[ k]['flatfield'] ) f.close() if reverse: - d = reverse_updown(d) + d = reverse_updown( d ) return d -def get_detector(header): - """Get the first detector image string by giving header""" + +def get_detector( header ): + '''Get the first detector image string by giving header ''' keys = get_detectors(header) for k in keys: - if "eiger" in k: + if 'eiger' in k: return k - -def get_detectors(header): - """Get all the detector image strings by giving header""" +def get_detectors( header ): + '''Get all the detector image strings by giving header ''' if "primary" in header.v2: descriptor = header.v2["primary"].descriptors[0] - keys = [k for k, v in descriptor["data_keys"].items() if "external" in v] + keys = [k for k, v in descriptor['data_keys'].items() if 'external' in v] return sorted(set(keys)) - return [] - + return [] -def get_full_data_path(uid): - """A dirty way to get full data path""" +def get_full_data_path( uid ): + '''A dirty way to get full data path''' header = db[uid] d = header.db - s = list(d.get_documents(db[uid])) - # print(s[2]) - p = s[2][1]["resource_path"] - p2 = s[3][1]["datum_kwargs"]["seq_id"] - # print(p,p2) - return p + "_" + str(p2) + "_master.h5" + s = list(d.get_documents( db[uid ])) + #print(s[2]) + p = s[2][1]['resource_path'] + p2 = 
s[3][1]['datum_kwargs']['seq_id']
-    # print(p,p2)
-    return p + "_" + str(p2) + "_master.h5"
+    #print(p,p2)
+    return p + '_' + str(p2) + '_master.h5'
+
 
 
 def get_sid_filenames(header):
@@ -3402,18 +3171,45 @@ def get_sid_filenames(header):
     for datum in event_model.unpack_datum_page(doc):
         datums[datum["resource"]].append(datum)
     for resource_uid, resource in resources.items():
-        file_prefix = Path(resource.get("root", "/"), resource["resource_path"])
-        if "eiger" not in resource["spec"].lower():
+        file_prefix = Path(resource.get('root', '/'), resource["resource_path"])
+        if 'eiger' not in resource['spec'].lower():
             continue
         for datum in datums[resource_uid]:
             dm_kw = datum["datum_kwargs"]
-            seq_id = dm_kw["seq_id"]
-            new_filepaths = glob(f"{file_prefix!s}_{seq_id}*")
+            seq_id = dm_kw['seq_id']
+            new_filepaths = glob(f'{file_prefix!s}_{seq_id}*')
             filepaths.extend(new_filepaths)
-    return header.start["scan_id"], header.start["uid"], filepaths
-
+    return header.start['scan_id'], header.start['uid'], filepaths
+
+def load_dask_data(uid,detector,reverse=False,rot90=False):
+    """
+    load data as dask-array
+    get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image)
+    load_dask_data(uid,detector,reverse=False,rot90=False)
+    returns detector_images(dask-array), image_md
+    LW 04/26/2024
+    """
+    import dask
+    hdr=db[uid]
+    det=detector.split('_image')[0]
+    # collect image metadata
+    img_md_dict={'detector_distance':'det_distance','incident_wavelength':'wavelength','frame_time':'cam_acquire_period','count_time':'cam_acquire_time','num_images':'cam_num_images','beam_center_x':'beam_center_x','beam_center_y':'beam_center_y'}
+    img_md={}
+    for k in list(img_md_dict.keys()):
+        img_md[k]=hdr.config_data(det)['primary'][0]['%s_%s'%(det,img_md_dict[k])]
+    if detector in ['eiger4m_single_image','eiger1m_single_image','eiger500K_single_image']:
+        img_md.update({'y_pixel_size': 7.5e-05, 'x_pixel_size': 7.5e-05})
+    else:
+        img_md.update({'y_pixel_size': None, 'x_pixel_size': None})
+    # load image data as dask-array:
+    dimg=hdr.xarray_dask()[detector][0]
+    if reverse:
+        dimg=dask.array.flip(dimg,axis=(0,1))
+    if rot90:
+        dimg=dask.array.rot90(dimg,axes=(1,2))
+    return dimg,img_md
 
-def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, rot90=False):
+def load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, rot90=False):
     """load bluesky scan data by giving uid and detector
 
     Parameters
@@ -3438,11 +3234,11 @@ def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, ro
     ATTEMPTS = 0
     for attempt in range(ATTEMPTS):
         try:
-            (ev,) = hdr.events(fields=[detector], fill=fill)
+            ev, = hdr.events(fields=[detector], fill=fill)
             break
 
         except Exception:
-            print("Trying again ...!")
+            print ('Trying again ...!')
             if attempt == ATTEMPTS - 1:
                 # We're out of attempts. Raise the exception to help with debugging.
                 raise
@@ -3453,51 +3249,54 @@ def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, ro
     # TODO(mrakitin): replace with the lazy loader (when it's implemented):
     imgs = list(hdr.data(detector))
 
-    if len(imgs[0]) >= 1:
+    if len(imgs[0])>=1:
         md = imgs[0].md
         imgs = pims.pipeline(lambda img: img)(imgs[0])
         imgs.md = md
 
     if reverse:
         md = imgs.md
-        imgs = reverse_updown(imgs)  # Why not np.flipud?
+        imgs = reverse_updown( imgs ) # Why not np.flipud?
         imgs.md = md
 
     if rot90:
         md = imgs.md
-        imgs = rot90_clockwise(imgs)  # Why not np.flipud?
+        imgs = rot90_clockwise( imgs ) # Why not np.flipud?
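+        # For an out-of-core alternative to loading all frames eagerly here,
+        # see load_dask_data above; a minimal usage sketch (uid is a placeholder):
+        #     dimg, img_md = load_dask_data(uid, detector='eiger4m_single_image')
+        #     frame0 = dimg[0].compute()  # frames are only materialized on .compute()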
imgs.md = md return imgs -def mask_badpixels(mask, detector): - """ +def mask_badpixels( mask, detector ): + ''' Mask known bad pixel from the giveing mask - """ - if detector == "eiger1m_single_image": - # to be determined + ''' + if detector =='eiger1m_single_image': + #to be determined mask = mask - elif detector == "eiger4m_single_image" or detector == "image": - mask[513:552, :] = 0 - mask[1064:1103, :] = 0 - mask[1615:1654, :] = 0 - mask[:, 1029:1041] = 0 - mask[:, 0] = 0 - mask[0:, 2069] = 0 - mask[0] = 0 - mask[2166] = 0 - - elif detector == "eiger500K_single_image": - # to be determined + elif detector =='eiger4m_single_image' or detector == 'image': + mask[513:552,:] =0 + mask[1064:1103,:] =0 + mask[1615:1654,:] =0 + mask[:,1029:1041] = 0 + mask[:, 0] =0 + mask[0:, 2069] =0 + mask[0] =0 + mask[2166] =0 + + elif detector =='eiger500K_single_image': + #to be determined mask = mask else: mask = mask return mask -def load_data2(uid, detector="eiger4m_single_image"): + + + +def load_data2( uid , detector = 'eiger4m_single_image' ): """load bluesky scan data by giveing uid and detector Parameters @@ -3515,52 +3314,54 @@ def load_data2(uid, detector="eiger4m_single_image"): md = imgs.md """ hdr = db[uid] - flag = 1 - while flag < 4 and flag != 0: + flag =1 + while flag<4 and flag !=0: try: - (ev,) = hdr.events(fields=[detector]) - flag = 0 + ev, = hdr.events(fields=[detector]) + flag =0 except: flag += 1 - print("Trying again ...!") + print ('Trying again ...!') if flag: - print("Can't Load Data!") - uid = "00000" # in case of failling load data + print ("Can't Load Data!") + uid = '00000' #in case of failling load data imgs = 0 else: - imgs = ev["data"][detector] + imgs = ev['data'][detector] - # print (imgs) + #print (imgs) return imgs -def psave_obj(obj, filename): - """save an object with filename by pickle.dump method + +def psave_obj(obj, filename ): + '''save an object with filename by pickle.dump method This function automatically add '.pkl' as filename extension Input: obj: the object to be saved filename: filename (with full path) to be saved Return: None - """ - with open(filename + ".pkl", "wb") as f: + ''' + with open( filename + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) - -def pload_obj(filename): - """load a pickled filename +def pload_obj(filename ): + '''load a pickled filename This function automatically add '.pkl' to filename extension Input: filename: filename (with full path) to be saved Return: load the object by pickle.load method - """ - with open(filename + ".pkl", "rb") as f: + ''' + with open( filename + '.pkl', 'rb') as f: return pickle.load(f) -def load_mask(path, mask_name, plot_=False, reverse=False, rot90=False, *argv, **kwargs): + +def load_mask( path, mask_name, plot_ = False, reverse=False, rot90=False, *argv,**kwargs): + """load a mask file the mask is a numpy binary file (.npy) @@ -3579,132 +3380,103 @@ def load_mask(path, mask_name, plot_=False, reverse=False, rot90=False, *argv, * mask = load_mask( path, mask_name, plot_ = True ) """ - mask = np.load(path + mask_name) - mask = np.array(mask, dtype=np.int32) + mask = np.load( path + mask_name ) + mask = np.array(mask, dtype = np.int32) if reverse: - mask = mask[::-1, :] + mask = mask[::-1,:] if rot90: - mask = np.rot90(mask) + mask = np.rot90( mask ) if plot_: - show_img(mask, *argv, **kwargs) + show_img( mask, *argv,**kwargs) return mask -def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0): - """create a hot pixel mask by giving threshold 
- Input: - img: the image to create hot pixel mask - threshold: the threshold above which will be considered as hot pixels - center: optional, default=None - else, as a two-element list (beam center), i.e., [center_x, center_y] - if center is not None, the hot pixel will not include a disk region - which is defined by center and center_radius ( in unit of pixel) - Output: - a bool types numpy array (mask), 1 is good and 0 is excluded - """ - bst_mask = np.ones_like(img, dtype=bool) - if center is not None: - from skimage.draw import disk +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0 ): + '''create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded + ''' + bst_mask = np.ones_like( img , dtype = bool) + if center != None: + from skimage.draw import disk imy, imx = img.shape - cy, cx = center - rr, cc = disk((cy, cx), center_radius, shape=img.shape) - bst_mask[rr, cc] = 0 + cy,cx = center + rr, cc = disk((cy, cx), center_radius,shape=img.shape ) + bst_mask[rr,cc] =0 if outer_radius: - bst_mask = np.zeros_like(img, dtype=bool) - rr2, cc2 = disk((cy, cx), outer_radius, shape=img.shape) - bst_mask[rr2, cc2] = 1 - bst_mask[rr, cc] = 0 - hmask = np.ones_like(img) - hmask[np.where(img * bst_mask > threshold)] = 0 + bst_mask = np.zeros_like( img , dtype = bool) + rr2, cc2 = disk((cy, cx), outer_radius,shape=img.shape ) + bst_mask[rr2,cc2] =1 + bst_mask[rr,cc] =0 + hmask = np.ones_like( img ) + hmask[np.where( img * bst_mask > threshold)]=0 return hmask -def apply_mask(imgs, mask): - """apply mask to imgs to produce a generator + + +def apply_mask( imgs, mask): + '''apply mask to imgs to produce a generator Usuages: imgsa = apply_mask( imgs, mask ) good_series = apply_mask( imgs[good_start:], mask ) - """ + ''' return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask -def reverse_updown(imgs): - """reverse imgs upside down to produce a generator +def reverse_updown( imgs): + '''reverse imgs upside down to produce a generator Usuages: imgsr = reverse_updown( imgs) - """ - return pims.pipeline(lambda img: img[::-1, :])(imgs) # lazily apply mask - + ''' + return pims.pipeline(lambda img: img[::-1,:])(imgs) # lazily apply mask -def rot90_clockwise(imgs): - """reverse imgs upside down to produce a generator +def rot90_clockwise( imgs): + '''reverse imgs upside down to produce a generator Usuages: imgsr = rot90_clockwise( imgs) - """ - return pims.pipeline(lambda img: np.rot90(img))(imgs) # lazily apply mask - + ''' + return pims.pipeline(lambda img: np.rot90(img) )(imgs) # lazily apply mask -def RemoveHot(img, threshold=1e7, plot_=True): - """Remove hot pixel from img""" +def RemoveHot( img,threshold= 1E7, plot_=True ): + '''Remove hot pixel from img''' - mask = np.ones_like(np.array(img)) - badp = np.where(np.array(img) >= threshold) - if len(badp[0]) != 0: + mask = np.ones_like( np.array( img ) ) + badp = np.where( np.array(img) >= threshold ) + if len(badp[0])!=0: mask[badp] = 0 if plot_: - show_img(mask) + show_img( mask ) return mask ############ ###plot data - -def show_img( - image, - ax=None, - label_array=None, - 
alpha=0.5, - interpolation="nearest", - xlim=None, - ylim=None, - save=False, - image_name=None, - path=None, - aspect=None, - logs=False, - vmin=None, - vmax=None, - return_fig=False, - cmap="viridis", - show_time=False, - file_name=None, - ylabel=None, - xlabel=None, - extent=None, - show_colorbar=True, - tight=True, - show_ticks=True, - save_format="png", - dpi=None, - center=None, - origin="lower", - lab_fontsize=16, - tick_size=12, - colorbar_fontsize=8, - use_mat_imshow=False, - *argv, - **kwargs, -): +def show_img( image, ax=None,label_array=None, alpha=0.5, interpolation='nearest', + xlim=None, ylim=None, save=False,image_name=None,path=None, + aspect=None, logs=False,vmin=None,vmax=None,return_fig=False,cmap='viridis', + show_time= False, file_name =None, ylabel=None, xlabel=None, extent=None, + show_colorbar=True, tight=True, show_ticks=True, save_format = 'png', dpi= None, + center=None,origin='lower', lab_fontsize = 16, tick_size = 12, colorbar_fontsize = 8, + use_mat_imshow=False, + *argv,**kwargs ): """YG. Sep26, 2017 Add label_array/alpha option to show a mask on top of image a simple function to show image by using matplotlib.plt imshow @@ -3718,119 +3490,88 @@ def show_img( ------- None """ - if ax is None: + if ax == None: if RUN_GUI: fig = Figure() ax = fig.add_subplot(111) else: fig, ax = plt.subplots() else: - fig, ax = ax + fig, ax=ax - if center is not None: - plot1D(center[1], center[0], ax=ax, c="b", m="o", legend="") + + if center != None: + plot1D(center[1],center[0],ax=ax, c='b', m='o', legend='') if not logs: if not use_mat_imshow: - im = imshow( - ax, - image, - origin=origin, - cmap=cmap, - interpolation=interpolation, - vmin=vmin, - vmax=vmax, - extent=extent, - ) # vmin=0,vmax=1, + im=imshow(ax, image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, + extent=extent) #vmin=0,vmax=1, else: - im = ax.imshow( - image, origin=origin, cmap=cmap, interpolation=interpolation, vmin=vmin, vmax=vmax, extent=extent - ) # vmin=0,vmax=1, + im=ax.imshow( image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, + extent=extent) #vmin=0,vmax=1, else: if not use_mat_imshow: - im = imshow( - ax, - image, - origin=origin, - cmap=cmap, - interpolation=interpolation, - norm=LogNorm(vmin, vmax), - extent=extent, - ) + im=imshow(ax, image, origin=origin,cmap=cmap, + interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) else: - im = ax.imshow( - image, - origin=origin, - cmap=cmap, - interpolation=interpolation, - norm=LogNorm(vmin, vmax), - extent=extent, - ) - if label_array is not None: - im2 = show_label_array(ax, label_array, alpha=alpha, cmap=cmap, interpolation=interpolation) - - ax.set_title(image_name) - if xlim is not None: - ax.set_xlim(xlim) - if ylim is not None: - ax.set_ylim(ylim) + im=ax.imshow(image, origin=origin,cmap=cmap, + interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + if label_array != None: + im2=show_label_array(ax, label_array, alpha= alpha, cmap=cmap, interpolation=interpolation ) + + ax.set_title( image_name ) + if xlim != None: + ax.set_xlim( xlim ) + if ylim != None: + ax.set_ylim( ylim ) if not show_ticks: ax.set_yticks([]) ax.set_xticks([]) else: - ax.tick_params(axis="both", which="major", labelsize=tick_size) - ax.tick_params(axis="both", which="minor", labelsize=tick_size) - # mpl.rcParams['xtick.labelsize'] = tick_size - # mpl.rcParams['ytick.labelsize'] = tick_size - # print(tick_size) - - if ylabel is not None: - # ax.set_ylabel(ylabel)#, fontsize = 9) - 
ax.set_ylabel(ylabel, fontsize=lab_fontsize) - if xlabel is not None: - ax.set_xlabel(xlabel, fontsize=lab_fontsize) - - if aspect is not None: - # aspect = image.shape[1]/float( image.shape[0] ) + + ax.tick_params(axis='both', which='major', labelsize=tick_size ) + ax.tick_params(axis='both', which='minor', labelsize=tick_size ) + #mpl.rcParams['xtick.labelsize'] = tick_size + #mpl.rcParams['ytick.labelsize'] = tick_size + #print(tick_size) + + if ylabel != None: + #ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel( ylabel , fontsize = lab_fontsize ) + if xlabel != None: + ax.set_xlabel(xlabel , fontsize = lab_fontsize ) + + if aspect != None: + #aspect = image.shape[1]/float( image.shape[0] ) ax.set_aspect(aspect) else: - ax.set_aspect(aspect="auto") + ax.set_aspect(aspect='auto') if show_colorbar: - cbar = fig.colorbar(im, extend="neither", spacing="proportional", orientation="vertical") + cbar = fig.colorbar(im, extend='neither', spacing='proportional', + orientation='vertical' ) cbar.ax.tick_params(labelsize=colorbar_fontsize) fig.set_tight_layout(tight) if save: if show_time: - dt = datetime.now() - CurTime = "_%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) - fp = path + "%s" % (file_name) + CurTime + "." + save_format + dt =datetime.now() + CurTime = '_%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + fp = path + '%s'%( file_name ) + CurTime + '.' + save_format else: - fp = path + "%s" % (image_name) + "." + save_format - if dpi is None: + fp = path + '%s'%( image_name ) + '.' + save_format + if dpi == None: dpi = fig.dpi - plt.savefig(fp, dpi=dpi) - # fig.set_tight_layout(tight) + plt.savefig( fp, dpi= dpi) + #fig.set_tight_layout(tight) if return_fig: - return im # fig - - -def plot1D( - y, - x=None, - yerr=None, - ax=None, - return_fig=False, - ls="-", - figsize=None, - legend=None, - legend_size=None, - lw=None, - markersize=None, - tick_size=8, - *argv, - **kwargs, -): + return im #fig + + + + +def plot1D( y,x=None, yerr=None, ax=None,return_fig=False, ls='-', figsize=None,legend=None, + legend_size=None, lw=None, markersize=None, tick_size=8, *argv,**kwargs): """a simple function to plot two-column data by using matplotlib.plot pass *argv,**kwargs to plot @@ -3843,223 +3584,200 @@ def plot1D( ------- None """ - if ax is None: + if ax == None: if RUN_GUI: fig = Figure() ax = fig.add_subplot(111) else: - if figsize is not None: + if figsize != None: fig, ax = plt.subplots(figsize=figsize) else: fig, ax = plt.subplots() - if legend is None: - legend = " " + if legend == None: + legend = ' ' try: - logx = kwargs["logx"] + logx = kwargs['logx'] except: - logx = False + logx=False try: - logy = kwargs["logy"] + logy = kwargs['logy'] except: - logy = False + logy=False try: - logxy = kwargs["logxy"] + logxy = kwargs['logxy'] except: - logxy = False + logxy= False - if logx == True and logy == True: + if logx==True and logy==True: logxy = True try: - marker = kwargs["marker"] + marker = kwargs['marker'] except: try: - marker = kwargs["m"] + marker = kwargs['m'] except: - marker = next(markers_) + marker= next( markers_ ) try: - color = kwargs["color"] + color = kwargs['color'] except: try: - color = kwargs["c"] + color = kwargs['c'] except: - color = next(colors_) + color = next( colors_ ) - if x is None: - x = range(len(y)) - if yerr is None: - ax.plot( - x, - y, - marker=marker, - color=color, - ls=ls, - label=legend, - lw=lw, - markersize=markersize, - ) # ,*argv,**kwargs) + if x == None: + x=range(len(y)) + if yerr == None: + 
ax.plot(x,y, marker=marker,color=color,ls=ls,label= legend, lw=lw, + markersize=markersize, )#,*argv,**kwargs) else: - ax.errorbar( - x, - y, - yerr, - marker=marker, - color=color, - ls=ls, - label=legend, - lw=lw, - markersize=markersize, - ) # ,*argv,**kwargs) + ax.errorbar(x,y,yerr, marker=marker,color=color,ls=ls,label= legend, + lw=lw,markersize=markersize,)#,*argv,**kwargs) if logx: - ax.set_xscale("log") + ax.set_xscale('log') if logy: - ax.set_yscale("log") + ax.set_yscale('log') if logxy: - ax.set_xscale("log") - ax.set_yscale("log") - - ax.tick_params(axis="both", which="major", labelsize=tick_size) - ax.tick_params(axis="both", which="minor", labelsize=tick_size) - - if "xlim" in kwargs.keys(): - ax.set_xlim(kwargs["xlim"]) - if "ylim" in kwargs.keys(): - ax.set_ylim(kwargs["ylim"]) - if "xlabel" in kwargs.keys(): - ax.set_xlabel(kwargs["xlabel"]) - if "ylabel" in kwargs.keys(): - ax.set_ylabel(kwargs["ylabel"]) - - if "title" in kwargs.keys(): - title = kwargs["title"] + ax.set_xscale('log') + ax.set_yscale('log') + + + ax.tick_params(axis='both', which='major', labelsize=tick_size ) + ax.tick_params(axis='both', which='minor', labelsize=tick_size ) + + if 'xlim' in kwargs.keys(): + ax.set_xlim( kwargs['xlim'] ) + if 'ylim' in kwargs.keys(): + ax.set_ylim( kwargs['ylim'] ) + if 'xlabel' in kwargs.keys(): + ax.set_xlabel(kwargs['xlabel']) + if 'ylabel' in kwargs.keys(): + ax.set_ylabel(kwargs['ylabel']) + + if 'title' in kwargs.keys(): + title = kwargs['title'] else: - title = "plot" - ax.set_title(title) - # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') - if (legend != "") and (legend != None): - ax.legend(loc="best", fontsize=legend_size) - if "save" in kwargs.keys(): - if kwargs["save"]: - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - # fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' - fp = kwargs["path"] + "%s" % (title) + ".png" - plt.savefig(fp, dpi=fig.dpi) + title = 'plot' + ax.set_title( title ) + #ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend!='') and (legend!=None): + ax.legend(loc = 'best', fontsize=legend_size ) + if 'save' in kwargs.keys(): + if kwargs['save']: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + #fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs['path'] + '%s'%( title ) + '.png' + plt.savefig( fp, dpi=fig.dpi) if return_fig: return fig ### +def check_shutter_open( data_series, min_inten=0, time_edge = [0,10], plot_ = False, *argv,**kwargs): -def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): - """Check the first frame with shutter open + '''Check the first frame with shutter open - Parameters - ---------- - data_series: a image series - min_inten: the total intensity lower than min_inten is defined as shtter close - time_edge: the searching frame number range + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined as shtter close + time_edge: the searching frame number range - return: - shutter_open_frame: a integer, the first frame number with open shutter + return: + shutter_open_frame: a integer, the first frame number with open shutter - Usuage: - good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) - """ - imgsum = np.array([np.sum(img) for img in 
data_series[time_edge[0] : time_edge[1] : 1]]) + ''' + imgsum = np.array( [np.sum(img ) for img in data_series[time_edge[0]:time_edge[1]:1]] ) if plot_: fig, ax = plt.subplots() - ax.plot(imgsum, "bo") - ax.set_title("uid=%s--imgsum" % uid) - ax.set_xlabel("Frame") - ax.set_ylabel("Total_Intensity") - # plt.show() - shutter_open_frame = np.where(np.array(imgsum) > min_inten)[0][0] - print("The first frame with open shutter is : %s" % shutter_open_frame) + ax.plot(imgsum,'bo') + ax.set_title('uid=%s--imgsum'%uid) + ax.set_xlabel( 'Frame' ) + ax.set_ylabel( 'Total_Intensity' ) + #plt.show() + shutter_open_frame = np.where( np.array(imgsum) > min_inten )[0][0] + print ('The first frame with open shutter is : %s'%shutter_open_frame ) return shutter_open_frame -def get_each_frame_intensity( - data_series, sampling=50, bad_pixel_threshold=1e10, plot_=False, save=False, *argv, **kwargs -): - """Get the total intensity of each frame by sampling every N frames - Also get bad_frame_list by check whether above bad_pixel_threshold - Usuage: - imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, - bad_pixel_threshold=1e10, plot_ = True) - """ +def get_each_frame_intensity( data_series, sampling = 50, + bad_pixel_threshold=1e10, + plot_ = False, save= False, *argv,**kwargs): + '''Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + ''' - # print ( argv, kwargs ) - imgsum = np.array([np.sum(img) for img in tqdm(data_series[::sampling], leave=True)]) + #print ( argv, kwargs ) + imgsum = np.array( [np.sum(img ) for img in tqdm( data_series[::sampling] , leave = True ) ] ) if plot_: - uid = "uid" - if "uid" in kwargs.keys(): - uid = kwargs["uid"] + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] fig, ax = plt.subplots() - ax.plot(imgsum, "bo") - ax.set_title("uid= %s--imgsum" % uid) - ax.set_xlabel("Frame_bin_%s" % sampling) - ax.set_ylabel("Total_Intensity") + ax.plot(imgsum,'bo') + ax.set_title('uid= %s--imgsum'%uid) + ax.set_xlabel( 'Frame_bin_%s'%sampling ) + ax.set_ylabel( 'Total_Intensity' ) if save: - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs["path"] - if "uid" in kwargs: - uid = kwargs["uid"] + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] else: - uid = "uid" - # fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--imgsum-" % uid + ".png" - fig.savefig(fp, dpi=fig.dpi) - # plt.show() + uid = 'uid' + #fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + #plt.show() - bad_frame_list = np.where(np.array(imgsum) > bad_pixel_threshold)[0] + bad_frame_list = np.where( np.array(imgsum) > bad_pixel_threshold )[0] if len(bad_frame_list): - print("Bad frame list are: %s" % bad_frame_list) + print ('Bad frame list are: %s' %bad_frame_list) else: - print("No bad frames are involved.") - return imgsum, bad_frame_list + print ('No bad frames are involved.') + return imgsum,bad_frame_list -def create_time_slice(N, slice_num, slice_width, edges=None): - """create a ROI time regions""" - if edges is not None: + + +def create_time_slice( N, slice_num, 
slice_width, edges=None ): + '''create a ROI time regions ''' + if edges != None: time_edge = edges else: - if slice_num == 1: - time_edge = [[0, N]] + if slice_num==1: + time_edge = [ [0,N] ] else: tstep = N // slice_num - te = np.arange(0, slice_num + 1) * tstep - tc = np.int_((te[:-1] + te[1:]) / 2)[1:-1] - if slice_width % 2: - sw = slice_width // 2 + 1 - time_edge = ( - [ - [0, slice_width], - ] - + [[s - sw + 1, s + sw] for s in tc] - + [[N - slice_width, N]] - ) + te = np.arange( 0, slice_num +1 ) * tstep + tc = np.int_( (te[:-1] + te[1:])/2 )[1:-1] + if slice_width%2: + sw = slice_width//2 +1 + time_edge = [ [0,slice_width], ] + [ [s-sw+1,s+sw] for s in tc ] + [ [N-slice_width,N]] else: - sw = slice_width // 2 - time_edge = ( - [ - [0, slice_width], - ] - + [[s - sw, s + sw] for s in tc] - + [[N - slice_width, N]] - ) + sw= slice_width//2 + time_edge = [ [0,slice_width], ] + [ [s-sw,s+sw] for s in tc ] + [ [N-slice_width,N]] + + return np.array(time_edge) -def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nearest", **kwargs): +def show_label_array(ax, label_array, cmap=None, aspect=None,interpolation='nearest',**kwargs): """ YG. Sep 26, 2017 Modified show_label_array(ax, label_array, cmap=None, **kwargs) @@ -4081,32 +3799,25 @@ def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nea img : AxesImage The artist added to the axes """ - if cmap is None: - cmap = "viridis" - # print(cmap) + if cmap == None: + cmap = 'viridis' + #print(cmap) _cmap = copy.copy((mcm.get_cmap(cmap))) - _cmap.set_under("w", 0) - vmin = max(0.5, kwargs.pop("vmin", 0.5)) - im = ax.imshow(label_array, cmap=cmap, interpolation=interpolation, vmin=vmin, **kwargs) - if aspect is None: - ax.set_aspect(aspect="auto") - # ax.set_aspect('equal') + _cmap.set_under('w', 0) + vmin = max(.5, kwargs.pop('vmin', .5)) + im = ax.imshow(label_array, cmap=cmap, + interpolation=interpolation, + vmin=vmin, + **kwargs) + if aspect == None: + ax.set_aspect(aspect='auto') + #ax.set_aspect('equal') return im -def show_label_array_on_image( - ax, - image, - label_array, - cmap=None, - norm=None, - log_img=True, - alpha=0.3, - vmin=0.1, - vmax=5, - imshow_cmap="gray", - **kwargs, -): # norm=LogNorm(), + +def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,alpha=0.3, vmin=0.1, vmax=5, + imshow_cmap='gray', **kwargs): #norm=LogNorm(), """ This will plot the required ROI's(labeled array) on the image @@ -4134,202 +3845,178 @@ def show_label_array_on_image( im_label : AxesImage The artist added to the axes """ - ax.set_aspect("equal") + ax.set_aspect('equal') - # print (vmin, vmax ) + #print (vmin, vmax ) if log_img: - im = ax.imshow( - image, cmap=imshow_cmap, interpolation="none", norm=LogNorm(vmin, vmax), **kwargs - ) # norm=norm, + im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(vmin, vmax),**kwargs) #norm=norm, else: - im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", vmin=vmin, vmax=vmax, **kwargs) # norm=norm, + im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',vmin=vmin, vmax=vmax,**kwargs) #norm=norm, + + im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, + **kwargs) # norm=norm, - im_label = mpl_plot.show_label_array( - ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, **kwargs - ) # norm=norm, return im, im_label -def show_ROI_on_image( - image, - ROI, - center=None, - rwidth=400, - alpha=0.3, - label_on=True, - save=False, - 
return_fig=False, - rect_reqion=None, - log_img=True, - vmin=0.01, - vmax=5, - show_ang_cor=False, - cmap=cmap_albula, - fig_ax=None, - uid="uid", - path="", - aspect=1, - show_colorbar=True, - show_roi_edge=False, - *argv, - **kwargs, -): - """show ROI on an image - image: the data frame - ROI: the interested region - center: the plot center - rwidth: the plot range around the center - """ +def show_ROI_on_image( image, ROI, center=None, rwidth=400,alpha=0.3, label_on = True, + save=False, return_fig = False, rect_reqion=None, log_img = True, vmin=0.01, vmax=5, + show_ang_cor = False,cmap = cmap_albula, fig_ax=None, + uid='uid', path='', aspect = 1, show_colorbar=True, show_roi_edge=False, *argv,**kwargs): + + '''show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center + + ''' + if RUN_GUI: - fig = Figure(figsize=(8, 8)) + fig = Figure(figsize=(8,8)) axes = fig.add_subplot(111) - elif fig_ax is not None: + elif fig_ax != None: fig, axes = fig_ax else: - fig, axes = plt.subplots() # plt.subplots(figsize=(8,8)) + fig, axes = plt.subplots( ) #plt.subplots(figsize=(8,8)) - # print( vmin, vmax) - # norm=LogNorm(vmin, vmax) + #print( vmin, vmax) + #norm=LogNorm(vmin, vmax) - axes.set_title("%s_ROI_on_Image" % uid) + axes.set_title( "%s_ROI_on_Image"%uid ) if log_img: - if vmin == 0: + if vmin==0: vmin += 1e-10 - vmax = max(1, vmax) + vmax = max(1, vmax ) if not show_roi_edge: - # print('here') - im, im_label = show_label_array_on_image( - axes, - image, - ROI, - imshow_cmap="viridis", - cmap=cmap, - alpha=alpha, - log_img=log_img, - vmin=vmin, - vmax=vmax, - origin="lower", - ) + #print('here') + im,im_label = show_label_array_on_image(axes, image, ROI, imshow_cmap='viridis', + cmap=cmap,alpha=alpha, log_img=log_img, + vmin=vmin, vmax=vmax, origin="lower") else: - edg = get_image_edge(ROI) - image_ = get_image_with_roi(image, ROI, scale_factor=2) - # fig, axes = plt.subplots( ) - show_img( - image_, - ax=[fig, axes], - vmin=vmin, - vmax=vmax, - logs=log_img, - image_name="%s_ROI_on_Image" % uid, - cmap=cmap, - ) - - if rect_reqion is None: - if center is not None: - x1, x2 = [center[1] - rwidth, center[1] + rwidth] - y1, y2 = [center[0] - rwidth, center[0] + rwidth] - axes.set_xlim([x1, x2]) - axes.set_ylim([y1, y2]) + edg = get_image_edge( ROI ) + image_ = get_image_with_roi( image, ROI, scale_factor = 2) + #fig, axes = plt.subplots( ) + show_img( image_, ax=[fig,axes], vmin=vmin, vmax=vmax, + logs= log_img, image_name= "%s_ROI_on_Image"%uid, + cmap = cmap ) + + + if rect_reqion == None: + if center != None: + x1,x2 = [center[1] - rwidth, center[1] + rwidth] + y1,y2 = [center[0] - rwidth, center[0] + rwidth] + axes.set_xlim( [x1,x2]) + axes.set_ylim( [y1,y2]) else: - x1, x2, y1, y2 = rect_reqion - axes.set_xlim([x1, x2]) - axes.set_ylim([y1, y2]) + x1,x2,y1,y2= rect_reqion + axes.set_xlim( [x1,x2]) + axes.set_ylim( [y1,y2]) if label_on: - num_qzr = len(np.unique(ROI)) - 1 - for i in range(1, num_qzr + 1): - ind = np.where(ROI == i)[1] - indz = np.where(ROI == i)[0] - c = "%i" % i - y_val = int(indz.mean()) - x_val = int(ind.mean()) - # print (xval, y) - axes.text(x_val, y_val, c, color="b", va="center", ha="center") + num_qzr = len(np.unique( ROI )) -1 + for i in range( 1, num_qzr + 1 ): + ind = np.where( ROI == i)[1] + indz = np.where( ROI == i)[0] + c = '%i'%i + y_val = int( indz.mean() ) + x_val = int( ind.mean() ) + #print (xval, y) + axes.text(x_val, y_val, c, color='b',va='center', ha='center') if 
show_ang_cor: - axes.text(-0.0, 0.5, "-/+180" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) - axes.text(1.0, 0.5, "0" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) - axes.text(0.5, -0.0, "-90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) - axes.text(0.5, 1.0, "90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(-0.0, 0.5, '-/+180' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(1.0, 0.5, '0' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(0.5, -0.0, '-90'+ r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(0.5, 1.0, '90' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) axes.set_aspect(aspect) - # fig.colorbar(im_label) + #fig.colorbar(im_label) if show_colorbar: if not show_roi_edge: fig.colorbar(im) if save: - fp = path + "%s_ROI_on_Image" % uid + ".png" - plt.savefig(fp, dpi=fig.dpi) - # plt.show() + fp = path + "%s_ROI_on_Image"%uid + '.png' + plt.savefig( fp, dpi=fig.dpi) + #plt.show() if return_fig: return fig, axes, im -def crop_image(image, crop_mask): - """Crop the non_zeros pixels of an image to a new image""" - from skimage.util import crop, pad + +def crop_image( image, crop_mask ): + + ''' Crop the non_zeros pixels of an image to a new image + + + ''' + from skimage.util import crop, pad pxlst = np.where(crop_mask.ravel())[0] dims = crop_mask.shape - imgwidthy = dims[1] # dimension in y, but in plot being x - imgwidthx = dims[0] # dimension in x, but in plot being y - # x and y are flipped??? - # matrix notation!!! - pixely = pxlst % imgwidthy - pixelx = pxlst // imgwidthy + imgwidthy = dims[1] #dimension in y, but in plot being x + imgwidthx = dims[0] #dimension in x, but in plot being y + #x and y are flipped??? + #matrix notation!!! 
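+    # Clarifying the index bookkeeping (np.ravel flattens in row-major order):
+    # for a flat index p of an (imgwidthx, imgwidthy) array, the row is
+    # p // imgwidthy and the column is p % imgwidthy, so pixely below is the
+    # column index (plot x) and pixelx is the row index (plot y).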
+ pixely = pxlst%imgwidthy + pixelx = pxlst//imgwidthy minpixelx = np.min(pixelx) minpixely = np.min(pixely) maxpixelx = np.max(pixelx) maxpixely = np.max(pixely) - crops = crop_mask * image - img_crop = crop(crops, ((minpixelx, imgwidthx - maxpixelx - 1), (minpixely, imgwidthy - maxpixely - 1))) + crops = crop_mask*image + img_crop = crop( crops, ((minpixelx, imgwidthx - maxpixelx -1 ), + (minpixely, imgwidthy - maxpixely -1 )) ) return img_crop -def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs): - """Get average imagef from a data_series by every sampling number to save time""" - if img_samp_index is None: - avg_img = np.average(data_series[::sampling], axis=0) +def get_avg_img( data_series, img_samp_index=None, sampling = 100, plot_ = False , save=False, *argv,**kwargs): + '''Get average imagef from a data_series by every sampling number to save time''' + if img_samp_index == None: + avg_img = np.average(data_series[:: sampling], axis=0) else: - avg_img = np.zeros_like(data_series[0]) - n = 0 + avg_img = np.zeros_like( data_series[0] ) + n=0 for i in img_samp_index: avg_img += data_series[i] - n += 1 - avg_img = np.array(avg_img) / n + n +=1 + avg_img = np.array( avg_img) / n if plot_: fig, ax = plt.subplots() - uid = "uid" - if "uid" in kwargs.keys(): - uid = kwargs["uid"] - - im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) - # ax.set_title("Masked Averaged Image") - ax.set_title("uid= %s--Masked Averaged Image" % uid) + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + + im = ax.imshow(avg_img , cmap='viridis',origin='lower', + norm= LogNorm(vmin=0.001, vmax=1e2)) + #ax.set_title("Masked Averaged Image") + ax.set_title('uid= %s--Masked Averaged Image'%uid) fig.colorbar(im) if save: - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs["path"] - if "uid" in kwargs: - uid = kwargs["uid"] + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] else: - uid = "uid" - # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--avg-img-" % uid + ".png" - fig.savefig(fp, dpi=fig.dpi) - # plt.show() + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + #plt.show() return avg_img -def check_ROI_intensity(avg_img, ring_mask, ring_number=3, save=False, plot=True, *argv, **kwargs): + +def check_ROI_intensity( avg_img, ring_mask, ring_number=3 , save=False, plot=True, *argv,**kwargs): + """plot intensity versus pixel of a ring Parameters ---------- @@ -4342,72 +4029,68 @@ def check_ROI_intensity(avg_img, ring_mask, ring_number=3, save=False, plot=True """ - # print('here') + #print('here') - uid = "uid" - if "uid" in kwargs.keys(): - uid = kwargs["uid"] - pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number]) + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number] ) if plot: fig, ax = plt.subplots() - ax.set_title("%s--check-RIO-%s-intensity" % (uid, ring_number)) - ax.plot(pixel[0][0], "bo", ls="-") - ax.set_ylabel("Intensity") - ax.set_xlabel("pixel") + ax.set_title('%s--check-RIO-%s-intensity'%(uid, ring_number) ) + ax.plot( pixel[0][0] ,'bo', ls='-' ) + ax.set_ylabel('Intensity') + 
ax.set_xlabel('pixel') if save: - path = kwargs["path"] - fp = path + "%s_Mean_intensity_of_one_ROI" % uid + ".png" - fig.savefig(fp, dpi=fig.dpi) + path = kwargs['path'] + fp = path + "%s_Mean_intensity_of_one_ROI"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) if save: - path = kwargs["path"] - save_lists( - [range(len(pixel[0][0])), pixel[0][0]], - label=["pixel_list", "roi_intensity"], - filename="%s_Mean_intensity_of_one_ROI" % uid, - path=path, - ) - # plt.show() + path = kwargs['path'] + save_lists( [range( len( pixel[0][0] )), pixel[0][0]], label=['pixel_list', 'roi_intensity'], + filename="%s_Mean_intensity_of_one_ROI"%uid, path= path) + #plt.show() return pixel[0][0] +#from tqdm import tqdm -# from tqdm import tqdm +def cal_g2( image_series, ring_mask, bad_image_process, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None ): + '''calculation g2 by using a multi-tau algorithm''' - -def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None): - """calculation g2 by using a multi-tau algorithm""" - - noframes = len(image_series) # number of frames, not "no frames" - # num_buf = 8 # number of buffers + noframes = len( image_series) # number of frames, not "no frames" + #num_buf = 8 # number of buffers if bad_image_process: import skbeam.core.mask as mask_image + bad_img_list = np.array( bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen( image_series, bad_img_list) - bad_img_list = np.array(bad_frame_list) - good_start - new_imgs = mask_image.bad_to_nan_gen(image_series, bad_img_list) + if num_lev == None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + print ('%s frames will be processed...'%(noframes)) + print( 'Bad Frames involved!') - if num_lev is None: - num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 - print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) - print("%s frames will be processed..." % (noframes)) - print("Bad Frames involved!") - - g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(new_imgs)) - print("G2 calculation DONE!") + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm( new_imgs) ) + print( 'G2 calculation DONE!') else: - if num_lev is None: - num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 - print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) - print("%s frames will be processed..." 
% (noframes)) - g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series)) - print("G2 calculation DONE!") + + if num_lev == None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + print ('%s frames will be processed...'%(noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series) ) + print( 'G2 calculation DONE!') return g2, lag_steps + def run_time(t0): - """Calculate running time of a program + '''Calculate running time of a program Parameters ---------- t0: time_string, t0=time.time() @@ -4421,17 +4104,17 @@ def run_time(t0): t0=time.time() .....(the running code) run_time(t0) - """ + ''' elapsed_time = time.time() - t0 - if elapsed_time < 60: - print("Total time: %.3f sec" % (elapsed_time)) + if elapsed_time<60: + print ('Total time: %.3f sec' %(elapsed_time )) else: - print("Total time: %.3f min" % (elapsed_time / 60.0)) + print ('Total time: %.3f min' %(elapsed_time/60.)) -def trans_data_to_pd(data, label=None, dtype="array"): - """ +def trans_data_to_pd(data, label=None,dtype='array'): + ''' convert data into pandas.DataFrame Input: data: list or np.array @@ -4439,32 +4122,29 @@ def trans_data_to_pd(data, label=None, dtype="array"): dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] Output: a pandas.DataFrame - """ - # lists a [ list1, list2...] all the list have the same length - import sys - - import pandas as pd - from numpy import arange, array - - if dtype == "list": - data = array(data).T - N, M = data.shape - elif dtype == "array": - data = array(data) - N, M = data.shape + ''' + #lists a [ list1, list2...] all the list have the same length + from numpy import arange,array + import pandas as pd,sys + if dtype == 'list': + data=array(data).T + N,M=data.shape + elif dtype == 'array': + data=array(data) + N,M=data.shape else: print("Wrong data type! 
Now only support 'list' and 'array' tpye") - index = arange(N) - if label is None: - label = ["data%s" % i for i in range(M)] - # print label - df = pd.DataFrame(data, index=index, columns=label) + + index = arange( N ) + if label == None:label=['data%s'%i for i in range(M)] + #print label + df = pd.DataFrame( data, index=index, columns= label ) return df -def save_lists(data, label=None, filename=None, path=None, return_res=False, verbose=False): - """ +def save_lists( data, label=None, filename=None, path=None, return_res = False, verbose=False): + ''' save_lists( data, label=None, filename=None, path=None) save lists to a CSV file with filename in path @@ -4477,55 +4157,55 @@ def save_lists(data, label=None, filename=None, path=None, return_res=False, ver Example: save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) - """ + ''' - M, N = len(data[0]), len(data) - d = np.zeros([N, M]) + M,N = len(data[0]),len(data) + d = np.zeros( [N,M] ) for i in range(N): d[i] = data[i] - df = trans_data_to_pd(d.T, label, "array") - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - if filename is None: - filename = "data" - filename = os.path.join(path, filename) # +'.csv') + df = trans_data_to_pd(d.T, label, 'array') + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = 'data' + filename = os.path.join(path, filename )#+'.csv') df.to_csv(filename) if verbose: - print("The data was saved in: %s." % filename) + print('The data was saved in: %s.'%filename) if return_res: return df - -def get_pos_val_overlap(p1, v1, p2, v2, Nl): - """get the overlap of v1 and v2 - p1: the index of array1 in array with total length as Nl - v1: the corresponding value of p1 - p2: the index of array2 in array with total length as Nl - v2: the corresponding value of p2 - Return: - The values in v1 with the position in overlap of p1 and p2 - The values in v2 with the position in overlap of p1 and p2 - - An example: - Nl =10 - p1= np.array( [1,3,4,6,8] ) - v1 = np.array( [10,20,30,40,50]) - p2= np.array( [ 0,2,3,5,7,8]) - v2=np.array( [10,20,30,40,50,60,70]) - - get_pos_val_overlap( p1, v1, p2,v2, Nl) - - """ - ind = np.zeros(Nl, dtype=np.int32) - ind[p1] = np.arange(len(p1)) + 1 - w2 = np.where(ind[p2])[0] - w1 = ind[p2[w2]] - 1 +def get_pos_val_overlap( p1, v1, p2,v2, Nl): + '''get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + + get_pos_val_overlap( p1, v1, p2,v2, Nl) + + ''' + ind = np.zeros( Nl, dtype=np.int32 ) + ind[p1] = np.arange( len(p1) ) +1 + w2 = np.where( ind[p2] )[0] + w1 = ind[ p2[w2]] -1 return v1[w1], v2[w2] -def save_arrays(data, label=None, dtype="array", filename=None, path=None, return_res=False, verbose=False): - """ + +def save_arrays( data, label=None, dtype='array', filename=None, path=None, return_res = False,verbose=False): + ''' July 10, 2016, Y.G.@CHX save_arrays( data, label=None, dtype='array', filename=None, path=None): save data to a CSV file with filename in 
path

@@ -4542,23 +4222,22 @@ def save_arrays(data, label=None, dtype="array", filename=None, path=None, retur
     save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir )
 
-    """
-    df = trans_data_to_pd(data, label, dtype)
-    # dt =datetime.now()
-    # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
-    if filename is None:
-        filename = "data"
-    filename_ = os.path.join(path, filename)  # +'.csv')
+    '''
+    df = trans_data_to_pd(data, label,dtype)
+    #dt =datetime.now()
+    #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
+    if filename == None:
+        filename = 'data'
+    filename_ = os.path.join(path, filename)# +'.csv')
     df.to_csv(filename_)
     if verbose:
-        print("The file: %s is saved in %s" % (filename, path))
-    # print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime))
+        print( 'The file: %s is saved in %s'%(filename, path) )
+    #print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime))
     if return_res:
         return df
 
-
-def cal_particle_g2(radius, viscosity, qr, taus, beta=0.2, T=298):
-    """YG Dev Nov 20, 2017@CHX
+def cal_particle_g2( radius, viscosity, qr, taus, beta=0.2, T=298):
+    '''YG Dev Nov 20, 2017@CHX
     calculate the particle g2 function by giving particle radius, Q, and solution viscosity
     using a simple exponential model
     Input:
@@ -4573,75 +4252,73 @@ def cal_particle_g2(radius, viscosity, qr, taus, beta=0.2, T=298):
     cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4)
 
-    """
-    D0 = get_diffusion_coefficient(viscosity, radius, T=T)
-    g2_q1 = np.zeros(len(qr), dtype=object)
+    '''
+    D0 = get_diffusion_coefficient( viscosity, radius, T=T)
+    g2_q1 = np.zeros(len(qr), dtype = object)
     for i, q1 in enumerate(qr):
         relaxation_rate = D0 * q1**2
-        g2_q1[i] = simple_exponential(taus, beta=beta, relaxation_rate=relaxation_rate, baseline=1)
+        g2_q1[i] = simple_exponential( taus, beta=beta, relaxation_rate = relaxation_rate, baseline=1)
     return g2_q1
 
+def get_Reynolds_number( flow_rate, flow_radius, fluid_density, fluid_viscosity ):
+    '''May 10, 2019, Y.G.@CHX
+    get Reynolds_number, the ratio of the inertial to viscous forces, V*Dia*density/eta
+    Reynolds_number << 1000 gives a laminar flow
+    flow_rate: ul/s
+    flow_radius: mm
+    fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 )
+    fluid_viscosity: N*s/m^2 ( Kg /(s*m) )
+
+    return Reynolds_number
+    '''
+    return flow_rate * 1e-6 * flow_radius * 1e-3 *2 * fluid_density/ fluid_viscosity
+
+def get_Deborah_number( flow_rate, beam_size, q_vector, diffusion_coefficient ):
+    '''May 10, 2019, Y.G.@CHX
+    get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2)
+    flow_rate: ul/s
+    beam_size: ul
+    q_vector: A-1
+    diffusion_coefficient: A^2/s
+
+    return Deborah_number
+    '''
+    return (flow_rate /beam_size) / ( diffusion_coefficient * q_vector**2 )
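Because the unit conversions in the two dimensionless numbers above are easy to mis-evaluate, here is a minimal editorial sketch (not part of the patch) that plugs illustrative values through exactly the conversions used by get_Reynolds_number and get_Deborah_number; every number below is an assumption chosen for demonstration:

# illustrative inputs (assumed, not taken from this repository)
flow_rate = 1.0      # ul/s
flow_radius = 0.5    # mm
rho = 1000.0         # Kg/m^3, water
eta = 8.9e-4         # N*s/m^2, water

# Re = V*Dia*density/eta, with ul/s and mm converted to SI as in get_Reynolds_number
Re = flow_rate * 1e-6 * flow_radius * 1e-3 * 2 * rho / eta
print(Re)            # ~1.1e-3, far below ~1000, i.e. safely laminar

# De = (flow_rate/beam_size) / (D*q^2), following get_Deborah_number's conventions
D = 4.18e5           # A^2/s, the glycerol/water example quoted in the docstring below
q = 0.01             # A^-1
beam_size = 10.0     # same volume units as flow_rate (ul), per the docstring
De = (flow_rate / beam_size) / (D * q**2)
print(De)            # ~2.4e-3: diffusion relaxes much faster than the beam transit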
+
+
+def get_viscosity( diffusion_coefficient , radius, T=298):
+    '''May 10, 2019, Y.G.@CHX
+    get viscosity of a Brownian motion particle with radius in fluid with diffusion_coefficient
+    diffusion_coefficient in unit of A^2/s
+    radius: m
+    T: K
+    k: 1.38064852(79)*10**(−23) J/K, Boltzmann constant
 
-def get_Reynolds_number(flow_rate, flow_radius, fluid_density, fluid_viscosity):
-    """May 10, 2019, Y.G.@CHX
-    get Reynolds_number, the ratio of the inertial to viscous forces, V*Dia*density/eta
-    Reynolds_number << 1000 gives a laminar flow
-    flow_rate: ul/s
-    flow_radius: mm
-    fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 )
-    fluid_viscosity: N*s/m^2 ( Kg /(s*m) )
-
-    return Reynolds_number
-    """
-    return flow_rate * 1e-6 * flow_radius * 1e-3 * 2 * fluid_density / fluid_viscosity
-
-
-def get_Deborah_number(flow_rate, beam_size, q_vector, diffusion_coefficient):
-    """May 10, 2019, Y.G.@CHX
-    get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2)
-    flow_rate: ul/s
-    beam_size: ul
-    q_vector: A-1
-    diffusion_coefficient: A^2/s
-
-    return Deborah_number
-    """
-    return (flow_rate / beam_size) / (diffusion_coefficient * q_vector**2)
-
-
-def get_viscosity(diffusion_coefficient, radius, T=298):
-    """May 10, 2019, Y.G.@CHX
-    get viscosity of a Brownian motion particle with radius in fluid with diffusion_coefficient
-    diffusion_coefficient in unit of A^2/s
-    radius: m
-    T: K
-    k: 1.38064852(79)*10**(−23) J/K, Boltzmann constant
-
-    return viscosity: N*s/m^2 (water at 25C = 8.9*10**(-4) )
-    """
-
-    k = 1.38064852 * 10 ** (-23)
-    return k * T / (6 * np.pi * diffusion_coefficient * radius) * 10**20
+    return viscosity: N*s/m^2 (water at 25C = 8.9*10**(-4) )
+    '''
+    k= 1.38064852*10**(-23)
+    return k*T / ( 6*np.pi* diffusion_coefficient * radius) * 10**20
 
 
-def get_diffusion_coefficient(viscosity, radius, T=298):
-    """July 10, 2016, Y.G.@CHX
-    get diffusion_coefficient of a Brownian motion particle with radius in fluid with viscosity
-    viscosity: N*s/m^2 (water at 25C = 8.9*10^(-4) )
-    radius: m
-    T: K
-    k: 1.38064852(79)×10^−23 J/K, Boltzmann constant
+def get_diffusion_coefficient( viscosity, radius, T=298):
+    '''July 10, 2016, Y.G.@CHX
+    get diffusion_coefficient of a Brownian motion particle with radius in fluid with viscosity
+    viscosity: N*s/m^2 (water at 25C = 8.9*10^(-4) )
+    radius: m
+    T: K
+    k: 1.38064852(79)×10^−23 J/K, Boltzmann constant
 
-    return diffusion_coefficient in unit of A^2/s
-    e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives:
-    1.38064852*10**(−23) *298  / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 = 4.18*10^5 A2/s
+    return diffusion_coefficient in unit of A^2/s
+    e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives:
+    1.38064852*10**(−23) *298  / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 = 4.18*10^5 A2/s
 
-    get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298)
+    get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298)
 
-    """
+    '''
 
-    k = 1.38064852 * 10 ** (-23)
-    return k * T / (6 * np.pi * viscosity * radius) * 10**20
+    k= 1.38064852*10**(-23)
+    return k*T / ( 6*np.pi* viscosity * radius) * 10**20
 
 
 def ring_edges(inner_radius, width, spacing=0, num_rings=None):
@@ -4653,6 +4330,8 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None):
     The number of rings, their widths, and any spacing between rings can be
     specified. They can be uniform or varied.
+
+    LW 04/02/2024: fixed checking whether width and spacing are iterable
 
     Parameters
     ----------
@@ -4694,35 +4373,43 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None):
     """
     # All of this input validation merely checks that width, spacing, and
     # num_rings are self-consistent and complete.
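As a quick numerical cross-check of the worked example in the get_diffusion_coefficient docstring above, the editorial sketch below (not part of the patch) evaluates the same Stokes-Einstein expression; the viscosity and radius are the docstring's own values, while the q value is an illustrative assumption:

import numpy as np

# Stokes-Einstein: D = k_B*T / (6*pi*eta*R); the 1e20 factor converts m^2/s
# to A^2/s, matching the convention used by get_diffusion_coefficient above.
k_B = 1.38064852e-23   # J/K, Boltzmann constant
T = 298                # K, room temperature
eta = 0.20871          # N*s/m^2, glycerol/water (90:10), from the docstring
radius = 250e-9        # m, from the docstring

D = k_B * T / (6 * np.pi * eta * radius) * 1e20
print(D)               # ~4.18e5 A^2/s, matching the docstring's quoted value

# corresponding relaxation rate and decay time at an illustrative q
q = 0.01               # A^-1 (assumed)
rate = D * q**2        # Gamma = D*q^2, ~42 1/s
print(rate, 1 / rate)  # decay time ~0.024 s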
- width_is_list = isinstance(width, collections.Iterable) - spacing_is_list = isinstance(spacing, collections.Iterable) - if width_is_list and spacing_is_list: + try: + iter(width) + width_is_list=True + except: width_is_list=False + try: + iter(spacing) + spacing_is_list=True + except: spacing_is_list=False + + # width_is_list = isinstance(width, collections.Iterable) + # spacing_is_list = isinstance(spacing, collections.Iterable) + if (width_is_list and spacing_is_list): if len(width) != len(spacing) + 1: - raise ValueError("List of spacings must be one less than list " "of widths.") - if num_rings is None: + raise ValueError("List of spacings must be one less than list " + "of widths.") + if num_rings == None: try: num_rings = len(width) except TypeError: try: num_rings = len(spacing) + 1 except TypeError: - raise ValueError( - "Since width and spacing are constant, " - "num_rings cannot be inferred and must be " - "specified." - ) + raise ValueError("Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified.") else: if width_is_list: if num_rings != len(width): raise ValueError("num_rings does not match width list") if spacing_is_list: - if num_rings - 1 != len(spacing): + if num_rings-1 != len(spacing): raise ValueError("num_rings does not match spacing list") # Now regularlize the input. if not width_is_list: width = np.ones(num_rings) * width - if spacing is None: + if spacing == None: spacing = [] else: if not spacing_is_list: @@ -4734,19 +4421,17 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): return edges -def get_non_uniform_edges( - centers, - width=4, - number_rings=1, - spacing=0, -): - """ + +def get_non_uniform_edges( centers, width = 4, number_rings=1, spacing=0, ): + ''' YG CHX Spe 6 get_non_uniform_edges( centers, width = 4, number_rings=3 ) Calculate the inner and outer radius of a set of non uniform distributed rings by giving ring centers For each center, there are number_rings with each of width + + LW 04/02/2024: fixed checking whether 'width' is iterable Parameters ---------- @@ -4767,293 +4452,276 @@ def get_non_uniform_edges( ------- edges : array inner and outer radius for each ring - """ + ''' - if number_rings is None: + if number_rings == None: number_rings = 1 - edges = np.zeros([len(centers) * number_rings, 2]) - # print( width ) - - if not isinstance(width, collections.Iterable): - width = np.ones_like(centers) * width + edges = np.zeros( [len(centers)*number_rings, 2] ) + + try: + iter(width) + except: + width = np.ones_like( centers ) * width for i, c in enumerate(centers): - edges[i * number_rings : (i + 1) * number_rings, :] = ring_edges( - inner_radius=c - width[i] * number_rings / 2, width=width[i], spacing=spacing, num_rings=number_rings - ) + edges[i*number_rings:(i+1)*number_rings,:] = ring_edges( inner_radius = c - width[i]*number_rings/2, + width= width[i], spacing= spacing, num_rings=number_rings) return edges -def trans_tf_to_td(tf, dtype="dframe"): - """July 02, 2015, Y.G.@CHX +def trans_tf_to_td(tf, dtype = 'dframe'): + '''July 02, 2015, Y.G.@CHX Translate epoch time to string - """ - import datetime - - import numpy as np + ''' import pandas as pd - - """translate time.float to time.date, + import numpy as np + import datetime + '''translate time.float to time.date, td.type dframe: a dataframe td.type list, a list - """ - if dtype is "dframe": - ind = tf.index - else: - ind = range(len(tf)) - td = np.array([datetime.datetime.fromtimestamp(tf[i]) for i in ind]) + ''' + if dtype == 
'dframe':ind = tf.index + else:ind = range(len(tf)) + td = np.array([ datetime.datetime.fromtimestamp(tf[i]) for i in ind ]) return td -def trans_td_to_tf(td, dtype="dframe"): - """July 02, 2015, Y.G.@CHX + +def trans_td_to_tf(td, dtype = 'dframe'): + '''July 02, 2015, Y.G.@CHX Translate string to epoch time - """ + ''' import time - import numpy as np - - """translate time.date to time.float, + '''translate time.date to time.float, td.type dframe: a dataframe td.type list, a list - """ - if dtype is "dframe": - ind = td.index - else: - ind = range(len(td)) - # tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) - tf = np.array([time.mktime(td[i].timetuple()) for i in ind]) + ''' + if dtype == 'dframe':ind = td.index + else:ind = range(len(td)) + #tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([ time.mktime(td[i].timetuple()) for i in ind]) return tf -def get_averaged_data_from_multi_res( - multi_res, keystr="g2", different_length=True, verbose=False, cal_errorbar=False -): - """Y.G. Dec 22, 2016 - get average data from multi-run analysis result - Parameters: - multi_res: dict, generated by function run_xpcs_xsvs_single - each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. - keystr: string, get the averaged keystr - different_length: if True, do careful average for different length results - return: - array, averaged results - """ +def get_averaged_data_from_multi_res( multi_res, keystr='g2', different_length= True, verbose=False, + cal_errorbar=False): + '''Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. 
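+            e.g., multi_res = { 'uid1': {'g2': g2_a, 'taus': taus_a}, 'uid2': {'g2': g2_b, 'taus': taus_b} }
+            (illustrative layout only; the uid names and values above are placeholders)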
+ keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results + + ''' maxM = 0 mkeys = multi_res.keys() if not different_length: - n = 0 - for i, key in enumerate(list(mkeys)): + n=0 + for i, key in enumerate( list( mkeys) ): keystri = multi_res[key][keystr] - if i == 0: + if i ==0: keystr_average = keystri else: keystr_average += keystri - n += 1 - keystr_average /= n + n +=1 + keystr_average /=n else: length_dict = {} - D = 1 - for i, key in enumerate(list(mkeys)): + D= 1 + for i, key in enumerate( list( mkeys) ): if verbose: - print(i, key) + print(i,key) shapes = multi_res[key][keystr].shape - M = shapes[0] - if i == 0: - if len(shapes) == 2: - D = 2 + M=shapes[0] + if i ==0: + if len(shapes)==2: + D=2 maxN = shapes[1] - elif len(shapes) == 3: - D = 3 - maxN = shapes[2] # in case of two-time correlation + elif len(shapes)==3: + D=3 + maxN = shapes[2] #in case of two-time correlation if (M) not in length_dict: - length_dict[(M)] = 1 + length_dict[(M) ] =1 else: - length_dict[(M)] += 1 - maxM = max(maxM, M) - # print( length_dict ) + length_dict[(M) ] += 1 + maxM = max( maxM, M ) + #print( length_dict ) avg_count = {} - sk = np.array(sorted(length_dict)) - for i, k in enumerate(sk): - avg_count[k] = np.sum(np.array([length_dict[k] for k in sk[i:]])) - # print(length_dict, avg_count) - if D == 2: - # print('here') - keystr_average = np.zeros([maxM, maxN]) - elif D == 3: - keystr_average = np.zeros([maxM, maxM, maxN]) + sk = np.array( sorted(length_dict) ) + for i, k in enumerate( sk ): + avg_count[k] = np.sum( np.array( [ length_dict[k] for k in sk[i:] ] ) ) + #print(length_dict, avg_count) + if D==2: + #print('here') + keystr_average = np.zeros( [maxM, maxN] ) + elif D==3: + keystr_average = np.zeros( [maxM, maxM, maxN ] ) else: - keystr_average = np.zeros([maxM]) - for i, key in enumerate(list(mkeys)): + keystr_average = np.zeros( [maxM] ) + for i, key in enumerate( list( mkeys) ): keystri = multi_res[key][keystr] Mi = keystri.shape[0] - if D != 3: - keystr_average[:Mi] += keystri + if D!=3: + keystr_average[:Mi] += keystri else: - keystr_average[:Mi, :Mi, :] += keystri - if D != 3: - keystr_average[: sk[0]] /= avg_count[sk[0]] + keystr_average[:Mi,:Mi,:] += keystri + if D!=3: + keystr_average[:sk[0]] /= avg_count[sk[0]] else: - keystr_average[: sk[0], : sk[0], :] /= avg_count[sk[0]] - for i in range(0, len(sk) - 1): - if D != 3: - keystr_average[sk[i] : sk[i + 1]] /= avg_count[sk[i + 1]] + keystr_average[:sk[0],:sk[0], : ] /= avg_count[sk[0]] + for i in range( 0, len(sk)-1 ): + if D!=3: + keystr_average[sk[i]:sk[i+1]] /= avg_count[sk[i+1]] else: - keystr_average[sk[i] : sk[i + 1], sk[i] : sk[i + 1], :] /= avg_count[sk[i + 1]] + keystr_average[sk[i]:sk[i+1],sk[i]:sk[i+1],:] /= avg_count[sk[i+1]] return keystr_average -def save_g2_general(g2, taus, qr=None, qz=None, uid="uid", path=None, return_res=False): - """Y.G. Dec 29, 2016 +def save_g2_general( g2, taus, qr=None, qz=None, uid='uid', path=None, return_res= False ): - save g2 results, - res_pargs should contain - g2: one-time correlation function - taus, lags of g2 - qr: the qr center, same length as g2 - qz: the qz or angle center, same length as g2 - path: - uid: + '''Y.G. 
Dec 29, 2016 - """ + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: + + ''' - df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) - t, qs = g2.shape + df = DataFrame( np.hstack( [ (taus).reshape( len(g2),1) , g2] ) ) + t,qs = g2.shape if qr is None: - qr = range(qs) + qr = range( qs ) if qz is None: - df.columns = ["tau"] + [str(qr_) for qr_ in qr] + df.columns = ( ['tau'] + [str(qr_) for qr_ in qr ] ) else: - df.columns = ["tau"] + [str(qr_) + "_" + str(qz_) for (qr_, qz_) in zip(qr, qz)] + df.columns = ( ['tau'] + [ str(qr_) +'_'+ str(qz_) for (qr_,qz_) in zip(qr,qz) ] ) - # dt =datetime.now() - # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - # if filename is None: + #if filename is None: filename = uid - # filename = 'uid=%s--g2.csv' % (uid) - # filename += '-uid=%s-%s.csv' % (uid,CurTime) - # filename += '-uid=%s.csv' % (uid) + #filename = 'uid=%s--g2.csv' % (uid) + #filename += '-uid=%s-%s.csv' % (uid,CurTime) + #filename += '-uid=%s.csv' % (uid) filename1 = os.path.join(path, filename) df.to_csv(filename1) - print("The correlation function is saved in %s with filename as %s" % (path, filename)) + print( 'The correlation function is saved in %s with filename as %s'%( path, filename)) if return_res: return df ########### -# *for g2 fit and plot - +#*for g2 fit and plot def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): - return beta * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline - + return beta * np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline -def simple_exponential(x, beta, relaxation_rate, baseline=1): - """relation_rate: unit 1/s""" +def simple_exponential(x, beta, relaxation_rate, baseline=1): + '''relation_rate: unit 1/s ''' return beta * np.exp(-2 * relaxation_rate * x) + baseline -def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): - return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * relaxation_rate * x) + baseline - - -def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): - return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * relaxation_rate * x) + baseline +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline -def flow_para_function_with_vibration(x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): - vibration_part = 1 + amp * np.cos(2 * np.pi * freq * x) - Diff_part = np.exp(-2 * relaxation_rate * x) - Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 - return beta * vibration_part * Diff_part * Flow_part + baseline +def flow_para_function_with_vibration( x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = (1 + amp*np.cos( 2*np.pi*freq* x) ) + Diff_part= np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 
4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta* vibration_part* Diff_part * Flow_part + baseline -def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): - """flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )""" +def flow_para_function( x, beta, relaxation_rate, flow_velocity, baseline=1): + '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )''' - Diff_part = np.exp(-2 * relaxation_rate * x) - Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 - return beta * Diff_part * Flow_part + baseline + Diff_part= np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta*Diff_part * Flow_part + baseline -def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): - """Nov 9, 2017 Basically, make q vector to (qr, angle), +def flow_para_function_explicitq( x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0 ): + '''Nov 9, 2017 Basically, make q vector to (qr, angle), ###relaxation_rate is actually a diffusion rate flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) Diffusion part: np.exp( -2*D q^2 *tau ) q_ang: would be np.radians( ang - 90 ) - """ + ''' - Diff_part = np.exp(-2 * (diffusion * qr**2 * x) ** alpha) - if flow_velocity != 0: - if np.cos(q_ang) >= 1e-8: - Flow_part = ( - np.pi**2 - / (16 * x * flow_velocity * qr * abs(np.cos(q_ang))) - * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity * qr * abs(np.cos(q_ang))))) ** 2 - ) + Diff_part= np.exp(-2 * ( diffusion* qr**2 * x)**alpha ) + if flow_velocity !=0: + if np.cos( q_ang ) >= 1e-8: + Flow_part = np.pi**2/(16*x*flow_velocity*qr* abs(np.cos(q_ang)) ) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity * qr* abs(np.cos(q_ang)) ) ) )**2 else: Flow_part = 1 else: Flow_part = 1 - return beta * Diff_part * Flow_part + baseline + return beta*Diff_part * Flow_part + baseline -def get_flow_velocity(average_velocity, shape_factor): - return average_velocity * (1 - shape_factor) / (1 + shape_factor) +def get_flow_velocity( average_velocity, shape_factor): -def stretched_flow_para_function(x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): - """ + return average_velocity * (1- shape_factor)/(1+ shape_factor) + +def stretched_flow_para_function( x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): + ''' flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) - """ - Diff_part = np.exp(-2 * (relaxation_rate * x) ** alpha) - Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 - return beta * Diff_part * Flow_part + baseline + ''' + Diff_part= np.exp(-2 * (relaxation_rate * x)**alpha ) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta*Diff_part * Flow_part + baseline -def get_g2_fit_general_two_steps( - g2, taus, function="simple_exponential", second_fit_range=[0, 20], sequential_fit=False, *argv, **kwargs -): - """ +def get_g2_fit_general_two_steps( g2, taus, function='simple_exponential', + second_fit_range=[0,20], + sequential_fit=False, *argv,**kwargs): + ''' Fit g2 in two steps, i) Using the "function" to fit whole g2 to get baseline and beta (contrast) ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function - """ - g2_fit_result, taus_fit, g2_fit = 
get_g2_fit_general(g2, taus, function, sequential_fit, *argv, **kwargs) + ''' + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function, sequential_fit, *argv,**kwargs) guess_values = {} - for k in list(g2_fit_result[0].params.keys()): - guess_values[k] = np.array([g2_fit_result[i].params[k].value for i in range(g2.shape[1])]) + for k in list (g2_fit_result[0].params.keys()): + guess_values[k] = np.array( [ g2_fit_result[i].params[k].value + for i in range( g2.shape[1] ) ]) - if "guess_limits" in kwargs: - guess_limits = kwargs["guess_limits"] + if 'guess_limits' in kwargs: + guess_limits = kwargs['guess_limits'] else: - guess_limits = dict(baseline=[1, 1.8], alpha=[0, 2], beta=[0.0, 1], relaxation_rate=[0.001, 10000]) - - g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( - g2, - taus, - function="simple_exponential", - sequential_fit=sequential_fit, - fit_range=second_fit_range, - fit_variables={"baseline": False, "beta": False, "alpha": False, "relaxation_rate": True}, - guess_values=guess_values, - guess_limits=guess_limits, - ) + guess_limits = dict( baseline =[1, 1.8], alpha=[0, 2], + beta = [0., 1], relaxation_rate= [0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function ='simple_exponential', + sequential_fit= sequential_fit, fit_range=second_fit_range, + fit_variables={'baseline':False, 'beta': False, 'alpha':False,'relaxation_rate':True}, + guess_values= guess_values, guess_limits = guess_limits ) return g2_fit_result, taus_fit, g2_fit -def get_g2_fit_general( - g2, taus, function="simple_exponential", sequential_fit=False, qval_dict=None, ang_init=90, *argv, **kwargs -): - """ +def get_g2_fit_general( g2, taus, function='simple_exponential', + sequential_fit=False, qval_dict = None, + ang_init = 90, *argv,**kwargs): + ''' Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq qval_dict: a dict with qr and ang (in unit of degrees).") @@ -5109,307 +4777,275 @@ def get_g2_fit_general( g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) - """ + ''' - if "fit_range" in kwargs.keys(): - fit_range = kwargs["fit_range"] + if 'fit_range' in kwargs.keys(): + fit_range = kwargs['fit_range'] else: - fit_range = None + fit_range=None + num_rings = g2.shape[1] - if "fit_variables" in kwargs: - additional_var = kwargs["fit_variables"] - _vars = [k for k in list(additional_var.keys()) if additional_var[k] is False] + if 'fit_variables' in kwargs: + additional_var = kwargs['fit_variables'] + _vars =[ k for k in list( additional_var.keys()) if additional_var[k] == False] else: _vars = [] - if function == "simple_exponential" or function == "simple": - _vars = np.unique(_vars + ["alpha"]) - mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) - elif function == "stretched_exponential" or function == "stretched": - mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) - elif function == "stretched_vibration": - mod = Model(stretched_auto_corr_scat_factor_with_vibration) # , independent_vars= _vars) - elif function == "flow_para_function" or function == "flow_para": - mod = Model(flow_para_function) # , independent_vars= _vars) - elif function == "flow_para_function_explicitq" or function == "flow_para_qang": - mod = Model(flow_para_function_explicitq) # , independent_vars= _vars) - elif function == "flow_para_function_with_vibration" or function == "flow_vibration": - mod = Model(flow_para_function_with_vibration) + 
if function=='simple_exponential' or function=='simple': + _vars = np.unique ( _vars + ['alpha']) + mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= list( _vars) ) + elif function=='stretched_exponential' or function=='stretched': + mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= _vars) + elif function=='stretched_vibration': + mod = Model(stretched_auto_corr_scat_factor_with_vibration)#, independent_vars= _vars) + elif function=='flow_para_function' or function=='flow_para': + mod = Model(flow_para_function)#, independent_vars= _vars) + elif function=='flow_para_function_explicitq' or function=='flow_para_qang': + mod = Model(flow_para_function_explicitq)#, independent_vars= _vars) + elif function=='flow_para_function_with_vibration' or function=='flow_vibration': + mod = Model( flow_para_function_with_vibration ) else: - print( - "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" - % function - ) - - mod.set_param_hint("baseline", min=0.5, max=2.5) - mod.set_param_hint("beta", min=0.0, max=1.0) - mod.set_param_hint("alpha", min=0.0) - mod.set_param_hint("relaxation_rate", min=0.0, max=1000) - mod.set_param_hint("flow_velocity", min=0) - mod.set_param_hint("diffusion", min=0.0, max=2e8) - - if "guess_limits" in kwargs: - guess_limits = kwargs["guess_limits"] - for k in list(guess_limits.keys()): - mod.set_param_hint(k, min=guess_limits[k][0], max=guess_limits[k][1]) - - if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": - mod.set_param_hint("flow_velocity", min=0) - if function == "flow_para_function_explicitq" or function == "flow_para_qang": - mod.set_param_hint("flow_velocity", min=0) - mod.set_param_hint("diffusion", min=0.0, max=2e8) - if function == "stretched_vibration" or function == "flow_vibration": - mod.set_param_hint("freq", min=0) - mod.set_param_hint("amp", min=0) - - _guess_val = dict(beta=0.1, alpha=1.0, relaxation_rate=0.005, baseline=1.0) - if "guess_values" in kwargs: - guess_values = kwargs["guess_values"] - _guess_val.update(guess_values) - - _beta = _guess_val["beta"] - _alpha = _guess_val["alpha"] - _relaxation_rate = _guess_val["relaxation_rate"] - _baseline = _guess_val["baseline"] - if isinstance(_beta, (np.ndarray, list)): - _beta_ = _beta[0] + print ("The %s is not supported.The supported functions include simple_exponential and stretched_exponential"%function) + + mod.set_param_hint( 'baseline', min=0.5, max= 2.5 ) + mod.set_param_hint( 'beta', min=0.0, max=1.0 ) + mod.set_param_hint( 'alpha', min=0.0 ) + mod.set_param_hint( 'relaxation_rate', min=0.0, max= 1000 ) + mod.set_param_hint( 'flow_velocity', min=0) + mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) + + if 'guess_limits' in kwargs: + guess_limits = kwargs['guess_limits'] + for k in list( guess_limits.keys() ): + mod.set_param_hint( k, min= guess_limits[k][0], max= guess_limits[k][1] ) + + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': + mod.set_param_hint( 'flow_velocity', min=0) + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + mod.set_param_hint( 'flow_velocity', min=0) + mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) + if function=='stretched_vibration' or function=='flow_vibration': + mod.set_param_hint( 'freq', min=0) + mod.set_param_hint( 'amp', min=0) + + _guess_val = dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + if 'guess_values' in kwargs: + guess_values = 
kwargs['guess_values'] + _guess_val.update( guess_values ) + + _beta=_guess_val['beta'] + _alpha=_guess_val['alpha'] + _relaxation_rate = _guess_val['relaxation_rate'] + _baseline= _guess_val['baseline'] + if isinstance( _beta, (np.ndarray, list) ): + _beta_=_beta[0] else: - _beta_ = _beta - if isinstance(_baseline, (np.ndarray, list)): + _beta_=_beta + if isinstance( _baseline, (np.ndarray, list) ): _baseline_ = _baseline[0] else: _baseline_ = _baseline - if isinstance(_relaxation_rate, (np.ndarray, list)): - _relaxation_rate_ = _relaxation_rate[0] + if isinstance( _relaxation_rate, (np.ndarray, list) ): + _relaxation_rate_= _relaxation_rate[0] else: - _relaxation_rate_ = _relaxation_rate - if isinstance(_alpha, (np.ndarray, list)): + _relaxation_rate_= _relaxation_rate + if isinstance( _alpha, (np.ndarray, list) ): _alpha_ = _alpha[0] else: _alpha_ = _alpha - pars = mod.make_params(beta=_beta_, alpha=_alpha_, relaxation_rate=_relaxation_rate_, baseline=_baseline_) + pars = mod.make_params( beta=_beta_, alpha=_alpha_, + relaxation_rate =_relaxation_rate_, baseline= _baseline_) - if function == "flow_para_function" or function == "flow_para": - _flow_velocity = _guess_val["flow_velocity"] - if isinstance(_flow_velocity, (np.ndarray, list)): + if function=='flow_para_function' or function=='flow_para': + _flow_velocity =_guess_val['flow_velocity'] + if isinstance( _flow_velocity, (np.ndarray, list) ): _flow_velocity_ = _flow_velocity[0] else: _flow_velocity_ = _flow_velocity - pars = mod.make_params( - beta=_beta_, - alpha=_alpha_, - flow_velocity=_flow_velocity_, - relaxation_rate=_relaxation_rate_, - baseline=_baseline_, - ) - - if function == "flow_para_function_explicitq" or function == "flow_para_qang": - _flow_velocity = _guess_val["flow_velocity"] - _diffusion = _guess_val["diffusion"] - _guess_val["qr"] = 1 - _guess_val["q_ang"] = 0 - if isinstance(_flow_velocity, (np.ndarray, list)): + pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + relaxation_rate =_relaxation_rate_, baseline= _baseline_) + + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + _flow_velocity =_guess_val['flow_velocity'] + _diffusion =_guess_val['diffusion'] + _guess_val['qr'] = 1 + _guess_val['q_ang'] = 0 + if isinstance( _flow_velocity, (np.ndarray, list) ): _flow_velocity_ = _flow_velocity[0] else: _flow_velocity_ = _flow_velocity - if isinstance(_diffusion, (np.ndarray, list)): + if isinstance( _diffusion, (np.ndarray, list) ): _diffusion_ = _diffusion[0] else: _diffusion_ = _diffusion - pars = mod.make_params( - beta=_beta_, - alpha=_alpha_, - flow_velocity=_flow_velocity_, - diffusion=_diffusion_, - baseline=_baseline_, - qr=1, - q_ang=0, - ) - - if function == "stretched_vibration": - _freq = _guess_val["freq"] - _amp = _guess_val["amp"] - pars = mod.make_params( - beta=_beta, alpha=_alpha, freq=_freq, amp=_amp, relaxation_rate=_relaxation_rate, baseline=_baseline - ) - - if function == "flow_vibration": - _flow_velocity = _guess_val["flow_velocity"] - _freq = _guess_val["freq"] - _amp = _guess_val["amp"] - pars = mod.make_params( - beta=_beta, - freq=_freq, - amp=_amp, - flow_velocity=_flow_velocity, - relaxation_rate=_relaxation_rate, - baseline=_baseline, - ) + pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + diffusion =_diffusion_, baseline= _baseline_, + qr=1, q_ang=0 + ) + + if function=='stretched_vibration': + _freq =_guess_val['freq'] + _amp = _guess_val['amp'] + pars = mod.make_params( beta=_beta, 
alpha=_alpha, freq=_freq, amp = _amp, + relaxation_rate =_relaxation_rate, baseline= _baseline) + + if function=='flow_vibration': + _flow_velocity =_guess_val['flow_velocity'] + _freq =_guess_val['freq'] + _amp = _guess_val['amp'] + pars = mod.make_params( beta=_beta, freq=_freq, amp = _amp,flow_velocity=_flow_velocity, + relaxation_rate =_relaxation_rate, baseline= _baseline) for v in _vars: - pars["%s" % v].vary = False - # print( pars ) + pars['%s'%v].vary = False + #print( pars ) fit_res = [] model_data = [] for i in range(num_rings): - if fit_range is not None: - y_ = g2[1:, i][fit_range[0] : fit_range[1]] - lags_ = taus[1:][fit_range[0] : fit_range[1]] + if fit_range != None: + y_=g2[1:, i][fit_range[0]:fit_range[1]] + lags_=taus[1:][fit_range[0]:fit_range[1]] else: - y_ = g2[1:, i] - lags_ = taus[1:] + y_=g2[1:, i] + lags_=taus[1:] mm = ~np.isnan(y_) - y = y_[mm] + y = y_[mm] lags = lags_[mm] - # print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) - # y=y_ - # lags=lags_ - # print( _relaxation_rate ) + #print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + #y=y_ + #lags=lags_ + #print( _relaxation_rate ) for k in list(pars.keys()): - # print(k, _guess_val[k] ) + #print(k, _guess_val[k] ) try: - if isinstance(_guess_val[k], (np.ndarray, list)): + if isinstance( _guess_val[k], (np.ndarray, list) ): pars[k].value = _guess_val[k][i] except: pass if True: - if isinstance(_beta, (np.ndarray, list)): - # pars['beta'].value = _guess_val['beta'][i] - _beta_ = _guess_val["beta"][i] - if isinstance(_baseline, (np.ndarray, list)): - # pars['baseline'].value = _guess_val['baseline'][i] - _baseline_ = _guess_val["baseline"][i] - if isinstance(_relaxation_rate, (np.ndarray, list)): - # pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] - _relaxation_rate_ = _guess_val["relaxation_rate"][i] - if isinstance(_alpha, (np.ndarray, list)): - # pars['alpha'].value = _guess_val['alpha'][i] - _alpha_ = _guess_val["alpha"][i] - # for k in list(pars.keys()): - # print(k, _guess_val[k] ) + if isinstance( _beta, (np.ndarray, list) ): + #pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val['beta'][i] + if isinstance( _baseline, (np.ndarray, list) ): + #pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val['baseline'][i] + if isinstance( _relaxation_rate, (np.ndarray, list) ): + #pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val['relaxation_rate'][i] + if isinstance( _alpha, (np.ndarray, list) ): + #pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val['alpha'][i] + #for k in list(pars.keys()): + #print(k, _guess_val[k] ) # pars[k].value = _guess_val[k][i] - if function == "flow_para_function_explicitq" or function == "flow_para_qang": - if qval_dict is None: + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + if qval_dict == None: print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") else: - pars = mod.make_params( - beta=_beta_, - alpha=_alpha_, - flow_velocity=_flow_velocity_, - diffusion=_diffusion_, - baseline=_baseline_, - qr=qval_dict[i][0], - q_ang=abs(np.radians(qval_dict[i][1] - ang_init)), - ) - - pars["qr"].vary = False - pars["q_ang"].vary = False + + pars = mod.make_params( + beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + diffusion =_diffusion_, baseline= _baseline_, + qr = qval_dict[i][0], q_ang = abs(np.radians( qval_dict[i][1] - ang_init) ) ) + + + pars['qr'].vary = False + pars['q_ang'].vary = 
False for v in _vars: - pars["%s" % v].vary = False + pars['%s'%v].vary = False - # if i==20: + #if i==20: # print(pars) - # print( pars ) - result1 = mod.fit(y, pars, x=lags) - # print(qval_dict[i][0], qval_dict[i][1], y) + #print( pars ) + result1 = mod.fit(y, pars, x =lags ) + #print(qval_dict[i][0], qval_dict[i][1], y) if sequential_fit: for k in list(pars.keys()): - # print( pars ) + #print( pars ) if k in list(result1.best_values.keys()): pars[k].value = result1.best_values[k] - fit_res.append(result1) - # model_data.append( result1.best_fit ) - yf = result1.model.eval(params=result1.params, x=lags_) - model_data.append(yf) - return fit_res, lags_, np.array(model_data).T - - -def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): - """Y.G. 2016, Dec 26 - Get short/long labels from a qval_dict - Parameters - ---------- - qval_dict, dict, with key as roi number, - format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs - format as {1: [qr1], 2: [qr2] ...} for saxs - format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs - geometry: - 'saxs': a saxs with Qr partition - 'ang_saxs': a saxs with Qr and angular partition - 'gi_saxs': gisaxs with Qz, Qr - """ - - Nqs = len(qval_dict.keys()) - len_qrz = len(list(qval_dict.values())[0]) - # qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) - qr_label = np.array(list(qval_dict.values()))[:, 0] - if geometry == "gi_saxs" or geometry == "ang_saxs": # or geometry=='gi_waxs': + fit_res.append( result1) + #model_data.append( result1.best_fit ) + yf=result1.model.eval(params=result1.params, x= lags_ ) + model_data.append( yf ) + return fit_res, lags_, np.array( model_data ).T + + + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry='saxs'): + '''Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] 
for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + ''' + + Nqs = len( qval_dict.keys()) + len_qrz = len( list( qval_dict.values() )[0] ) + #qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array( list( qval_dict.values() ) )[:,0] + if geometry=='gi_saxs' or geometry=='ang_saxs':# or geometry=='gi_waxs': if len_qrz < 2: - print("please give qz or qang for the q-label") + print( "please give qz or qang for the q-label") else: - # qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) - qz_label = np.array(list(qval_dict.values()))[:, 1] + #qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array( list( qval_dict.values() ) )[:,1] else: - qz_label = np.array([0]) + qz_label = np.array( [0] ) - uqz_label = np.unique(qz_label) - num_qz = len(uqz_label) + uqz_label = np.unique( qz_label ) + num_qz = len( uqz_label) - uqr_label = np.unique(qr_label) - num_qr = len(uqr_label) + uqr_label = np.unique( qr_label ) + num_qr = len( uqr_label) - # print( uqr_label, uqz_label ) - if len(uqr_label) >= len(uqz_label): - master_plot = "qz" # one qz for many sub plots of each qr + #print( uqr_label, uqz_label ) + if len( uqr_label ) >= len( uqz_label ): + master_plot= 'qz' #one qz for many sub plots of each qr else: - master_plot = "qr" + master_plot= 'qr' - mastp = master_plot - if geometry == "ang_saxs": - mastp = "ang" + mastp= master_plot + if geometry == 'ang_saxs': + mastp= 'ang' num_short = min(num_qz, num_qr) - num_long = max(num_qz, num_qr) + num_long = max(num_qz, num_qr) - # print( mastp, num_short, num_long) + #print( mastp, num_short, num_long) if num_qz != num_qr: - short_label = [qz_label, qr_label][np.argmin([num_qz, num_qr])] - long_label = [qz_label, qr_label][np.argmax([num_qz, num_qr])] - short_ulabel = [uqz_label, uqr_label][np.argmin([num_qz, num_qr])] - long_ulabel = [uqz_label, uqr_label][np.argmax([num_qz, num_qr])] + short_label = [qz_label,qr_label][ np.argmin( [num_qz, num_qr] ) ] + long_label = [qz_label,qr_label][ np.argmax( [num_qz, num_qr] ) ] + short_ulabel = [uqz_label,uqr_label][ np.argmin( [num_qz, num_qr] ) ] + long_ulabel = [uqz_label,uqr_label][ np.argmax( [num_qz, num_qr] ) ] else: short_label = qz_label - long_label = qr_label + long_label = qr_label short_ulabel = uqz_label - long_ulabel = uqr_label - # print( long_ulabel ) - # print( qz_label,qr_label ) - # print( short_label, long_label ) + long_ulabel = uqr_label + #print( long_ulabel ) + #print( qz_label,qr_label ) + #print( short_label, long_label ) - if geometry == "saxs" or geometry == "gi_waxs": - ind_long = [range(num_long)] + if geometry == 'saxs' or geometry == 'gi_waxs': + ind_long = [ range( num_long ) ] else: - ind_long = [np.where(short_label == i)[0] for i in short_ulabel] - - if Nqs == 1: - long_ulabel = list(qval_dict.values())[0] - long_label = list(qval_dict.values())[0] - return ( - qr_label, - qz_label, - num_qz, - num_qr, - num_short, - num_long, - short_label, - long_label, - short_ulabel, - long_ulabel, - ind_long, - master_plot, - mastp, - ) + ind_long = [ np.where( short_label == i)[0] for i in short_ulabel ] + + + if Nqs == 1: + long_ulabel = list( qval_dict.values() )[0] + long_label = list( qval_dict.values() )[0] + return qr_label, qz_label, num_qz, num_qr, num_short,num_long, short_label, long_label,short_ulabel,long_ulabel, ind_long, master_plot, mastp ############################################ @@ -5417,32 +5053,17 @@ 
def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): ############################################ -def plot_g2_general( - g2_dict, - taus_dict, - qval_dict, - g2_err_dict=None, - fit_res=None, - geometry="saxs", - filename="g2", - path=None, - function="simple_exponential", - g2_labels=None, - fig_ysize=12, - qth_interest=None, - ylabel="g2", - return_fig=False, - append_name="", - outsize=(2000, 2400), - max_plotnum_fig=16, - figsize=(10, 12), - show_average_ang_saxs=True, - qphi_analysis=False, - fontsize_sublabel=12, - *argv, - **kwargs, -): - """ + + +def plot_g2_general( g2_dict, taus_dict, qval_dict, g2_err_dict = None, + fit_res=None, geometry='saxs',filename='g2', + path=None, function='simple_exponential', g2_labels=None, + fig_ysize= 12, qth_interest = None, + ylabel='g2', return_fig=False, append_name='', outsize=(2000, 2400), + max_plotnum_fig=16, figsize=(10, 12), show_average_ang_saxs=True, + qphi_analysis = False, fontsize_sublabel = 12, + *argv,**kwargs): + ''' Jan 10, 2018 add g2_err_dict option to plot g2 with error bar Oct31, 2017 add qth_interest option @@ -5485,387 +5106,370 @@ def plot_g2_general( ToDoList: plot an average g2 for ang_saxs for each q - """ + ''' - if ylabel == "g2": - ylabel = "g_2" - if ylabel == "g4": - ylabel = "g_4" + if ylabel=='g2': + ylabel='g_2' + if ylabel=='g4': + ylabel='g_4' - if geometry == "saxs": + if geometry =='saxs': if qphi_analysis: - geometry = "ang_saxs" - if qth_interest is not None: + geometry = 'ang_saxs' + if qth_interest != None: if not isinstance(qth_interest, list): - print("Please give a list for qth_interest") + print('Please give a list for qth_interest') else: - # g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res - qth_interest = np.array(qth_interest) - 1 + #g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array( qth_interest ) -1 g2_dict_ = {} - # taus_dict_ = {} + #taus_dict_ = {} for k in list(g2_dict.keys()): - g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] - # for k in list(taus_dict.keys()): + g2_dict_[k] = g2_dict[k][:,[i for i in qth_interest]] + #for k in list(taus_dict.keys()): # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] taus_dict_ = taus_dict - qval_dict_ = {k: qval_dict[k] for k in qth_interest} - if fit_res is not None: - fit_res_ = [fit_res[k] for k in qth_interest] + qval_dict_ = {k:qval_dict[k] for k in qth_interest} + if fit_res != None: + fit_res_ = [ fit_res[k] for k in qth_interest ] else: fit_res_ = None else: g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res - ( - qr_label, - qz_label, - num_qz, - num_qr, - num_short, - num_long, - short_label, - long_label, - short_ulabel, - long_ulabel, - ind_long, - master_plot, - mastp, - ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) fps = [] - # $print( num_short, num_long ) + #$print( num_short, num_long ) - for s_ind in range(num_short): - ind_long_i = ind_long[s_ind] - num_long_i = len(ind_long_i) - # if show_average_ang_saxs: + for s_ind in range( num_short ): + ind_long_i = ind_long[ s_ind ] + num_long_i = len( ind_long_i ) + #if show_average_ang_saxs: # if geometry=='ang_saxs': # num_long_i += 1 if RUN_GUI: fig = Figure(figsize=(10, 12)) else: - # fig = 
plt.figure( ) - if num_long_i <= 4: - if master_plot != "qz": + #fig = plt.figure( ) + if num_long_i <=4: + if master_plot != 'qz': fig = plt.figure(figsize=(8, 6)) else: - if num_short > 1: + if num_short>1: fig = plt.figure(figsize=(8, 4)) else: fig = plt.figure(figsize=(10, 6)) - # print('Here') + #print('Here') elif num_long_i > max_plotnum_fig: - num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 - fig = [plt.figure(figsize=figsize) for i in range(num_fig)] - # print( figsize ) + num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 + fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] + #print( figsize ) else: - # print('Here') - if master_plot != "qz": + #print('Here') + if master_plot != 'qz': fig = plt.figure(figsize=figsize) else: fig = plt.figure(figsize=(10, 10)) - if master_plot == "qz": - if geometry == "ang_saxs": - title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" - elif geometry == "gi_saxs": - title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + if master_plot == 'qz': + if geometry=='ang_saxs': + title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' + elif geometry=='gi_saxs': + title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' else: - title_short = "" - else: # qr - if geometry == "ang_saxs" or geometry == "gi_saxs": - title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + title_short = '' + else: #qr + if geometry=='ang_saxs' or geometry=='gi_saxs': + title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' else: - title_short = "" - # print(geometry) - # filename ='' - til = "%s:--->%s" % (filename, title_short) - if num_long_i <= 4: - plt.title(til, fontsize=14, y=1.15) - # plt.title( til,fontsize=20, y =1.06) - # print('here') + title_short='' + #print(geometry) + #filename ='' + til = '%s:--->%s'%(filename, title_short ) + if num_long_i <=4: + plt.title( til,fontsize= 14, y =1.15) + #plt.title( til,fontsize=20, y =1.06) + #print('here') else: - plt.title(til, fontsize=20, y=1.06) - # print( num_long ) - if num_long != 1: - # print( 'here') - plt.axis("off") - # sy = min(num_long_i,4) - sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) - # fig.set_size_inches(10, 12) - # fig.set_size_inches(10, fig_ysize ) + plt.title( til,fontsize=20, y =1.06) + #print( num_long ) + if num_long!=1: + #print( 'here') + plt.axis('off') + #sy = min(num_long_i,4) + sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) + #fig.set_size_inches(10, 12) + #fig.set_size_inches(10, fig_ysize ) else: - sy = 1 - # fig.set_size_inches(8,6) - # plt.axis('off') - sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + sy =1 + #fig.set_size_inches(8,6) + #plt.axis('off') + sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) temp = sy sy = sx sx = temp - # print( num_long_i, sx, sy ) - # print( master_plot ) - # print(ind_long_i, len(ind_long_i) ) + #print( num_long_i, sx, sy ) + #print( master_plot ) + #print(ind_long_i, len(ind_long_i) ) - for i, l_ind in enumerate(ind_long_i): + for i, l_ind in enumerate( ind_long_i ): if num_long_i <= max_plotnum_fig: - # if s_ind ==2: + #if s_ind ==2: # print('Here') # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) - ax = fig.add_subplot(sx, sy, i + 1) - if sx == 1: - if sy == 1: - plt.axis("on") + ax = fig.add_subplot(sx,sy, i + 1 ) + if sx==1: + if sy==1: + plt.axis('on') else: - # fig_subnum = 
l_ind//max_plotnum_fig - # ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) - fig_subnum = i // max_plotnum_fig - # print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) - ax = fig[fig_subnum].add_subplot(sx, sy, i + 1 - fig_subnum * max_plotnum_fig) + #fig_subnum = l_ind//max_plotnum_fig + #ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i//max_plotnum_fig + #print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + - ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") + ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) - if master_plot == "qz" or master_plot == "angle": - if geometry != "gi_waxs": - title_long = r"$Q_r= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + if master_plot == 'qz' or master_plot == 'angle': + if geometry!='gi_waxs': + title_long = r'$Q_r= $'+'%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' else: - title_long = r"$Q_r= $" + "%i " % (long_label[l_ind]) - # print( title_long,long_label,l_ind ) + title_long = r'$Q_r= $'+'%i '%( long_label[l_ind] ) + #print( title_long,long_label,l_ind ) else: - if geometry == "ang_saxs": - # title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) - title_long = "Ang= " + "%.2f" % (long_label[l_ind]) # + r'$^\circ$' + '( %d )'%(l_ind) - elif geometry == "gi_saxs": - title_long = r"$Q_z= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + if geometry=='ang_saxs': + #title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) #+ r'$^\circ$' + '( %d )'%(l_ind) + elif geometry=='gi_saxs': + title_long = r'$Q_z= $'+ '%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' else: - title_long = "" - # print( master_plot ) - if master_plot != "qz": - ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.1, fontsize=12) + title_long = '' + #print( master_plot ) + if master_plot != 'qz': + ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.1, fontsize=12) else: - ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.05, fontsize=fontsize_sublabel) - # print( geometry ) - # print( title_long ) - if qth_interest is not None: # it might have a bug here, todolist!!! + ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.05, fontsize= fontsize_sublabel) + #print( geometry ) + #print( title_long ) + if qth_interest != None:#it might have a bug here, todolist!!! 
lab = sorted(list(qval_dict_.keys())) - # print( lab, l_ind) - ax.set_title(title_long + " (%s )" % (lab[l_ind] + 1), y=1.05, fontsize=12) - for ki, k in enumerate(list(g2_dict_.keys())): - if ki == 0: - c = "b" - if fit_res is None: - m = "-o" + #print( lab, l_ind) + ax.set_title(title_long + ' (%s )'%( lab[l_ind] +1), y =1.05, fontsize= 12) + for ki, k in enumerate( list(g2_dict_.keys()) ): + if ki==0: + c='b' + if fit_res == None: + m='-o' else: - m = "o" - elif ki == 1: - c = "r" - if fit_res is None: - m = "s" + m='o' + elif ki==1: + c='r' + if fit_res == None: + m='s' else: - m = "-" - elif ki == 2: - c = "g" - m = "-D" + m='-' + elif ki==2: + c='g' + m='-D' else: - c = colors[ki + 2] - m = "-%s" % markers[ki + 2] + c = colors[ki+2] + m= '-%s'%markers[ki+2] try: dumy = g2_dict_[k].shape - # print( 'here is the shape' ) + #print( 'here is the shape' ) islist = False except: - islist_n = len(g2_dict_[k]) + islist_n = len( g2_dict_[k] ) islist = True - # print( 'here is the list' ) + #print( 'here is the list' ) if islist: - for nlst in range(islist_n): - m = "-%s" % markers[nlst] - # print(m) - y = g2_dict_[k][nlst][:, l_ind] + for nlst in range( islist_n ): + m = '-%s'%markers[ nlst ] + #print(m) + y=g2_dict_[k][nlst][:, l_ind ] x = taus_dict_[k][nlst] - if ki == 0: - ymin, ymax = min(y), max(y[1:]) - if g2_err_dict is None: - if g2_labels is None: - ax.semilogx(x, y, m, color=c, markersize=6) + if ki==0: + ymin,ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) else: - # print('here ki ={} nlst = {}'.format( ki, nlst )) - if nlst == 0: - ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + #print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst==0: + ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) else: - ax.semilogx(x, y, m, color=c, markersize=6) + ax.semilogx(x, y, m, color=c,markersize=6) else: - yerr = g2_err_dict[k][nlst][:, l_ind] - if g2_labels is None: - ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + yerr= g2_err_dict[k][nlst][:, l_ind ] + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) else: - if nlst == 0: - ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + if nlst==0: + ax.errorbar(x, y, yerr=yerr, fmt=m, + color=c,markersize=6, label=g2_labels[ki]) else: - ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) - ax.set_xscale("log", nonposx="clip") - if nlst == 0: - if l_ind == 0: - ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c,markersize=6) + ax.set_xscale("log", nonposx='clip') + if nlst==0: + if l_ind==0: + ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) else: - y = g2_dict_[k][:, l_ind] + y=g2_dict_[k][:, l_ind ] x = taus_dict_[k] - if ki == 0: - ymin, ymax = min(y), max(y[1:]) - if g2_err_dict is None: - if g2_labels is None: - ax.semilogx(x, y, m, color=c, markersize=6) + if ki==0: + ymin,ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) else: - ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) else: - yerr = g2_err_dict[k][:, l_ind] - # print(x.shape, y.shape, yerr.shape) - # print(yerr) - if g2_labels is None: - ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + yerr= g2_err_dict[k][:, l_ind ] + #print(x.shape, y.shape, yerr.shape) + 
#print(yerr) + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) else: - ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) - ax.set_xscale("log", nonposx="clip") - if l_ind == 0: - ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6,label=g2_labels[ki] ) + ax.set_xscale("log", nonposx='clip') + if l_ind==0: + ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) - if fit_res_ is not None: + if fit_res_ != None: result1 = fit_res_[l_ind] - # print (result1.best_values) - - beta = result1.best_values["beta"] - baseline = result1.best_values["baseline"] - if function == "simple_exponential" or function == "simple": - rate = result1.best_values["relaxation_rate"] - alpha = 1.0 - elif function == "stretched_exponential" or function == "stretched": - rate = result1.best_values["relaxation_rate"] - alpha = result1.best_values["alpha"] - elif function == "stretched_vibration": - rate = result1.best_values["relaxation_rate"] - alpha = result1.best_values["alpha"] - freq = result1.best_values["freq"] - elif function == "flow_vibration": - rate = result1.best_values["relaxation_rate"] - freq = result1.best_values["freq"] - if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": - rate = result1.best_values["relaxation_rate"] - flow = result1.best_values["flow_velocity"] - if function == "flow_para_function_explicitq" or function == "flow_para_qang": - diff = result1.best_values["diffusion"] + #print (result1.best_values) + + beta = result1.best_values['beta'] + baseline = result1.best_values['baseline'] + if function=='simple_exponential' or function=='simple': + rate = result1.best_values['relaxation_rate'] + alpha =1.0 + elif function=='stretched_exponential' or function=='stretched': + rate = result1.best_values['relaxation_rate'] + alpha = result1.best_values['alpha'] + elif function=='stretched_vibration': + rate = result1.best_values['relaxation_rate'] + alpha = result1.best_values['alpha'] + freq = result1.best_values['freq'] + elif function=='flow_vibration': + rate = result1.best_values['relaxation_rate'] + freq = result1.best_values['freq'] + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': + rate = result1.best_values['relaxation_rate'] + flow = result1.best_values['flow_velocity'] + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + diff = result1.best_values['diffusion'] qrr = short_ulabel[s_ind] - # print(qrr) + #print(qrr) rate = diff * qrr**2 - flow = result1.best_values["flow_velocity"] - if qval_dict_ is None: + flow = result1.best_values['flow_velocity'] + if qval_dict_ == None: print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") else: pass - if rate != 0: - txts = r"$\tau_0$" + r"$ = %.3f$" % (1 / rate) + r"$ s$" + if rate!=0: + txts = r'$\tau_0$' + r'$ = %.3f$'%(1/rate) + r'$ s$' else: - txts = r"$\tau_0$" + r"$ = inf$" + r"$ s$" - x = 0.25 - y0 = 0.9 + txts = r'$\tau_0$' + r'$ = inf$' + r'$ s$' + x=0.25 + y0=0.9 fontsize = 12 - ax.text(x=x, y=y0, s=txts, fontsize=fontsize, transform=ax.transAxes) - # print(function) - dt = 0 - if ( - function != "flow_para_function" - and function != "flow_para" - and function != "flow_vibration" - and function != "flow_para_qang" - ): - txts = r"$\alpha$" + r"$ = %.3f$" % (alpha) + ax.text(x =x, y= y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + 
#print(function) + dt=0 + if function!='flow_para_function' and function!='flow_para' and function!='flow_vibration' and function!='flow_para_qang': + txts = r'$\alpha$' + r'$ = %.3f$'%(alpha) + dt +=0.1 + #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r'$baseline$' + r'$ = %.3f$'%( baseline) + dt +=0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration' or function=='flow_para_qang': + txts = r'$flow_v$' + r'$ = %.3f$'%( flow) dt += 0.1 - # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' - ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - - txts = r"$baseline$" + r"$ = %.3f$" % (baseline) - dt += 0.1 - ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - - if ( - function == "flow_para_function" - or function == "flow_para" - or function == "flow_vibration" - or function == "flow_para_qang" - ): - txts = r"$flow_v$" + r"$ = %.3f$" % (flow) + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function=='stretched_vibration' or function=='flow_vibration': + txts = r'$vibration$' + r'$ = %.1f Hz$'%( freq) dt += 0.1 - ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - if function == "stretched_vibration" or function == "flow_vibration": - txts = r"$vibration$" + r"$ = %.1f Hz$" % (freq) - dt += 0.1 - ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r'$\beta$' + r'$ = %.3f$'%( beta ) + dt +=0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - txts = r"$\beta$" + r"$ = %.3f$" % (beta) - dt += 0.1 - ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - if "ylim" in kwargs: - ax.set_ylim(kwargs["ylim"]) - elif "vlim" in kwargs: - vmin, vmax = kwargs["vlim"] + if 'ylim' in kwargs: + ax.set_ylim( kwargs['ylim']) + elif 'vlim' in kwargs: + vmin, vmax =kwargs['vlim'] try: - ax.set_ylim([ymin * vmin, ymax * vmax]) + ax.set_ylim([ymin*vmin, ymax*vmax ]) except: pass else: pass - if "xlim" in kwargs: - ax.set_xlim(kwargs["xlim"]) - if num_short == 1: + if 'xlim' in kwargs: + ax.set_xlim( kwargs['xlim']) + if num_short == 1: fp = path + filename else: - fp = path + filename + "_%s_%s" % (mastp, s_ind) + fp = path + filename + '_%s_%s'%(mastp, s_ind) - if append_name is not "": + if append_name != '': fp = fp + append_name - fps.append(fp + ".png") - # if num_long_i <= 16: + fps.append( fp + '.png' ) + #if num_long_i <= 16: if num_long_i <= max_plotnum_fig: fig.set_tight_layout(True) - # fig.tight_layout() - # print(fig) + #fig.tight_layout() + #print(fig) try: - plt.savefig(fp + ".png", dpi=fig.dpi) + plt.savefig( fp + '.png', dpi=fig.dpi) except: - print("Can not save figure here.") + print('Can not save figure here.') else: - fps = [] + fps=[] for fn, f in enumerate(fig): f.set_tight_layout(True) - fp = path + filename + "_q_%s_%s" % (fn * 16, (fn + 1) * 16) - if append_name is not "": + fp = path + filename + '_q_%s_%s'%(fn*16, (fn+1)*16) + if append_name != '': fp = fp + append_name - fps.append(fp + ".png") - f.savefig(fp + ".png", dpi=f.dpi) - # plt.savefig( fp + '.png', dpi=fig.dpi) - # combine each saved images together - - if (num_short != 1) or (num_long_i > 16): - outputfile = path + filename + 
".png" - if append_name is not "": - outputfile = path + filename + append_name + "__joint.png" + fps.append( fp + '.png' ) + f.savefig( fp + '.png', dpi=f.dpi) + #plt.savefig( fp + '.png', dpi=fig.dpi) + #combine each saved images together + + if (num_short !=1) or (num_long_i > 16): + outputfile = path + filename + '.png' + if append_name != '': + outputfile = path + filename + append_name + '__joint.png' else: - outputfile = path + filename + "__joint.png" - combine_images(fps, outputfile, outsize=outsize) + outputfile = path + filename + '__joint.png' + combine_images( fps, outputfile, outsize= outsize ) if return_fig: return fig + def power_func(x, D0, power=2): return D0 * x**power -def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv, **kwargs): - """ +def get_q_rate_fit_general( qval_dict, rate, geometry ='saxs', weights=None, *argv,**kwargs): + ''' Dec 26,2016, Y.G.@CHX Fit q~rate by a power law function and fit curve pass (0,0) @@ -5884,78 +5488,57 @@ def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv Return: D0 qrate_fit_res - """ + ''' - power_variable = False + power_variable=False - if "fit_range" in kwargs.keys(): - fit_range = kwargs["fit_range"] + if 'fit_range' in kwargs.keys(): + fit_range = kwargs['fit_range'] else: - fit_range = None + fit_range= None - mod = Model(power_func) - # mod.set_param_hint( 'power', min=0.5, max= 10 ) - # mod.set_param_hint( 'D0', min=0 ) - pars = mod.make_params(power=2, D0=1 * 10 ^ (-5)) + mod = Model( power_func ) + #mod.set_param_hint( 'power', min=0.5, max= 10 ) + #mod.set_param_hint( 'D0', min=0 ) + pars = mod.make_params( power = 2, D0=1*10^(-5) ) if power_variable: - pars["power"].vary = True + pars['power'].vary = True else: - pars["power"].vary = False - - ( - qr_label, - qz_label, - num_qz, - num_qr, - num_short, - num_long, - short_label, - long_label, - short_ulabel, - long_ulabel, - ind_long, - master_plot, - mastp, - ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + pars['power'].vary = False + + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) Nqr = num_long Nqz = num_short - D0 = np.zeros(Nqz) - power = 2 # np.zeros( Nqz ) - qrate_fit_res = [] - # print(Nqz) - for i in range(Nqz): - ind_long_i = ind_long[i] - y = np.array(rate)[ind_long_i] - x = long_label[ind_long_i] - # print(y,x) - if fit_range is not None: - y = y[fit_range[0] : fit_range[1]] - x = x[fit_range[0] : fit_range[1]] - # print (i, y,x) - _result = mod.fit(y, pars, x=x, weights=weights) - qrate_fit_res.append(_result) - D0[i] = _result.best_values["D0"] - # power[i] = _result.best_values['power'] - print("The fitted diffusion coefficient D0 is: %.3e A^2S-1" % D0[i]) + D0= np.zeros( Nqz ) + power= 2 #np.zeros( Nqz ) + qrate_fit_res=[] + #print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + #print(y,x) + if fit_range != None: + y=y[fit_range[0]:fit_range[1]] + x=x[fit_range[0]:fit_range[1]] + #print (i, y,x) + _result = mod.fit(y, pars, x = x ,weights=weights ) + qrate_fit_res.append( _result ) + D0[i] = _result.best_values['D0'] + #power[i] = _result.best_values['power'] + print ('The fitted diffusion coefficient D0 is: %.3e A^2S-1'%D0[i]) return D0, qrate_fit_res -def plot_q_rate_fit_general( - qval_dict, - rate, - qrate_fit_res, - 
geometry="saxs", - ylim=None, - plot_all_range=True, - plot_index_range=None, - show_text=True, - return_fig=False, - show_fit=True, - *argv, - **kwargs, -): - """ +def plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry ='saxs', ylim = None, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, + *argv,**kwargs): + ''' Dec 26,2016, Y.G.@CHX plot q~rate fitted by a power law function and fit curve pass (0,0) @@ -5973,133 +5556,119 @@ def plot_q_rate_fit_general( Otherwise, power is variable. show_fit:, bool, if False, not show the fit - """ + ''' - if "uid" in kwargs.keys(): - uid = kwargs["uid"] + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] else: - uid = "uid" - if "path" in kwargs.keys(): - path = kwargs["path"] + uid = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] else: - path = "" - ( - qr_label, - qz_label, - num_qz, - num_qr, - num_short, - num_long, - short_label, - long_label, - short_ulabel, - long_ulabel, - ind_long, - master_plot, - mastp, - ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + path = '' + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) power = 2 - fig, ax = plt.subplots() - plt.title(r"$Q^%s$" % (power) + "-Rate-%s_Fit" % (uid), fontsize=20, y=1.06) + fig,ax = plt.subplots() + plt.title(r'$Q^%s$'%(power) + '-Rate-%s_Fit'%(uid),fontsize=20, y =1.06) Nqz = num_short - if Nqz != 1: - ls = "--" + if Nqz!=1: + ls = '--' else: - ls = "" - for i in range(Nqz): - ind_long_i = ind_long[i] - y = np.array(rate)[ind_long_i] - x = long_label[ind_long_i] - D0 = qrate_fit_res[i].best_values["D0"] - # print(i, x, y, D0 ) - if Nqz != 1: - label = r"$q_z=%.5f$" % short_ulabel[i] + ls='' + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values['D0'] + #print(i, x, y, D0 ) + if Nqz!=1: + label=r'$q_z=%.5f$'%short_ulabel[i] else: - label = "" - ax.plot(x**power, y, marker="o", ls=ls, label=label) + label='' + ax.plot(x**power, y, marker = 'o', ls =ls, label=label) yfit = qrate_fit_res[i].best_fit if show_fit: if plot_all_range: - ax.plot(x**power, x**power * D0, "-r") + ax.plot(x**power, x**power*D0, '-r') else: - ax.plot((x**power)[: len(yfit)], yfit, "-r") + ax.plot( (x**power)[:len(yfit) ], yfit, '-r') if show_text: - txts = r"$D0: %.3e$" % D0 + r" $A^2$" + r"$s^{-1}$" - dy = 0.1 - ax.text(x=0.15, y=0.65 - dy * i, s=txts, fontsize=14, transform=ax.transAxes) - if Nqz != 1: - legend = ax.legend(loc="best") - - if plot_index_range is not None: - d1, d2 = plot_index_range - d2 = min(len(x) - 1, d2) - ax.set_xlim((x**power)[d1], (x**power)[d2]) - ax.set_ylim(y[d1], y[d2]) - if ylim is not None: - ax.set_ylim(ylim) - - ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") - ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) - fp = path + "%s_Q_Rate" % (uid) + "_fit.png" - fig.savefig(fp, dpi=fig.dpi) + txts = r'$D0: %.3e$'%D0 + r' $A^2$' + r'$s^{-1}$' + dy=0.1 + ax.text(x =0.15, y=.65 -dy *i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz!=1:legend = ax.legend(loc='best') + + if plot_index_range != None: + d1,d2 = plot_index_range + d2 = min( len(x)-1, d2 ) + ax.set_xlim( (x**power)[d1], (x**power)[d2] ) + ax.set_ylim( y[d1],y[d2]) + if ylim != None: + ax.set_ylim( ylim ) + + ax.set_ylabel('Relaxation rate 
'r'$\gamma$'"($s^{-1}$)") + ax.set_xlabel("$q^%s$"r'($\AA^{-2}$)'%power) + fp = path + '%s_Q_Rate'%(uid) + '_fit.png' + fig.savefig( fp, dpi=fig.dpi) fig.tight_layout() if return_fig: - return fig, ax + return fig,ax -def save_g2_fit_para_tocsv(fit_res, filename, path): - """Y.G. Dec 29, 2016, +def save_g2_fit_para_tocsv( fit_res, filename, path): + '''Y.G. Dec 29, 2016, save g2 fitted parameter to csv file - """ - col = list(fit_res[0].best_values.keys()) - m, n = len(fit_res), len(col) - data = np.zeros([m, n]) - for i in range(m): - data[i] = list(fit_res[i].best_values.values()) - df = DataFrame(data) + ''' + col = list( fit_res[0].best_values.keys() ) + m,n = len( fit_res ), len( col ) + data = np.zeros( [m,n] ) + for i in range( m ): + data[i] = list( fit_res[i].best_values.values() ) + df = DataFrame( data ) df.columns = col - filename1 = os.path.join(path, filename) # + '.csv') + filename1 = os.path.join(path, filename) # + '.csv') df.to_csv(filename1) - print("The g2 fitting parameters are saved in %s" % filename1) + print( "The g2 fitting parameters are saved in %s"%filename1) return df -def R_2(ydata, fit_data): - """Calculates R squared for a particular fit - by L.W. + +def R_2(ydata,fit_data): + ''' Calculates R squared for a particular fit - by L.W. usage R_2(ydata,fit_data) returns R2 by L.W. Feb. 2019 - """ - y_ave = np.average(ydata) - SS_tot = np.sum((np.array(ydata) - y_ave) ** 2) - # print('SS_tot: %s'%SS_tot) - SS_res = np.sum((np.array(ydata) - np.array(fit_data)) ** 2) - # print('SS_res: %s'%SS_res) - return 1 - SS_res / SS_tot - - -def is_outlier(points, thresh=3.5, verbose=False): - """MAD test""" + ''' + y_ave=np.average(ydata) + SS_tot=np.sum((np.array(ydata)-y_ave)**2) + #print('SS_tot: %s'%SS_tot) + SS_res=np.sum((np.array(ydata)-np.array(fit_data))**2) + #print('SS_res: %s'%SS_res) + return 1-SS_res/SS_tot + +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ points.tolist() - if len(points) == 1: - points = points[:, None] + if len(points) ==1: + points=points[:,None] if verbose: - print("input to is_outlier is a single point...") - median = np.median(points) * np.ones(np.shape(points)) # , axis=0) - - diff = (points - median) ** 2 - diff = np.sqrt(diff) - med_abs_deviation = np.median(diff) - modified_z_score = 0.6745 * diff / med_abs_deviation + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation return modified_z_score > thresh - -def outlier_mask( - avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, plot=False -): +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): """ outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) avg_img: average image data (2D) @@ -6117,104 +5686,67 @@ def outlier_mask( by LW 06/21/2023 """ hhmask = np.ones(np.shape(roi_mask)) - pc = 1 - - for rn in np.arange(1, np.max(roi_mask) + 1, 1): - rm = np.zeros(np.shape(roi_mask)) - rm = rm - 1 - rm[np.where(roi_mask == rn)] = 1 - pixel = roi.roi_pixel_values(avg_img * rm, roi_mask, [rn]) - out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) - if np.nanmax(out_l) > 0: # Did detect at least one outlier - ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) - if verbose: - 
print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) try: - upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) - if verbose: - print("upper outlier threshold: %s" % upper_outlier_threshold) + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) except: upper_outlier_threshold = False - if verbose: - print("no upper outlier threshold found") - ind1 = (out_l * pixel[0][0]) > 0 - ind2 = (out_l * pixel[0][0]) < ave_roi_int + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int try: - lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) except: lower_outlier_threshold = False - if verbose: - print("no lower outlier threshold found") + if verbose: print('no lower outlier threshold found') else: - if verbose: - print("ROI #%s: no outliers detected" % rn) + if verbose: print('ROI #%s: no outliers detected'%rn) - ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi - outlier_fraction = np.sum(out_l) / len(pixel[0][0]) - if verbose: - print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) if outlier_fraction > maximum_outlier_fraction: - if verbose: - print( - "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" - % maximum_outlier_fraction - ) - upper_outlier_threshold = False - lower_outlier_threshold = False + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + upper_outlier_threshold = False; lower_outlier_threshold = False if upper_outlier_threshold: - hhmask[avg_img * rm > upper_outlier_threshold] = 0 + hhmask[avg_img*rm > upper_outlier_threshold] = 0 if lower_outlier_threshold: - hhmask[avg_img * rm < lower_outlier_threshold] = 0 + hhmask[avg_img*rm < lower_outlier_threshold] = 0 if plot: - if pc == 1: - fig, ax = plt.subplots(1, 5, figsize=(24, 4)) - plt.subplot(1, 5, pc) - pc += 1 - if pc > 5: - pc = 1 - pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) - plt.plot(pixel[0][0], "bo", markersize=1.5) + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) if upper_outlier_threshold or lower_outlier_threshold: - x = np.arange(len(out_l)) - plt.plot( - [x[0], x[-1]], - [ave_roi_int, ave_roi_int], - 
"g--", - label="ROI average: %s" % np.round(ave_roi_int, 4), - ) + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) if upper_outlier_threshold: - ind = (out_l * pixel[0][0]) > upper_outlier_threshold - plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") - plt.plot( - [x[0], x[-1]], - [upper_outlier_threshold, upper_outlier_threshold], - "r--", - label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), - ) + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) if lower_outlier_threshold: - ind = (out_l * pixel[0][0]) < lower_outlier_threshold - plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") - plt.plot( - [x[0], x[-1]], - [lower_outlier_threshold, lower_outlier_threshold], - "r--", - label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), - ) - plt.ylabel("Intensity") - plt.xlabel("pixel") - plt.title("ROI #: %s" % rn) - plt.legend(loc="best", fontsize=8) + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) if plot: - fig, ax = plt.subplots() + fig,ax = plt.subplots() plt.imshow(hhmask) - hot_dark = np.nonzero(hhmask < 1) - cmap = plt.cm.get_cmap("viridis") - plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) - plt.xlabel("pixel") - plt.ylabel("pixel") - plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) - - return hhmask + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier threshold: %s'%outlier_threshold) + + return hhmask \ No newline at end of file diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py index 6e5beea..6b10886 100644 --- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py @@ -1,14 +1,14 @@ -# from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict -# RUN_GUI = False -# from pyCHX.chx_libs import markers -import pandas as pds - -from pyCHX.chx_libs import colors, markers from pyCHX.chx_packages import * +from pyCHX.chx_libs import markers, colors +#from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict +#RUN_GUI = False +#from pyCHX.chx_libs import markers +import pandas as pds +# temporary fix: get_data() uses depreciated np.float and gets imported from pyCHX/chx_correlationc.py -> clobber function with temporary fix: +%run /nsls2/data/chx/legacy/analysis/2022_3/lwiegart/development/chx_analysis_setup.ipynb - -def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1): - """Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids +def get_t_iqc_uids( uid_list, setup_pargs, slice_num= 10, slice_width= 1): + '''Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids Input: uid_list: list of string (uid) setup_pargs: dict, for caculation of Iq, the key of this dict should include @@ -20,98 +20,80 @@ def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, 
diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py
index 6e5beea..6b10886 100644
--- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py
+++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py
@@ -1,14 +1,14 @@
-# from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict
-# RUN_GUI = False
-# from pyCHX.chx_libs import markers
-import pandas as pds
-
-from pyCHX.chx_libs import colors, markers
 from pyCHX.chx_packages import *
+from pyCHX.chx_libs import markers, colors
+#from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict
+#RUN_GUI = False
+#from pyCHX.chx_libs import markers
+import pandas as pds
+# temporary fix: get_data() uses deprecated np.float and gets imported from pyCHX/chx_correlationc.py -> clobber function with temporary fix:
+%run /nsls2/data/chx/legacy/analysis/2022_3/lwiegart/development/chx_analysis_setup.ipynb

-def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1):
-    """Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids
+def get_t_iqc_uids( uid_list, setup_pargs, slice_num= 10, slice_width= 1):
+    '''Get Iq at different time edge (defined by slice_num and slice_width) for a list of uids
    Input:
        uid_list: list of string (uid)
        setup_pargs: dict, for calculation of Iq, the key of this dict should include
@@ -20,98 +20,80 @@ def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1):
    Output:
        qs: dict, with uid as key, with value as q values
        iqsts:dict, with uid as key, with value as iq values
-        tstamp:dict, with uid as key, with value as time values
-
-    """
+        tstamp:dict, with uid as key, with value as time values 
+        
+    '''
    iqsts = {}
    tstamp = {}
    qs = {}
    label = []
    for uid in uid_list:
-        md = get_meta_data(uid)
-        luid = md["uid"]
-        timeperframe = md["cam_acquire_period"]
-        N = md["cam_num_images"]
-        filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % luid
+        md = get_meta_data( uid )
+        luid = md['uid']
+        timeperframe = md['cam_acquire_period']
+        N = md['cam_num_images']
+        filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%luid
         good_start = 5
-        FD = Multifile(filename, good_start, N)
-        Nimg = FD.end - FD.beg
-        time_edge = create_time_slice(Nimg, slice_num=slice_num, slice_width=slice_width, edges=None)
-        time_edge = np.array(time_edge) + good_start
-        # print( time_edge )
-        tstamp[uid] = time_edge[:, 0] * timeperframe
-        qpt, iqsts[uid], qt = get_t_iqc(FD, time_edge, None, pargs=setup_pargs, nx=1500)
-        qs[uid] = qt
-
+        FD = Multifile(filename, good_start, N )
+        Nimg = FD.end - FD.beg
+        time_edge = create_time_slice( Nimg, slice_num= slice_num, slice_width= slice_width, edges = None )
+        time_edge = np.array( time_edge ) + good_start
+        #print( time_edge )
+        tstamp[uid] = time_edge[:,0] * timeperframe
+        qpt, iqsts[uid], qt = get_t_iqc( FD, time_edge, None, pargs=setup_pargs, nx=1500 )
+        qs[uid] = qt
+        
    return qs, iqsts, tstamp

-def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf=""):
-    """plot q2~Iq at differnt time"""
+
+
+def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf='' ):
+    '''plot q2~Iq at different time'''
    if ax is None:
        fig, ax = plt.subplots()
    q = qt
    for i in range(iqst.shape[0]):
        yi = iqst[i] * q**2
-        time_labeli = perf + "time_%s s" % (round(tstamp[i], 3))
-        plot1D(
-            x=q,
-            y=yi,
-            legend=time_labeli,
-            xlabel="Q (A-1)",
-            ylabel="I(q)*Q^2",
-            title="I(q)*Q^2 ~ time",
-            m=markers[i],
-            c=colors[i],
-            ax=ax,
-            ylim=[-0.001, 0.005],
-        )  # , xlim=[0.007,0.1] )
-
-
-def plot_t_iqc_uids(qs, iqsts, tstamps):
-    """plot q2~Iq at differnt time for a uid list"""
+        time_labeli = perf+'time_%s s'%( round( tstamp[i], 3) )
+        plot1D( x = q, y = yi, legend= time_labeli, xlabel='Q (A-1)', ylabel='I(q)*Q^2', title='I(q)*Q^2 ~ time',
+               m=markers[i], c = colors[i], ax=ax, ylim=[ -0.001, 0.005]) #, xlim=[0.007,0.1] )


+def plot_t_iqc_uids( qs, iqsts, tstamps ):
+    '''plot q2~Iq at different time for a uid list
+    '''
    keys = list(qs.keys())
    fig, ax = plt.subplots()
    for uid in keys:
        qt = qs[uid]
        iqst = iqsts[uid]
-        tstamp = tstamps[uid]
-        plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + "_")
-
-
-def plot_entries_from_csvlist(
-    csv_list,
-    uid_list,
-    inDir,
-    key="g2",
-    qth=1,
-    legend_size=8,
-    yshift=0.01,
-    ymulti=1,
-    xlim=None,
-    ylim=None,
-    uid_length=None,
-    legend=None,
-    fp_fulluid=True,
-):
-    """
+        tstamp = tstamps[uid]
+        plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + '_' )
+
+
+def plot_entries_from_csvlist( csv_list, uid_list, inDir, key = 'g2', qth = 1, legend_size=8,
+                              yshift= 0.01, ymulti=1, xlim=None, ylim=None,uid_length=None,
+                              legend=None, fp_fulluid=True ):
+
+    '''
    YG Feb2, 2018, make yshift be also a list
-
+    
    YG June 9, 2017@CHX
    YG Sep 29, 2017@CHX. 
plot enteries for a list csvs Input: csv_list: list, a list of uid (string) inDir: string, imported folder for saved analysis results - key: string, plot entry, surport - 'g2' for one-time, + key: string, plot entry, surport + 'g2' for one-time, 'iq' for q~iq - 'mean_int_sets' for mean intensity of each roi as a function of frame + 'mean_int_sets' for mean intensity of each roi as a function of frame TODOLIST:#also can plot the following - dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', - 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', - 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', - 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) qth: integer, the intesrest q number yshift: float, values of shift in y direction xlim: [x1,x2], for plot x limit @@ -121,143 +103,90 @@ def plot_entries_from_csvlist( Example: uid_list = ['5492b9', '54c5e0'] plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) - """ - + ''' + uid_dict = {} - fig, ax = plt.subplots() + fig, ax =plt.subplots() for uid in uid_list: if uid_length is not None: uid_ = uid[:uid_length] else: - uid_ = uid - # print(uid_) - uid_dict[uid_] = get_meta_data(uid)["uid"] - # for i, u in enumerate( list( uid_dict.keys() )): - - for i, fp in enumerate(list(csv_list)): - u = uid_list[i] # print(u) - inDiru = inDir + u + "/" + uid_=uid + #print(uid_) + uid_dict[uid_] = get_meta_data( uid )['uid'] + #for i, u in enumerate( list( uid_dict.keys() )): + + for i,fp in enumerate( list(csv_list)): + u = uid_list[i] #print(u) + inDiru = inDir + u + '/' if fp_fulluid: - inDiru = inDir + uid_dict[u] + "/" + inDiru = inDir + uid_dict[u] + '/' else: - inDiru = inDir + u + "/" - d = pds.read_csv(inDiru + fp) - # print(d) - - if key == "g2": - taus = d["tau"][1:] - col = d.columns[qth + 1] - # print( qth+1, col ) - y = d[col][1:] + inDiru = inDir + u + '/' + d = pds.read_csv( inDiru + fp ) + #print(d) + + if key == 'g2': + taus = d['tau'][1:] + col = d.columns[qth +1] + #print( qth+1, col ) + y= d[col][1:] if legend is None: - leg = u + leg=u else: - leg = "uid=%s-->" % u + legend[i] - if isinstance(yshift, list): + leg='uid=%s-->'%u+legend[i] + if isinstance(yshift,list): yshift_ = yshift[i] ii = i + 1 else: yshift_ = yshift ii = i - plot1D( - x=taus, - y=y + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=True, - legend=leg, - xlabel="t (sec)", - ylabel="g2", - legend_size=legend_size, - ) - title = "Q = %s" % (col) + plot1D( x = taus, y=y + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, + xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) + title='Q = %s'%(col) ax.set_title(title) - elif key == "imgsum": - y = total_res[key] - plot1D( - y=d + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=False, - legend=u, - xlabel="Frame", - ylabel="imgsum", - ) - - elif key == "iq": - x = total_res["q_saxs"] - y = total_res["iq_saxs"] - plot1D( - x=x, - y=y * ymulti[i] + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=False, - logy=True, - legend=u, - xlabel="Q " r"($\AA^{-1}$)", - ylabel="I(q)", - ) + elif key=='imgsum': + y = total_res[key] + plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel='Frame', 
ylabel='imgsum',) + + elif key == 'iq': + x= total_res['q_saxs'] + y= total_res['iq_saxs'] + plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, + legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) else: - d = total_res[key][:, qth] - plot1D( - x=np.arange(len(d)), - y=d + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=False, - legend=u, - xlabel="xx", - ylabel=key, - ) - if key == "mean_int_sets": - ax.set_xlabel("frame ") - if xlim is not None: - ax.set_xlim(xlim) - if ylim is not None: - ax.set_ylim(ylim) - return fig, ax - - -def plot_entries_from_uids( - uid_list, - inDir, - key="g2", - qth=1, - legend_size=8, - yshift=0.01, - ymulti=1, - xlim=None, - ylim=None, - legend=None, - uid_length=None, - filename_list=None, - fp_fulluid=False, - fp_append=None, -): # ,title='' ): - """ + d = total_res[key][:,qth] + plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel= 'xx', ylabel=key ) + if key=='mean_int_sets':ax.set_xlabel( 'frame ') + if xlim is not None:ax.set_xlim(xlim) + if ylim is not None:ax.set_ylim(ylim) + return fig,ax + + +def plot_entries_from_uids( uid_list, inDir, key= 'g2', qth = 1, legend_size=8, + yshift= 0.01, ymulti=1, xlim=None, ylim=None,legend=None, uid_length = None, filename_list=None, fp_fulluid=False, fp_append = None ):#,title='' ): + + ''' YG Feb2, 2018, make yshift be also a list - + YG June 9, 2017@CHX YG Sep 29, 2017@CHX. plot enteries for a list uids Input: uid_list: list, a list of uid (string) inDir: string, imported folder for saved analysis results - key: string, plot entry, surport - 'g2' for one-time, + key: string, plot entry, surport + 'g2' for one-time, 'iq' for q~iq - 'mean_int_sets' for mean intensity of each roi as a function of frame + 'mean_int_sets' for mean intensity of each roi as a function of frame TODOLIST:#also can plot the following - dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', - 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', - 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', - 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) qth: integer, the intesrest q number yshift: float, values of shift in y direction xlim: [x1,x2], for plot x limit @@ -267,111 +196,78 @@ def plot_entries_from_uids( Example: uid_list = ['5492b9', '54c5e0'] plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) - """ - + ''' + uid_dict = {} - fig, ax = plt.subplots() + fig, ax =plt.subplots() for uid in uid_list: if uid_length is not None: uid_ = uid[:uid_length] else: - uid_ = uid - # print(uid_) - uid_dict[uid_] = get_meta_data(uid)["uid"] - # for i, u in enumerate( list( uid_dict.keys() )): - for i, u in enumerate(list(uid_list)): - # print(u) - if isinstance(yshift, list): + uid_=uid + #print(uid_) + uid_dict[uid_] = get_meta_data( uid )['uid'] + #for i, u in enumerate( list( uid_dict.keys() )): + for i,u in enumerate( list(uid_list)): + #print(u) + if isinstance(yshift,list): yshift_ = yshift[i] ii = i + 1 else: yshift_ = yshift - ii = i + ii = i if uid_length is not None: - u = u[:uid_length] - inDiru = inDir + u + "/" + u = u[:uid_length] + inDiru = inDir + u + '/' if 
fp_fulluid: - inDiru = inDir + uid_dict[u] + "/" + inDiru = inDir + uid_dict[u] + '/' else: - inDiru = inDir + u + "/" + inDiru = inDir + u + '/' if filename_list is None: - if fp_append is not None: - filename = "uid=%s%s_Res.h5" % (uid_dict[u], fp_append) - else: - filename = "uid=%s_Res.h5" % uid_dict[u] + if fp_append is not None: + filename = 'uid=%s%s_Res.h5'%(uid_dict[u],fp_append ) + else: + filename = 'uid=%s_Res.h5'%uid_dict[u] else: - filename = filename_list[i] - total_res = extract_xpcs_results_from_h5(filename=filename, import_dir=inDiru, exclude_keys=["g12b"]) - if key == "g2": - d = total_res[key][1:, qth] - taus = total_res["taus"][1:] + filename = filename_list[i] + total_res = extract_xpcs_results_from_h5( filename = filename, + import_dir = inDiru, exclude_keys = ['g12b'] ) + if key=='g2': + d = total_res[key][1:,qth] + taus = total_res['taus'][1:] if legend is None: - leg = u + leg=u else: - leg = "uid=%s-->" % u + legend[i] - plot1D( - x=taus, - y=d + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=True, - legend=leg, - xlabel="t (sec)", - ylabel="g2", - legend_size=legend_size, - ) - title = "Q = %s" % (total_res["qval_dict"][qth]) + leg='uid=%s-->'%u+legend[i] + plot1D( x = taus, y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, + xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) + title='Q = %s'%(total_res['qval_dict'][qth]) ax.set_title(title) - elif key == "imgsum": - d = total_res[key] - plot1D( - y=d + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=False, - legend=u, - xlabel="Frame", - ylabel="imgsum", - ) - - elif key == "iq": - x = total_res["q_saxs"] - y = total_res["iq_saxs"] - plot1D( - x=x, - y=y * ymulti[i] + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=False, - logy=True, - legend=u, - xlabel="Q " r"($\AA^{-1}$)", - ylabel="I(q)", - ) + elif key=='imgsum': + d = total_res[key] + plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel='Frame', ylabel='imgsum',) + + elif key == 'iq': + + x= total_res['q_saxs'] + y= total_res['iq_saxs'] + plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, + legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) else: - d = total_res[key][:, qth] - plot1D( - x=np.arange(len(d)), - y=d + yshift_ * ii, - c=colors[i], - m=markers[i], - ax=ax, - logx=False, - legend=u, - xlabel="xx", - ylabel=key, - ) - if key == "mean_int_sets": - ax.set_xlabel("frame ") - if xlim is not None: - ax.set_xlim(xlim) - if ylim is not None: - ax.set_ylim(ylim) - return fig, ax + d = total_res[key][:,qth] + plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel= 'xx', ylabel=key ) + if key=='mean_int_sets':ax.set_xlabel( 'frame ') + if xlim is not None:ax.set_xlim(xlim) + if ylim is not None:ax.set_ylim(ylim) + return fig,ax + + + + + #################################################################################################### @@ -379,8 +275,11 @@ def plot_entries_from_uids( ################################################################################################# -def get_iq_from_uids(uids, mask, setup_pargs): - """Y.G. developed July 17, 2017 @CHX + + + +def get_iq_from_uids( uids, mask, setup_pargs ): + ''' Y.G. 
developed July 17, 2017 @CHX
    Get q-Iq of a uids dict, each uid could correspond to one frame or a time series
    uids: dict, val: meaningful description, key: a list of uids
    mask: bool-type 2D array
@@ -392,414 +291,367 @@ def get_iq_from_uids(uids, mask, setup_pargs):
        'exposuretime': 0.99998999,
        'lambda_': 1.2845441,
        'path': '/XF11ID/analysis/2017_2/yuzhang/Results/Yang_Pressure/',
-
-    """
-    Nuid = len(np.concatenate(np.array(list(uids.values()))))
-    label = np.zeros([Nuid + 1], dtype=object)
-    img_data = {}  # np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]])
-
-    n = 0
+        
+    '''
+    Nuid = len( np.concatenate( np.array( list(uids.values()) ) ) )
+    label = np.zeros( [ Nuid+1], dtype=object)
+    img_data = {} #np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]])
+    
+    n = 0 
    for k in list(uids.keys()):
        for uid in uids[k]:
-            uidstr = "uid=%s" % uid
+
+            uidstr = 'uid=%s'%uid
            sud = get_sid_filenames(db[uid])
-            # print(sud)
-            md = get_meta_data(uid)
-            imgs = load_data(uid, md["detector"], reverse=True)
-            md.update(imgs.md)
-            Nimg = len(imgs)
-            if Nimg != 1:
-                filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % sud[1]
-                mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata(
-                    imgs,
-                    mask,
-                    md,
-                    filename,
-                    force_compress=False,
-                    para_compress=True,
-                    bad_pixel_threshold=1e14,
-                    bins=1,
-                    num_sub=100,
-                    num_max_para_process=500,
-                    with_pickle=True,
-                )
+            #print(sud)
+            md = get_meta_data( uid )
+            imgs = load_data( uid, md['detector'], reverse= True )
+            md.update( imgs.md );
+            Nimg = len(imgs);
+            if Nimg !=1:
+                filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%sud[1]
+                mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename,
+                    force_compress= False, para_compress= True, bad_pixel_threshold = 1e14,
+                    bins=1, num_sub= 100, num_max_para_process= 500, with_pickle=True )
            else:
                avg_img = imgs[0]
-            show_img(
-                avg_img,
-                vmin=0.00001,
-                vmax=1e1,
-                logs=True,
-                aspect=1,  # save_format='tif',
-                image_name=uidstr + "_img_avg",
-                save=True,
-                path=setup_pargs["path"],
-                cmap=cmap_albula,
-            )
-
-            setup_pargs["uid"] = uidstr
-
-            qp_saxs, iq_saxs, q_saxs = get_circular_average(avg_img, mask, pargs=setup_pargs, save=True)
-            if n == 0:
-                iqs = np.zeros([len(q_saxs), Nuid + 1])
-                iqs[:, 0] = q_saxs
-                label[0] = "q"
-            img_data[k + "_" + uid] = avg_img
-            iqs[:, n + 1] = iq_saxs
-            label[n + 1] = k + "_" + uid
-            n += 1
-            plot_circular_average(
-                qp_saxs,
-                iq_saxs,
-                q_saxs,
-                pargs=setup_pargs,
-                xlim=[q_saxs.min(), q_saxs.max() * 0.9],
-                ylim=[iq_saxs.min(), iq_saxs.max()],
-            )
-    if "filename" in list(setup_pargs.keys()):
-        filename = setup_pargs["filename"]
+            show_img( avg_img, vmin=0.00001, vmax= 1e1, logs=True, aspect=1, #save_format='tif',
+                     image_name= uidstr + '_img_avg', save=True,
+                     path=setup_pargs['path'], cmap = cmap_albula )
+
+            setup_pargs['uid'] = uidstr
+
+            qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img, mask,
+                                                            pargs= setup_pargs, save=True )
+            if n ==0:
+                iqs = np.zeros( [ len(q_saxs), Nuid+1])
+                iqs[:,0] = q_saxs
+                label[0] = 'q'
+            img_data[ k + '_'+ uid ] = avg_img
+            iqs[:,n+1] = iq_saxs
+            label[n+1] = k + '_'+ uid
+            n +=1
+            plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs,
+                                  xlim=[q_saxs.min(), q_saxs.max()*0.9], ylim = [iq_saxs.min(), iq_saxs.max()] )
+    if 'filename' in list(setup_pargs.keys()):
+        filename = setup_pargs['filename']
    else:
-        filename = "qIq.csv"
-    pd = save_arrays(iqs, label=label, dtype="array", filename=filename, path=setup_pargs["path"], return_res=True)
+        filename = 'qIq.csv'
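+    # editor's note (illustrative gloss, not in the original patch): the table
+    # written below has q as its first column and one I(q) column per uid,
+    # labeled "<key>_<uid>"; the same labels key the returned img_data dict of
+    # averaged images.
+    pd = 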
save_arrays( iqs, label=label, dtype='array', filename= filename, + path= setup_pargs['path'], return_res=True) return pd, img_data - - -def wait_func(wait_time=2): - print("Waiting %s secdons for upcoming data..." % wait_time) - time.sleep(wait_time) - # print( 'Starting to do something here...') - - -def wait_data_acquistion_finish(uid, wait_time=2, max_try_num=3): - """check the completion of a data uid acquistion - Parameter: - uid: - wait_time: the waiting step in unit of second - check_func: the function to check the completion - max_try_num: the maximum number for waiting - Return: - True: completion - False: not completion (include waiting time exceeds the max_wait_time) - - """ + + + +def wait_func( wait_time = 2 ): + print( 'Waiting %s secdons for upcoming data...'%wait_time) + time.sleep( wait_time) + #print( 'Starting to do something here...') + +def wait_data_acquistion_finish( uid, wait_time = 2, max_try_num = 3 ): + '''check the completion of a data uid acquistion + Parameter: + uid: + wait_time: the waiting step in unit of second + check_func: the function to check the completion + max_try_num: the maximum number for waiting + Return: + True: completion + False: not completion (include waiting time exceeds the max_wait_time) + + ''' FINISH = False Fake_FINISH = True - w = 0 - sleep_time = 0 - while not FINISH: + w = 0 + sleep_time = 0 + while( not FINISH): try: - get_meta_data(uid) + get_meta_data( uid ) FINISH = True - print("The data acquistion finished.") - print("Starting to do something here...") - except: - wait_func(wait_time=wait_time) + print( 'The data acquistion finished.') + print( 'Starting to do something here...') + except: + wait_func( wait_time = wait_time ) w += 1 - print("Try number: %s" % w) - if w > max_try_num: - print("There could be something going wrong with data acquistion.") - print("Force to terminate after %s tries." % w) + print('Try number: %s'%w) + if w> max_try_num: + print( 'There could be something going wrong with data acquistion.') + print( 'Force to terminate after %s tries.'%w) FINISH = True Fake_FINISH = False - sleep_time += wait_time - return FINISH * Fake_FINISH # , sleep_time - - -def get_uids_by_range(start_uidth=-1, end_uidth=0): - """Y.G. Dec 22, 2016 - A wrap funciton to find uids by giving start and end uid number, i.e. -10, -1 - Return: - uids: list, uid with 8 character length - fuids: list, uid with full length - - """ - hdrs = list([db[n] for n in range(start_uidth, end_uidth)]) - if len(hdrs) != 0: - print("Totally %s uids are found." % (len(hdrs))) - - uids = [] # short uid - fuids = [] # full uid - for hdr in hdrs: - fuid = hdr["start"]["uid"] - uids.append(fuid[:8]) - fuids.append(fuid) - uids = uids[::-1] - fuids = fuids[::-1] - return np.array(uids), np.array(fuids) - - -def get_uids_in_time_period(start_time, stop_time): - """Y.G. Dec 22, 2016 - A wrap funciton to find uids by giving start and end time - Return: - uids: list, uid with 8 character length - fuids: list, uid with full length - - """ - hdrs = list(db(start_time=start_time, stop_time=stop_time)) - if len(hdrs) != 0: - print("Totally %s uids are found." % (len(hdrs))) - - uids = [] # short uid - fuids = [] # full uid - for hdr in hdrs: - fuid = hdr["start"]["uid"] - uids.append(fuid[:8]) - fuids.append(fuid) - uids = uids[::-1] - fuids = fuids[::-1] - return np.array(uids), np.array(fuids) - - -def do_compress_on_line(start_time, stop_time, mask_dict=None, mask=None, wait_time=2, max_try_num=3): - """Y.G. 
Mar 10, 2017 - Do on-line compress by giving start time and stop time - Parameters: - mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} - wait_time: search interval time - max_try_num: for each found uid, will try max_try_num*wait_time seconds - Return: - running time - """ - - t0 = time.time() - uids, fuids = get_uids_in_time_period(start_time, stop_time) - print(fuids) + sleep_time += wait_time + return FINISH * Fake_FINISH #, sleep_time + +def get_uids_by_range( start_uidth=-1, end_uidth = 0 ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end uid number, i.e. -10, -1 + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = list([ db[n] for n in range(start_uidth, end_uidth)] ) + if len(hdrs)!=0: + print ('Totally %s uids are found.'%(len(hdrs))) + + uids=[] #short uid + fuids=[] #full uid + for hdr in hdrs: + fuid = hdr['start']['uid'] + uids.append( fuid[:8] ) + fuids.append( fuid ) + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(uids), np.array(fuids) + + +def get_uids_in_time_period( start_time, stop_time ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = list( db(start_time= start_time, stop_time = stop_time) ) + if len(hdrs)!=0: + print ('Totally %s uids are found.'%(len(hdrs))) + + uids=[] #short uid + fuids=[] #full uid + for hdr in hdrs: + fuid = hdr['start']['uid'] + uids.append( fuid[:8] ) + fuids.append( fuid ) + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(uids), np.array(fuids) + +def do_compress_on_line( start_time, stop_time, mask_dict=None, mask=None, + wait_time = 2, max_try_num = 3 ): + '''Y.G. Mar 10, 2017 + Do on-line compress by giving start time and stop time + Parameters: + mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + Return: + running time + ''' + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + print( fuids ) if len(fuids): for uid in fuids: - print("*" * 50) - print("Do compress for %s now..." % uid) - if db[uid]["start"]["plan_name"] == "count": - finish = wait_data_acquistion_finish(uid, wait_time, max_try_num) - if finish: + print('*'*50) + print('Do compress for %s now...'%uid) + if db[uid]['start']['plan_name'] == 'count': + finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) + if finish: try: - md = get_meta_data(uid) - compress_multi_uids( - [uid], - mask=mask, - mask_dict=mask_dict, - force_compress=False, - para_compress=True, - bin_frame_number=1, - ) - - update_olog_uid(uid=md["uid"], text="Data are on-line sparsified!", attachments=None) + md = get_meta_data( uid ) + compress_multi_uids( [ uid ], mask=mask, mask_dict = mask_dict, + force_compress=False, para_compress= True, bin_frame_number=1 ) + + update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) except: - print("There are something wrong with this data: %s..." % uid) - print("*" * 50) + print('There are something wrong with this data: %s...'%uid) + print('*'*50) return time.time() - t0 -def realtime_xpcs_analysis( - start_time, stop_time, run_pargs, md_update=None, wait_time=2, max_try_num=3, emulation=False, clear_plot=False -): - """Y.G. 
Mar 10, 2017 - Do on-line xpcs by giving start time and stop time - Parameters: - run_pargs: all the run control parameters, including giving roi_mask - md_update: if not None, a dict, will update all the found uid metadata by this md_update - e.g, - md['beam_center_x'] = 1012 - md['beam_center_y']= 1020 - md['det_distance']= 16718.0 - wait_time: search interval time - max_try_num: for each found uid, will try max_try_num*wait_time seconds - emulation: if True, it will only check dataset and not do real analysis - Return: - running time - """ - t0 = time.time() - uids, fuids = get_uids_in_time_period(start_time, stop_time) - # print( fuids ) +def realtime_xpcs_analysis( start_time, stop_time, run_pargs, md_update=None, + wait_time = 2, max_try_num = 3, emulation=False,clear_plot=False ): + '''Y.G. Mar 10, 2017 + Do on-line xpcs by giving start time and stop time + Parameters: + run_pargs: all the run control parameters, including giving roi_mask + md_update: if not None, a dict, will update all the found uid metadata by this md_update + e.g, + md['beam_center_x'] = 1012 + md['beam_center_y']= 1020 + md['det_distance']= 16718.0 + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + emulation: if True, it will only check dataset and not do real analysis + Return: + running time + ''' + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + #print( fuids ) if len(fuids): for uid in fuids: - print("*" * 50) - # print('Do compress for %s now...'%uid) - print("Starting analysis for %s now..." % uid) - if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": - # if db[uid]['start']['dtype'] =='xpcs': - finish = wait_data_acquistion_finish(uid, wait_time, max_try_num) - if finish: + print('*'*50) + #print('Do compress for %s now...'%uid) + print('Starting analysis for %s now...'%uid) + if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': + #if db[uid]['start']['dtype'] =='xpcs': + finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) + if finish: try: - md = get_meta_data(uid) + md = get_meta_data( uid ) ##corect some metadata if md_update is not None: - md.update(md_update) - # if 'username' in list(md.keys()): - # try: + md.update( md_update ) + #if 'username' in list(md.keys()): + #try: # md_cor['username'] = md_update['username'] - # except: + #except: # md_cor = None - # uid = uid[:8] - # print(md_cor) + #uid = uid[:8] + #print(md_cor) if not emulation: - # suid=uid[:6] - run_xpcs_xsvs_single( - uid, run_pargs=run_pargs, md_cor=None, return_res=False, clear_plot=clear_plot - ) - # update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + #suid=uid[:6] + run_xpcs_xsvs_single( uid, run_pargs= run_pargs, md_cor = None, + return_res= False, clear_plot=clear_plot ) + #update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) except: - print("There are something wrong with this data: %s..." % uid) + print('There are something wrong with this data: %s...'%uid) else: - print("\nThis is not a XPCS series. We will simiply ignore it.") - print("*" * 50) + print('\nThis is not a XPCS series. 
We will simply ignore it.')
+            print('*'*50)
+
+    #print( 'Sleep 10 sec here!!!')
+    #time.sleep(10)
+
+    return time.time() - t0
+
+
+
+
+
+
+
+
+


####################################################################################################
##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress##
#################################################################################################
-def compress_multi_uids(
-    uids,
-    mask,
-    mask_dict=None,
-    force_compress=False,
-    para_compress=True,
-    bin_frame_number=1,
-    reverse=True,
-    rot90=False,
-    use_local_disk=True,
-):
-    """Compress time series data for a set of uids
+def compress_multi_uids( uids, mask, mask_dict = None, force_compress=False, para_compress= True, bin_frame_number=1,
+                        reverse=True, rot90=False,use_local_disk=True):
+    ''' Compress time series data for a set of uids
    Parameters:
        uids: list, a list of uid
        mask: bool array, mask array
        force_compress: default is False, just load the compressed data;
                    if True, will compress it to overwrite the old compressed data
        para_compress: apply the parallel compress algorithm
-        bin_frame_number:
+        bin_frame_number: 
    Return:
        None, save the compressed data in, by default, /XF11ID/analysis/Compressed_Data with filename as
             '/uid_%s.cmp' uid is the full uid string
-
+    
    e.g.,  compress_multi_uids( uids, mask, force_compress= False,  bin_frame_number=1 )
-
-    """
-    for uid in uids:
-        print("UID: %s is in processing..." % uid)
-        if validate_uid(uid):
-            md = get_meta_data(uid)
-            imgs = load_data(uid, md["detector"], reverse=reverse, rot90=rot90)
+    
+    '''
+    for uid in uids:
+        print('UID: %s is in processing...'%uid)
+        if validate_uid( uid ):
+            md = get_meta_data( uid )
+            imgs = load_data( uid, md['detector'], reverse= reverse, rot90=rot90 )
            sud = get_sid_filenames(db[uid])
            for pa in sud[2]:
-                if "master.h5" in pa:
-                    data_fullpath = pa
-            print(imgs, data_fullpath)
+                if 'master.h5' in pa:
+                    data_fullpath = pa
+            print( imgs, data_fullpath )
            if mask_dict is not None:
-                mask = mask_dict[md["detector"]]
-                print("The detecotr is: %s" % md["detector"])
-            md.update(imgs.md)
+                mask = mask_dict[md['detector']]
+                print('The detector is: %s'% md['detector'])
+            md.update( imgs.md )
            if not use_local_disk:
-                cmp_path = "/nsls2/xf11id1/analysis/Compressed_Data"
+                cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data'
            else:
-                cmp_path = "/tmp_data/compressed"
-                cmp_path = "/nsls2/xf11id1/analysis/Compressed_Data"
-            if bin_frame_number == 1:
-                cmp_file = "/uid_%s.cmp" % md["uid"]
+                cmp_path = '/tmp_data/compressed'
+            cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data'
+            if bin_frame_number==1:
+                cmp_file = '/uid_%s.cmp'%md['uid']
            else:
-                cmp_file = "/uid_%s_bined--%s.cmp" % (md["uid"], bin_frame_number)
-            filename = cmp_path + cmp_file
-            mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(
-                imgs,
-                mask,
-                md,
-                filename,
-                force_compress=force_compress,
-                para_compress=para_compress,
-                bad_pixel_threshold=1e14,
-                reverse=reverse,
-                rot90=rot90,
-                bins=bin_frame_number,
-                num_sub=100,
-                num_max_para_process=500,
-                with_pickle=True,
-                direct_load_data=use_local_disk,
-                data_path=data_fullpath,
-            )
-
-            print("Done!")
+                cmp_file = '/uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number)
+            filename = cmp_path + cmp_file
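+            # editor's note (illustrative gloss, not in the original patch):
+            # compress_eigerdata() sparsifies the frame series into `filename`;
+            # bins=bin_frame_number bins frames in time, num_sub and
+            # num_max_para_process tune the parallel compression chunking, and
+            # pixels whose counts exceed bad_pixel_threshold are treated as bad.
+            mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename,
+                force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold = 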
1e14, + reverse=reverse, rot90=rot90, + bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True, + direct_load_data =use_local_disk, data_path = data_fullpath, ) + print('Done!') + #################################################################################################### ##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## ################################################################################################# - -def get_two_time_mulit_uids( - uids, - roi_mask, - norm=None, - bin_frame_number=1, - path=None, - force_generate=False, - md=None, - imgs=None, - direct_load_data=False, - compress_path=None, -): - """Calculate two time correlation by using auto_two_Arrayc func for a set of uids, +def get_two_time_mulit_uids( uids, roi_mask, norm= None, bin_frame_number=1, path=None, force_generate=False, + md=None, imgs=None,direct_load_data=False,compress_path=None ): + + ''' Calculate two time correlation by using auto_two_Arrayc func for a set of uids, if the two-time resutls are already created, by default (force_generate=False), just pass Parameters: uids: list, a list of uid roi_mask: bool array, roi mask array - norm: the normalization array - path: string, where to save the two time + norm: the normalization array + path: string, where to save the two time force_generate: default, False, if the two-time resutls are already created, just pass if True, will force to calculate two-time no matter exist or not - + Return: - None, save the two-time in as path + uid + 'uid=%s_g12b'%uid - + None, save the two-time in as path + uid + 'uid=%s_g12b'%uid + e.g., - get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, + get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, path= data_dir,force_generate=False ) - - """ - + + ''' + qind, pixelist = roi.extract_label_indices(roi_mask) for uid in uids: - print("UID: %s is in processing..." % uid) + print('UID: %s is in processing...'%uid) if not direct_load_data: - md = get_meta_data(uid) - imgs = load_data(uid, md["detector"], reverse=True) + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) else: pass N = len(imgs) - # print( N ) + #print( N ) if compress_path is None: - compress_path = "/XF11ID/analysis/Compressed_Data/" - if bin_frame_number == 1: - filename = "%s" % compress_path + "uid_%s.cmp" % md["uid"] + compress_path = '/XF11ID/analysis/Compressed_Data/' + if bin_frame_number==1: + filename = '%s'%compress_path +'uid_%s.cmp'%md['uid'] else: - filename = "%s" % compress_path + "uid_%s_bined--%s.cmp" % (md["uid"], bin_frame_number) - - FD = Multifile(filename, 0, N // bin_frame_number) - # print( FD.beg, FD.end) - uid_ = md["uid"] - os.makedirs(path + uid_ + "/", exist_ok=True) - filename = path + uid_ + "/" + "uid=%s_g12b" % uid + filename = '%s'%compress_path +'uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) + + FD = Multifile(filename, 0, N//bin_frame_number) + #print( FD.beg, FD.end) + uid_ = md['uid'] + os.makedirs(path + uid_ + '/', exist_ok=True) + filename = path + uid_ + '/' + 'uid=%s_g12b'%uid doit = True if not force_generate: - if os.path.exists(filename + ".npy"): - doit = False - print("The two time correlation function for uid=%s is already calculated. Just pass..." 



-def get_series_g2_from_g12(
-    g12b, fra_num_by_dose=None, dose_label=None, good_start=0, log_taus=True, num_bufs=8, time_step=1
-):
-    """
+def get_series_g2_from_g12( g12b, fra_num_by_dose = None, dose_label = None,
+                           good_start=0, log_taus = True, num_bufs=8, time_step=1 ):
+    '''
    Get a series of one-time functions from two-time by giving noframes
    Parameters:
        g12b: a two time function
@@ -807,86 +659,75 @@ def get_series_g2_from_g12(
        fra_num_by_dose: a list, correlation number starting from index 0,
                if this number is larger than g12b length, will give a warning message, and
                will use g12b length to replace this number
                by default is None, will = [ g12b.shape[0] ]
        dose_label: the label of each dose, also is the keys of returned g2, lag
        log_taus: if true, will only return a g2 with the corresponding tau values
                    as calculated by multi-tau defined taus
    Return:

        g2_series, a dict, with keys as dose_label (corrected if a warning message is given)
        lag_steps, the corresponding lags

    '''
    g2={}
    lag_steps = {}
    L,L,qs= g12b.shape
    if fra_num_by_dose is None:
        fra_num_by_dose = [L]
    if dose_label is None:
        dose_label = fra_num_by_dose
    fra_num_by_dose = sorted( fra_num_by_dose )
    dose_label = sorted( dose_label )
    for i, good_end in enumerate(fra_num_by_dose):
        key = round(dose_label[i] ,3)
        #print( good_end )
        if good_end>L:
            warnings.warn("Warning: the dose value is too large, and please check the maximum dose in this data set and give a smaller dose value. We will use the maximum dose of the data.")
            good_end = L
        if not log_taus:
            g2[ key ] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] )
        else:
            #print( good_end, num_bufs )
            lag_step = get_multi_tau_lag_steps(good_end, num_bufs)
            lag_step = lag_step[ lag_step < good_end - good_start]
            #print( len(lag_steps ) )
            lag_steps[key] = lag_step * time_step
            g2[key] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] )[lag_step]

    return lag_steps, g2
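# Usage sketch (hypothetical numbers, illustration only): for a 1000-frame two-time
# array g12b of shape (1000, 1000, noqs) recorded with a 1.34 ms frame period,
#   lag_steps, g2 = get_series_g2_from_g12( g12b, fra_num_by_dose=[100, 500, 1000],
#                        dose_label=[100, 500, 1000], num_bufs=8, time_step=0.00134 )
# returns dicts keyed by the dose labels, e.g. g2[100] is the one-time g2 computed
# from only the first 100 frames.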



-def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2):
-    """
+def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ):
+    '''
    Calculate the frame number to be correlated by giving an X-ray exposure dose

    Parameters:
        exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(frame num)*att( attenuation)
        exp_time: float, the exposure time for a xpcs time series
        dead_time: dead time for the fast shutter response time, CHX = 2ms
    Return:
        noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time )
    e.g.,

    no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ],
                                       exp_time = 1.34, dead_time = 2)

    --> no_dose_fra will be array([ 20,  50, 100, 502, 504])
-    """
-    return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att)
+    '''
+    return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att )



-def get_series_one_time_mulit_uids(
-    uids,
-    qval_dict,
-    trans=None,
-    good_start=0,
-    path=None,
-    exposure_dose=None,
-    dead_time=0,
-    num_bufs=8,
-    save_g2=True,
-    md=None,
-    imgs=None,
-    direct_load_data=False,
-):
-    """Calculate a dose dependent series of one time correlations from two time
+def get_series_one_time_mulit_uids( uids, qval_dict, trans = None, good_start=0, path=None,
+                                   exposure_dose = None, dead_time = 0,
+                                   num_bufs =8, save_g2=True,
+                                   md = None, imgs=None, direct_load_data= False ):
+    ''' Calculate a dose dependent series of one time correlations from two time
    Parameters:
        uids: list, a list of uid
        trans: list, same length as uids, the transmission list
        exposure_dose: list, a list of x-ray exposure doses;
            by default is None, namely,  = [ max_frame_number ],
            can be [3.34, 334, 3340] in unit of ms, in unit of exp_time(ms)*N(frame num)*att( attenuation)
        path: string, where to load the two time, if None, ask for it
                the real g12 path is two_time_path + uid + '/'
    Return:
        taus_uids, with keys as uid, and
                taus_uids[uid] is also a dict, with keys as dose_frame
        g2_uids, with keys as uid, and
                g2_uids[uid] is also a dict, with keys as  dose_frame
        will also save g2 results to the 'path'
    '''

    if path is None:
        print( 'Please calculate two time function first by using get_two_time_mulit_uids function.')
    else:
        taus_uids = {}
        g2_uids = {}
        for i, uid in enumerate(uids):
            print('UID: %s is in processing...' 
% uid) + print('UID: %s is in processing...'%uid) if not direct_load_data: - md = get_meta_data(uid) - imgs = load_data(uid, md["detector"], reverse=True) - # print(md) - detectors = md["detector"] - if isinstance(detectors, list): - if len(detectors) > 1: - if "_image" in md["detector"]: - pref = md["detector"][:-5] + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + #print(md) + detectors = md['detector'] + if isinstance( detectors,list): + if len(detectors)>1: + if '_image' in md['detector']: + pref = md['detector'][:-5] else: - pref = md["detector"] - for k in [ - "beam_center_x", - "beam_center_y", - "cam_acquire_time", - "cam_acquire_period", - "cam_num_images", - "wavelength", - "det_distance", - "photon_energy", - ]: - md[k] = md[pref + "%s" % k] - + pref=md['detector'] + for k in [ 'beam_center_x', 'beam_center_y','cam_acquire_time','cam_acquire_period','cam_num_images', + 'wavelength', 'det_distance', 'photon_energy']: + md[k] = md[ pref + '%s'%k] + else: pass N = len(imgs) if exposure_dose is None: exposure_dose = [N] try: - g2_path = path + uid + "/" - g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) + g2_path = path + uid + '/' + g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) except: - g2_path = path + md["uid"] + "/" - g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) + g2_path = path + md['uid'] + '/' + g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) try: - exp_time = float(md["cam_acquire_time"]) # *1000 #from second to ms - except: - exp_time = float(md["exposure time"]) # * 1000 #from second to ms - if trans is None: + exp_time = float( md['cam_acquire_time']) #*1000 #from second to ms + except: + exp_time = float( md['exposure time']) #* 1000 #from second to ms + if trans is None: try: - transi = md["transmission"] + transi = md['transmission'] except: - transi = [1] + transi = [1] else: transi = trans[i] - fra_num_by_dose = get_fra_num_by_dose( - exp_dose=exposure_dose, exp_time=exp_time, dead_time=dead_time, att=transi - ) - - print("uid: %s--> fra_num_by_dose: %s" % (uid, fra_num_by_dose)) - - taus_uid, g2_uid = get_series_g2_from_g12( - g12b, - fra_num_by_dose=fra_num_by_dose, - dose_label=exposure_dose, - good_start=good_start, - num_bufs=num_bufs, - time_step=exp_time, - ) # md['cam_acquire_period'] ) - g2_uids["uid_%03d=%s" % (i, uid)] = g2_uid - taus_uids["uid_%03d=%s" % (i, uid)] = taus_uid + fra_num_by_dose = get_fra_num_by_dose( exp_dose = exposure_dose, + exp_time =exp_time, dead_time = dead_time, att = transi ) + + print( 'uid: %s--> fra_num_by_dose: %s'%(uid, fra_num_by_dose ) ) + + taus_uid, g2_uid = get_series_g2_from_g12( g12b, fra_num_by_dose=fra_num_by_dose, + dose_label = exposure_dose, + good_start=good_start, num_bufs=num_bufs, + time_step = exp_time)#md['cam_acquire_period'] ) + g2_uids['uid_%03d=%s'%(i,uid)] = g2_uid + taus_uids['uid_%03d=%s'%(i,uid)] = taus_uid if save_g2: - for k in list(g2_uid.keys()): - # print(k) - uid_ = uid + "_fra_%s_%s" % (good_start, k) - save_g2_general( - g2_uid[k], - taus=taus_uid[k], - qr=np.array(list(qval_dict.values()))[:, 0], - uid=uid_ + "_g2.csv", - path=g2_path, - return_res=False, - ) + for k in list( g2_uid.keys()): + #print(k) + uid_ = uid + '_fra_%s_%s'%(good_start, k ) + save_g2_general( g2_uid[k], taus=taus_uid[k],qr=np.array( list( qval_dict.values() ) )[:,0], + uid=uid_+'_g2.csv', path= g2_path, return_res=False ) return taus_uids, g2_uids - - -def plot_dose_g2( - taus_uids, - g2_uids, - qval_dict, - qth_interest=None, - ylim=[0.95, 1.05], - vshift=0.1, - 
fit_res=None, - geometry="saxs", - filename="dose" + "_g2", - legend_size=None, - path=None, - function=None, - g2_labels=None, - ylabel="g2_dose", - append_name="_dose", - return_fig=False, -): - """Plot a does-dependent g2 + + + + +def plot_dose_g2( taus_uids, g2_uids, qval_dict, qth_interest = None, ylim=[0.95, 1.05], vshift=0.1, + fit_res= None, geometry= 'saxs',filename= 'dose'+'_g2', legend_size=None, + path= None, function= None, g2_labels=None, ylabel= 'g2_dose', append_name= '_dose', + return_fig=False): + '''Plot a does-dependent g2 taus_uids, dict, with format as {uid1: { dose1: tau_1, dose2: tau_2...}, uid2: ...} g2_uids, dict, with format as {uid1: { dose1: g2_1, dose2: g2_2...}, uid2: ...} qval_dict: a dict of qvals vshift: float, vertical shift value of different dose of g2 - - """ - - uids = sorted(list(taus_uids.keys())) - # print( uids ) - dose = sorted(list(taus_uids[uids[0]].keys())) - if qth_interest is None: - g2_dict = {} + + ''' + + uids = sorted( list( taus_uids.keys() ) ) + #print( uids ) + dose = sorted( list( taus_uids[ uids[0] ].keys() ) ) + if qth_interest is None: + g2_dict= {} taus_dict = {} if g2_labels is None: - g2_labels = [] - for i in range(len(dose)): + g2_labels = [] + for i in range( len( dose )): g2_dict[i + 1] = [] - taus_dict[i + 1] = [] - # print ( i ) - for j in range(len(uids)): - # print( uids[i] , dose[j]) - g2_dict[i + 1].append(g2_uids[uids[j]][dose[i]] + vshift * i) - taus_dict[i + 1].append(taus_uids[uids[j]][dose[i]]) - if j == 0: - g2_labels.append("Dose_%s" % dose[i]) - - plot_g2_general( - g2_dict, - taus_dict, - ylim=[ylim[0], ylim[1] + vshift * len(dose)], - qval_dict=qval_dict, - fit_res=None, - geometry=geometry, - filename=filename, - path=path, - function=function, - ylabel=ylabel, - g2_labels=g2_labels, - append_name=append_name, - ) - + taus_dict[i +1 ] = [] + #print ( i ) + for j in range( len( uids )): + #print( uids[i] , dose[j]) + g2_dict[i +1 ].append( g2_uids[ uids[j] ][ dose[i] ] + vshift*i ) + taus_dict[i +1 ].append( taus_uids[ uids[j] ][ dose[i] ] ) + if j ==0: + g2_labels.append( 'Dose_%s'%dose[i] ) + + plot_g2_general( g2_dict, taus_dict, + ylim=[ylim[0], ylim[1] + vshift * len(dose)], + qval_dict = qval_dict, fit_res= None, geometry= geometry,filename= filename, + path= path, function= function, ylabel= ylabel, g2_labels=g2_labels, append_name= append_name ) + else: - fig, ax = plt.subplots() - q = qval_dict[qth_interest - 1][0] - j = 0 + fig,ax= plt.subplots() + q = qval_dict[qth_interest-1][0] + j = 0 for uid in uids: - # uid = uids[0] - # print( uid ) - dose_list = sorted(list(taus_uids["%s" % uid].keys())) - # print( dose_list ) + #uid = uids[0] + #print( uid ) + dose_list = sorted( list(taus_uids['%s'%uid].keys()) ) + #print( dose_list ) for i, dose in enumerate(dose_list): dose = float(dose) - if j == 0: - legend = "dose_%s" % round(dose, 2) + if j ==0: + legend= 'dose_%s'%round(dose,2) else: - legend = "" - - # print( markers[i], colors[i] ) - - plot1D( - x=taus_uids["%s" % uid][dose_list[i]], - y=g2_uids["%s" % uid][dose_list[i]][:, qth_interest] + i * vshift, - logx=True, - ax=ax, - legend=legend, - m=markers[i], - c=colors[i], - lw=3, - title="%s_Q=%s" % (uid, q) + r"$\AA^{-1}$", - legend_size=legend_size, - ) - ylabel = "g2--Dose (trans*exptime_sec)" - j += 1 - - ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") - ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) - ax.set_ylim(ylim) + legend = '' + + #print( markers[i], colors[i] ) + + plot1D(x= taus_uids['%s'%uid][dose_list[i]], + y 
=g2_uids['%s'%uid][dose_list[i]][:,qth_interest] + i*vshift, + logx=True, ax=ax, legend= legend, m = markers[i], c= colors[i], + lw=3, title='%s_Q=%s'%(uid, q) + r'$\AA^{-1}$', legend_size=legend_size ) + ylabel='g2--Dose (trans*exptime_sec)' + j +=1 + + ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + ax.set_ylim ( ylim ) if return_fig: return fig, ax - # return taus_dict, g2_dict + #return taus_dict, g2_dict + + -def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse=True, clear_plot=False): - """Y.G. Dec 22, 2016 +def run_xpcs_xsvs_single( uid, run_pargs, md_cor=None, return_res=False,reverse=True, clear_plot=False ): + '''Y.G. Dec 22, 2016 Run XPCS XSVS analysis for a single uid Parameters: uid: unique id @@ -1094,10 +888,10 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= save analysis result to csv/png/h5 files return_res: if true, return a dict, containing g2,g4,g12,contrast et.al. depending on the run type An example for the run_pargs: - - run_pargs= dict( + + run_pargs= dict( scat_geometry = 'gi_saxs' #suport 'saxs', 'gi_saxs', 'ang_saxs' (for anisotropics saxs or flow-xpcs) - force_compress = True,#False, + force_compress = True,#False, para_compress = True, run_fit_form = False, run_waterfall = True,#False, @@ -1111,1342 +905,794 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= att_pdf_report = True, show_plot = False, - CYCLE = '2016_3', + CYCLE = '2016_3', mask_path = '/XF11ID/analysis/2016_3/masks/', - mask_name = 'Nov28_4M_SAXS_mask.npy', - good_start = 5, + mask_name = 'Nov28_4M_SAXS_mask.npy', + good_start = 5, uniformq = True, inner_radius= 0.005, #0.005 for 50 nm, 0.006, #for 10nm/coralpor - outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor + outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor num_rings = 12, - gap_ring_number = 6, - number_rings= 1, - #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 - #width = 0.0002 - qth_interest = 1, #the intested single qth + gap_ring_number = 6, + number_rings= 1, + #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + #width = 0.0002 + qth_interest = 1, #the intested single qth use_sqnorm = False, use_imgsum_norm = True, - pdf_version = '_1' #for pdf report name + pdf_version = '_1' #for pdf report name ) - + md_cor: if not None, will update the metadata with md_cor - - """ - - scat_geometry = run_pargs["scat_geometry"] - force_compress = run_pargs["force_compress"] - para_compress = run_pargs["para_compress"] - run_fit_form = run_pargs["run_fit_form"] - run_waterfall = run_pargs["run_waterfall"] - run_t_ROI_Inten = run_pargs["run_t_ROI_Inten"] - - # run_fit_g2 = run_pargs['run_fit_g2'], - fit_g2_func = run_pargs["fit_g2_func"] - run_one_time = run_pargs["run_one_time"] - run_two_time = run_pargs["run_two_time"] - run_four_time = run_pargs["run_four_time"] - run_xsvs = run_pargs["run_xsvs"] + + ''' + + scat_geometry = run_pargs['scat_geometry'] + force_compress = run_pargs['force_compress'] + para_compress = run_pargs['para_compress'] + run_fit_form = run_pargs['run_fit_form'] + run_waterfall = run_pargs['run_waterfall'] + run_t_ROI_Inten = run_pargs['run_t_ROI_Inten'] + + #run_fit_g2 = run_pargs['run_fit_g2'], + fit_g2_func = run_pargs['fit_g2_func'] + run_one_time = run_pargs['run_one_time'] + run_two_time = run_pargs['run_two_time'] + run_four_time = run_pargs['run_four_time'] + run_xsvs=run_pargs['run_xsvs'] try: 
- run_dose = run_pargs["run_dose"] + run_dose = run_pargs['run_dose'] except: - run_dose = False - ############################################################### - if scat_geometry == "gi_saxs": # to be done for other types - run_xsvs = False - ############################################################### - + run_dose= False ############################################################### - if scat_geometry == "ang_saxs": - run_xsvs = False - run_waterfall = False - run_two_time = False - run_four_time = False - run_t_ROI_Inten = False + if scat_geometry =='gi_saxs': #to be done for other types + run_xsvs = False; + ############################################################### + ############################################################### - if "bin_frame" in list(run_pargs.keys()): - bin_frame = run_pargs["bin_frame"] - bin_frame_number = run_pargs["bin_frame_number"] + if scat_geometry == 'ang_saxs': + run_xsvs= False;run_waterfall=False;run_two_time=False;run_four_time=False;run_t_ROI_Inten=False; + ############################################################### + if 'bin_frame' in list( run_pargs.keys() ): + bin_frame = run_pargs['bin_frame'] + bin_frame_number= run_pargs['bin_frame_number'] else: - bin_frame = False + bin_frame = False if not bin_frame: - bin_frame_number = 1 - - att_pdf_report = run_pargs["att_pdf_report"] - show_plot = run_pargs["show_plot"] - CYCLE = run_pargs["CYCLE"] - mask_path = run_pargs["mask_path"] - mask_name = run_pargs["mask_name"] - good_start = run_pargs["good_start"] - use_imgsum_norm = run_pargs["use_imgsum_norm"] + bin_frame_number = 1 + + att_pdf_report = run_pargs['att_pdf_report'] + show_plot = run_pargs['show_plot'] + CYCLE = run_pargs['CYCLE'] + mask_path = run_pargs['mask_path'] + mask_name = run_pargs['mask_name'] + good_start = run_pargs['good_start'] + use_imgsum_norm = run_pargs['use_imgsum_norm'] try: - use_sqnorm = run_pargs["use_sqnorm"] + use_sqnorm = run_pargs['use_sqnorm'] except: use_sqnorm = False try: - inc_x0 = run_pargs["inc_x0"] - inc_y0 = run_pargs["inc_y0"] + inc_x0 = run_pargs['inc_x0'] + inc_y0 = run_pargs['inc_y0'] except: inc_x0 = None - inc_y0 = None - - # for different scattering geogmetry, we only need to change roi_mask - # and qval_dict - qval_dict = run_pargs["qval_dict"] - if scat_geometry != "ang_saxs": - roi_mask = run_pargs["roi_mask"] - qind, pixelist = roi.extract_label_indices(roi_mask) + inc_y0= None + + #for different scattering geogmetry, we only need to change roi_mask + #and qval_dict + qval_dict = run_pargs['qval_dict'] + if scat_geometry != 'ang_saxs': + roi_mask = run_pargs['roi_mask'] + qind, pixelist = roi.extract_label_indices( roi_mask ) noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs + 1))[1:] - - else: - roi_mask_p = run_pargs["roi_mask_p"] - qval_dict_p = run_pargs["qval_dict_p"] - roi_mask_v = run_pargs["roi_mask_v"] - qval_dict_v = run_pargs["qval_dict_v"] - - if scat_geometry == "gi_saxs": - refl_x0 = run_pargs["refl_x0"] - refl_y0 = run_pargs["refl_y0"] - Qr, Qz, qr_map, qz_map = run_pargs["Qr"], run_pargs["Qz"], run_pargs["qr_map"], run_pargs["qz_map"] - - taus = None - g2 = None - tausb = None - g2b = None - g12b = None - taus4 = None - g4 = None - times_xsv = None - contrast_factorL = None - qth_interest = run_pargs["qth_interest"] - pdf_version = run_pargs["pdf_version"] - + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + + else: + roi_mask_p = run_pargs['roi_mask_p'] + qval_dict_p = run_pargs['qval_dict_p'] + roi_mask_v = run_pargs['roi_mask_v'] + 
qval_dict_v = run_pargs['qval_dict_v'] + + if scat_geometry == 'gi_saxs': + refl_x0 = run_pargs['refl_x0'] + refl_y0 = run_pargs['refl_y0'] + Qr, Qz, qr_map, qz_map = run_pargs['Qr'], run_pargs['Qz'], run_pargs['qr_map'], run_pargs['qz_map'] + + + taus=None;g2=None;tausb=None;g2b=None;g12b=None;taus4=None;g4=None;times_xsv=None;contrast_factorL=None; + qth_interest = run_pargs['qth_interest'] + pdf_version = run_pargs['pdf_version'] + + try: - username = run_pargs["username"] + username = run_pargs['username'] except: username = getpass.getuser() - - data_dir0 = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") + + data_dir0 = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') os.makedirs(data_dir0, exist_ok=True) - print("Results from this analysis will be stashed in the directory %s" % data_dir0) - # uid = (sys.argv)[1] - print("*" * 40) - print("*" * 5 + "The processing uid is: %s" % uid + "*" * 5) - print("*" * 40) - suid = uid # [:6] - data_dir = os.path.join(data_dir0, "%s/" % suid) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + #uid = (sys.argv)[1] + print ('*'*40) + print ( '*'*5 + 'The processing uid is: %s'%uid + '*'*5) + print ('*'*40) + suid = uid #[:6] + data_dir = os.path.join(data_dir0, '%s/'%suid) os.makedirs(data_dir, exist_ok=True) - print("Results from this analysis will be stashed in the directory %s" % data_dir) - md = get_meta_data(uid) - uidstr = "uid=%s" % uid[:6] - imgs = load_data(uid, md["detector"], reverse=reverse) - md.update(imgs.md) + print('Results from this analysis will be stashed in the directory %s' % data_dir) + md = get_meta_data( uid ) + uidstr = 'uid=%s'%uid[:6] + imgs = load_data( uid, md['detector'], reverse= reverse ) + md.update( imgs.md ) Nimg = len(imgs) if md_cor is not None: - md.update(md_cor) - + md.update( md_cor ) + + if inc_x0 is not None: - md["beam_center_x"] = inc_x0 + md['beam_center_x']= inc_x0 if inc_y0 is not None: - md["beam_center_y"] = inc_y0 - - # print( run_pargs ) - # print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) - # print( inc_x0, inc_y0 ) - - if md["detector"] == "eiger1m_single_image": - Chip_Mask = np.load("/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy") - elif md["detector"] == "eiger4m_single_image" or md["detector"] == "image": - Chip_Mask = np.array(np.load("/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy"), dtype=bool) - BadPix = np.load("/XF11ID/analysis/2018_1/BadPix_4M.npy") + md['beam_center_y']= inc_y0 + + #print( run_pargs ) + #print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) + #print( inc_x0, inc_y0 ) + + if md['detector'] =='eiger1m_single_image': + Chip_Mask=np.load( '/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy') + elif md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': + Chip_Mask= np.array(np.load( '/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy'), dtype=bool) + BadPix = np.load('/XF11ID/analysis/2018_1/BadPix_4M.npy' ) Chip_Mask.ravel()[BadPix] = 0 - elif md["detector"] == "eiger500K_single_image": - Chip_Mask = 1 # to be defined the chip mask + elif md['detector'] =='eiger500K_single_image': + Chip_Mask= 1 #to be defined the chip mask else: Chip_Mask = 1 - # show_img(Chip_Mask) - - center = [int(md["beam_center_y"]), int(md["beam_center_x"])] # beam center [y,x] for python image - - pixel_mask = 1 - np.int_(np.array(imgs.md["pixel_mask"], dtype=bool)) - print("The data are: %s" % imgs) - + #show_img(Chip_Mask) + + center = [ int(md['beam_center_y']),int( md['beam_center_x'] ) ] #beam center 
[y,x] for python image + + + pixel_mask = 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool) ) + print( 'The data are: %s' %imgs ) + if False: - print_dict( - md, - [ - "suid", - "number of images", - "uid", - "scan_id", - "start_time", - "stop_time", - "sample", - "Measurement", - "acquire period", - "exposure time", - "det_distanc", - "beam_center_x", - "beam_center_y", - ], - ) - ## Overwrite Some Metadata if Wrong Input - dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( - md, Nimg, inc_x0=inc_x0, inc_y0=inc_y0, pixelsize=7.5 * 10 * (-5) - ) - - print("The beam center is: %s" % center) - - timeperframe *= bin_frame_number - - setup_pargs = dict( - uid=uidstr, - dpix=dpix, - Ldet=Ldet, - lambda_=lambda_, - exposuretime=exposuretime, - timeperframe=timeperframe, - center=center, - path=data_dir, - ) - # print_dict( setup_pargs ) - - mask = load_mask(mask_path, mask_name, plot_=False, image_name=uidstr + "_mask", reverse=reverse) + print_dict( md, ['suid', 'number of images', 'uid', 'scan_id', 'start_time', 'stop_time', 'sample', 'Measurement', + 'acquire period', 'exposure time', + 'det_distanc', 'beam_center_x', 'beam_center_y', ] ) + ## Overwrite Some Metadata if Wrong Input + dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( + md, Nimg, inc_x0 = inc_x0, inc_y0= inc_y0, pixelsize = 7.5*10*(-5) ) + + print( 'The beam center is: %s'%center ) + + timeperframe *= bin_frame_number + + setup_pargs=dict(uid=uidstr, dpix= dpix, Ldet=Ldet, lambda_= lambda_, exposuretime=exposuretime, + timeperframe=timeperframe, center=center, path= data_dir) + #print_dict( setup_pargs ) + + mask = load_mask(mask_path, mask_name, plot_ = False, image_name = uidstr + '_mask', reverse=reverse ) mask *= pixel_mask - if md["detector"] == "eiger4m_single_image": - mask[:, 2069] = 0 # False #Concluded from the previous results - show_img(mask, image_name=uidstr + "_mask", save=True, path=data_dir) - mask_load = mask.copy() - imgsa = apply_mask(imgs, mask) + if md['detector'] =='eiger4m_single_image': + mask[:,2069] =0 # False #Concluded from the previous results + show_img(mask,image_name = uidstr + '_mask', save=True, path=data_dir) + mask_load=mask.copy() + imgsa = apply_mask( imgs, mask ) - img_choice_N = 2 - img_samp_index = random.sample(range(len(imgs)), img_choice_N) - avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uidstr) + img_choice_N = 2 + img_samp_index = random.sample( range(len(imgs)), img_choice_N) + avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uidstr) + if avg_img.max() == 0: - print("There are no photons recorded for this uid: %s" % uid) - print("The data analysis should be terminated! Please try another uid.") - - else: - if scat_geometry != "saxs": - show_img( - avg_img, - vmin=0.1, - vmax=np.max(avg_img * 0.1), - logs=True, - image_name=uidstr + "_%s_frames_avg" % img_choice_N, - save=True, - path=data_dir, - ) - else: - show_saxs_qmap( - avg_img, - setup_pargs, - width=400, - show_pixel=False, - vmin=0.1, - vmax=np.max(avg_img), - logs=True, - image_name=uidstr + "_%s_frames_avg" % img_choice_N, - ) - - compress = True - photon_occ = len(np.where(avg_img)[0]) / (imgsa[0].size) - # compress = photon_occ < .4 #if the photon ocupation < 0.5, do compress - print("The non-zeros photon occupation is %s." 
% (photon_occ))
        print("Will " + 'Always ' + ['NOT', 'DO'][compress] + " apply compress process.")
        #good_start = 5  #make the good_start at least 0
        t0= time.time()
        filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid']
        mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename,
                 force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold= 1e14,
                        bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True )
        min_inten = 10
        good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] )
        print ('The good_start frame number is: %s '%good_start)
        FD = Multifile(filename, good_start, len(imgs))
        #FD = Multifile(filename, good_start, 100)
        uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
        print( uid_ )
        plot1D( y = imgsum[ np.array( [i for i in np.arange(good_start, len(imgsum)) if i not in bad_frame_list])],
                title =uidstr + '_imgsum', xlabel='Frame', ylabel='Total_Intensity', legend='imgsum' )
        run_time(t0)

        mask =  mask * Chip_Mask

        #%system free && sync && echo 3 > /proc/sys/vm/drop_caches && free
        ## Get bad frame list by a polynomial fit
        bad_frame_list =  get_bad_frame_list( imgsum, fit=True, plot=True,polyfit_order = 30,
                            scale= 5.5,  good_start = good_start, uid= uidstr, path=data_dir)
        print( 'The bad frame list length is: %s'%len(bad_frame_list) )

        ### Create new mask by masking the bad pixels and get new avg_img
        if False:
            mask = mask_exclude_badpixel( bp, mask, md['uid'])
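        # Sketch of the polynomial-fit rejection used above (illustration only, not the
        # pyCHX implementation; 'scale' sets the cut in units of the residual std):
        #   x   = np.arange( len(imgsum) )
        #   fit = np.polyval( np.polyfit( x, imgsum, 30 ), x )
        #   bad = np.where( np.abs( imgsum - fit ) > scale * np.std( imgsum - fit ) )[0]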
avg_img = get_avg_imgc(FD, sampling=1, bad_frame_list=bad_frame_list) - - show_img( - avg_img, - vmin=0.001, - vmax=np.max(avg_img), - logs=True, - aspect=1, # save_format='tif', - image_name=uidstr + "_img_avg", - save=True, - path=data_dir, - cmap=cmap_albula, - ) - - imgsum_y = imgsum[np.array([i for i in np.arange(len(imgsum)) if i not in bad_frame_list])] - imgsum_x = np.arange(len(imgsum_y)) - save_lists( - [imgsum_x, imgsum_y], label=["Frame", "Total_Intensity"], filename=uidstr + "_img_sum_t", path=data_dir - ) - plot1D( - y=imgsum_y, - title=uidstr + "_img_sum_t", - xlabel="Frame", - ylabel="Total_Intensity", - legend="imgsum", - save=True, - path=data_dir, - ) - + mask = mask_exclude_badpixel( bp, mask, md['uid']) + avg_img = get_avg_imgc( FD, sampling = 1, bad_frame_list=bad_frame_list ) + + show_img( avg_img, vmin=.001, vmax= np.max(avg_img), logs=True, aspect=1, #save_format='tif', + image_name= uidstr + '_img_avg', save=True, path=data_dir, cmap = cmap_albula ) + + imgsum_y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])] + imgsum_x = np.arange( len( imgsum_y)) + save_lists( [imgsum_x, imgsum_y], label=['Frame', 'Total_Intensity'], + filename=uidstr + '_img_sum_t', path= data_dir ) + plot1D( y = imgsum_y, title = uidstr + '_img_sum_t', xlabel='Frame', + ylabel='Total_Intensity', legend='imgsum', save=True, path=data_dir) + + ############for SAXS and ANG_SAXS (Flow_SAXS) - if scat_geometry == "saxs" or scat_geometry == "ang_saxs": - # show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, - # image_name= uidstr + '_img_avg', save=True) - # np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) + if scat_geometry =='saxs' or scat_geometry =='ang_saxs': + + #show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + # image_name= uidstr + '_img_avg', save=True) + #np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) - # try: + #try: # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) - # except: + #except: # hmask=1 - hmask = 1 - qp_saxs, iq_saxs, q_saxs = get_circular_average( - avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True - ) - - plot_circular_average( - qp_saxs, - iq_saxs, - q_saxs, - pargs=setup_pargs, - xlim=[q_saxs.min(), q_saxs.max()], - ylim=[iq_saxs.min(), iq_saxs.max()], - ) - - # pd = trans_data_to_pd( np.where( hmask !=1), - # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') - - # pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) - - # mask =np.array( mask * hmask, dtype=bool) - # show_img( mask ) + hmask=1 + qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True ) + + plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()], ylim = [iq_saxs.min(), iq_saxs.max()] ) + #pd = trans_data_to_pd( np.where( hmask !=1), + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + + #pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) + + #mask =np.array( mask * hmask, dtype=bool) + #show_img( mask ) + if run_fit_form: - form_res = fit_form_factor( - q_saxs, - iq_saxs, - guess_values={"radius": 2500, "sigma": 0.05, "delta_rho": 1e-10}, - fit_range=[0.0001, 0.015], - fit_variables={"radius": T, "sigma": T, "delta_rho": T}, - res_pargs=setup_pargs, - xlim=[0.0001, 0.015], - ) - - 
show_ROI_on_image( - avg_img, - roi_mask, - center, - label_on=False, - rwidth=700, - alpha=0.9, - save=True, - path=data_dir, - uid=uidstr, - vmin=np.min(avg_img), - vmax=np.max(avg_img), - ) - - qr = np.array([qval_dict[k][0] for k in list(qval_dict.keys())]) - plot_qIq_with_ROI( - q_saxs, - iq_saxs, - qr, - logs=True, - uid=uidstr, - xlim=[q_saxs.min(), q_saxs.max()], - ylim=[iq_saxs.min(), iq_saxs.max()], - save=True, - path=data_dir, - ) - - if scat_geometry != "ang_saxs": - Nimg = FD.end - FD.beg - time_edge = create_time_slice(N=Nimg, slice_num=3, slice_width=1, edges=None) - time_edge = np.array(time_edge) + good_start - # print( time_edge ) - qpt, iqst, qt = get_t_iqc(FD, time_edge, mask * Chip_Mask, pargs=setup_pargs, nx=1500) - plot_t_iqc( - qt, - iqst, - time_edge, - pargs=setup_pargs, - xlim=[qt.min(), qt.max()], - ylim=[iqst.min(), iqst.max()], - save=True, - ) - - elif scat_geometry == "gi_waxs": - # roi_mask[badpixel] = 0 - qr = np.array([qval_dict[k][0] for k in list(qval_dict.keys())]) - show_ROI_on_image( - avg_img, roi_mask, label_on=True, alpha=0.5, save=True, path=data_dir, uid=uidstr - ) # , vmin=1, vmax=15) - - elif scat_geometry == "gi_saxs": - show_img( - avg_img, - vmin=0.1, - vmax=np.max(avg_img * 0.1), - logs=True, - image_name=uidstr + "_img_avg", - save=True, - path=data_dir, - ) - ticks_ = get_qzr_map(qr_map, qz_map, inc_x0, Nzline=10, Nrline=10) - ticks = ticks_[:4] - plot_qzr_map(qr_map, qz_map, inc_x0, ticks=ticks_, data=avg_img, uid=uidstr, path=data_dir) - show_qzr_roi(avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, uid=uidstr) - qr_1d_pds = cal_1d_qr(avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs) - plot_qr_1d_with_ROI( - qr_1d_pds, - qr_center=np.unique(np.array(list(qval_dict.values()))[:, 0]), - loglog=False, - save=True, - uid=uidstr, - path=data_dir, - ) - - Nimg = FD.end - FD.beg - time_edge = create_time_slice(N=Nimg, slice_num=3, slice_width=1, edges=None) - time_edge = np.array(time_edge) + good_start - qrt_pds = get_t_qrc(FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid=uidstr) - plot_qrt_pds(qrt_pds, time_edge, qz_index=0, uid=uidstr, path=data_dir) + form_res = fit_form_factor( q_saxs,iq_saxs, guess_values={'radius': 2500, 'sigma':0.05, + 'delta_rho':1E-10 }, fit_range=[0.0001, 0.015], fit_variables={'radius': T, 'sigma':T, + 'delta_rho':T}, res_pargs=setup_pargs, xlim=[0.0001, 0.015]) + + show_ROI_on_image( avg_img, roi_mask, center, label_on = False, rwidth =700, alpha=.9, + save=True, path=data_dir, uid=uidstr, vmin= np.min(avg_img), vmax= np.max(avg_img) ) + + qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) + plot_qIq_with_ROI( q_saxs, iq_saxs, qr, logs=True, uid=uidstr, xlim=[q_saxs.min(), q_saxs.max()], + ylim = [iq_saxs.min(), iq_saxs.max()], save=True, path=data_dir) + + if scat_geometry != 'ang_saxs': + Nimg = FD.end - FD.beg + time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) + time_edge = np.array( time_edge ) + good_start + #print( time_edge ) + qpt, iqst, qt = get_t_iqc( FD, time_edge, mask* Chip_Mask, pargs=setup_pargs, nx=1500 ) + plot_t_iqc( qt, iqst, time_edge, pargs=setup_pargs, xlim=[qt.min(), qt.max()], + ylim = [iqst.min(), iqst.max()], save=True ) + + elif scat_geometry == 'gi_waxs': + #roi_mask[badpixel] = 0 + qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) + show_ROI_on_image( avg_img, roi_mask, label_on = True, alpha=.5,save=True, path= data_dir, uid=uidstr)#, vmin=1, vmax=15) + + elif 
scat_geometry == 'gi_saxs': + show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), + logs=True, image_name= uidstr + '_img_avg', save=True, path=data_dir) + ticks_ = get_qzr_map( qr_map, qz_map, inc_x0, Nzline=10, Nrline=10 ) + ticks = ticks_[:4] + plot_qzr_map( qr_map, qz_map, inc_x0, ticks = ticks_, data= avg_img, uid= uidstr, path = data_dir ) + show_qzr_roi( avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, uid=uidstr ) + qr_1d_pds = cal_1d_qr( avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs ) + plot_qr_1d_with_ROI( qr_1d_pds, qr_center=np.unique( np.array(list( qval_dict.values() ) )[:,0] ), + loglog=False, save=True, uid=uidstr, path = data_dir) + + Nimg = FD.end - FD.beg + time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) + time_edge = np.array( time_edge ) + good_start + qrt_pds = get_t_qrc( FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid = uidstr ) + plot_qrt_pds( qrt_pds, time_edge, qz_index = 0, uid = uidstr, path = data_dir ) + + ############################## - ##the below works for all the geometries + ##the below works for all the geometries ######################################## - if scat_geometry != "ang_saxs": - roi_inten = check_ROI_intensity( - avg_img, roi_mask, ring_number=qth_interest, uid=uidstr, save=True, path=data_dir - ) - if scat_geometry == "saxs" or scat_geometry == "gi_saxs" or scat_geometry == "gi_waxs": + if scat_geometry !='ang_saxs': + roi_inten = check_ROI_intensity( avg_img, roi_mask, ring_number= qth_interest, uid =uidstr, save=True, path=data_dir ) + if scat_geometry =='saxs' or scat_geometry =='gi_saxs' or scat_geometry =='gi_waxs': if run_waterfall: - wat = cal_waterfallc(FD, roi_mask, qindex=qth_interest, save=True, path=data_dir, uid=uidstr) - if run_waterfall: - plot_waterfallc( - wat, - qindex=qth_interest, - aspect=None, - vmax=np.max(wat), - uid=uidstr, - save=True, - path=data_dir, - beg=FD.beg, - ) - ring_avg = None - + wat = cal_waterfallc( FD, roi_mask, + qindex= qth_interest, save = True, path=data_dir,uid=uidstr) + if run_waterfall: + plot_waterfallc( wat, qindex=qth_interest, aspect=None, + vmax= np.max(wat), uid=uidstr, save =True, + path=data_dir, beg= FD.beg) + ring_avg = None + if run_t_ROI_Inten: - times_roi, mean_int_sets = cal_each_ring_mean_intensityc( - FD, roi_mask, timeperframe=None, multi_cor=True - ) - plot_each_ring_mean_intensityc(times_roi, mean_int_sets, uid=uidstr, save=True, path=data_dir) - roi_avg = np.average(mean_int_sets, axis=0) + times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, multi_cor=True ) + plot_each_ring_mean_intensityc( times_roi, mean_int_sets, uid = uidstr, save=True, path=data_dir ) + roi_avg = np.average( mean_int_sets, axis=0) - uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) lag_steps = None - + if use_sqnorm: - norm = get_pixelist_interp_iq(qp_saxs, iq_saxs, roi_mask, center) + norm = get_pixelist_interp_iq( qp_saxs, iq_saxs, roi_mask, center) else: - norm = None - + norm=None + define_good_series = False if define_good_series: - FD = Multifile(filename, beg=good_start, end=Nimg) - uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) - print(uid_) - - if "g2_fit_variables" in list(run_pargs.keys()): - g2_fit_variables = run_pargs["g2_fit_variables"] + FD = Multifile(filename, beg = good_start, end = Nimg) + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + print( uid_ ) + + if 'g2_fit_variables' in list( run_pargs.keys() ): + 
g2_fit_variables = run_pargs['g2_fit_variables'] else: - g2_fit_variables = {"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True} + g2_fit_variables = {'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True} - if "g2_guess_values" in list(run_pargs.keys()): - g2_guess_values = run_pargs["g2_guess_values"] + if 'g2_guess_values' in list( run_pargs.keys() ): + g2_guess_values = run_pargs['g2_guess_values'] else: - g2_guess_values = { - "baseline": 1.0, - "beta": 0.05, - "alpha": 1.0, - "relaxation_rate": 0.01, - } - - if "g2_guess_limits" in list(run_pargs.keys()): - g2_guess_limits = run_pargs["g2_guess_limits"] + g2_guess_values= {'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,} + + if 'g2_guess_limits' in list( run_pargs.keys()): + g2_guess_limits = run_pargs['g2_guess_limits'] else: - g2_guess_limits = dict(baseline=[1, 2], alpha=[0, 2], beta=[0, 1], relaxation_rate=[0.001, 5000]) - - if run_one_time: + g2_guess_limits = dict( baseline =[1, 2], alpha=[0, 2], beta = [0, 1], relaxation_rate= [0.001, 5000]) + + if run_one_time: if use_imgsum_norm: imgsum_ = imgsum else: - imgsum_ = None - if scat_geometry != "ang_saxs": + imgsum_ = None + if scat_geometry !='ang_saxs': t0 = time.time() - g2, lag_steps = cal_g2p( - FD, roi_mask, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm - ) + g2, lag_steps = cal_g2p( FD, roi_mask, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) run_time(t0) - taus = lag_steps * timeperframe - g2_pds = save_g2_general( - g2, - taus=taus, - qr=np.array(list(qval_dict.values()))[:, 0], - uid=uid_ + "_g2.csv", - path=data_dir, - return_res=True, - ) - g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( - g2, - taus, - function=fit_g2_func, - vlim=[0.95, 1.05], - fit_range=None, - fit_variables=g2_fit_variables, - guess_values=g2_guess_values, - guess_limits=g2_guess_limits, - ) - - g2_fit_paras = save_g2_fit_para_tocsv( - g2_fit_result, filename=uid_ + "_g2_fit_paras.csv", path=data_dir - ) - - # if run_one_time: - # plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, - # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') - - plot_g2_general( - g2_dict={1: g2, 2: g2_fit}, - taus_dict={1: taus, 2: taus_fit}, - vlim=[0.95, 1.05], - qval_dict=qval_dict, - fit_res=g2_fit_result, - geometry=scat_geometry, - filename=uid_ + "_g2", - path=data_dir, - function=fit_g2_func, - ylabel="g2", - append_name="_fit", - ) - - D0, qrate_fit_res = get_q_rate_fit_general( - qval_dict, g2_fit_paras["relaxation_rate"], geometry=scat_geometry - ) - plot_q_rate_fit_general( - qval_dict, - g2_fit_paras["relaxation_rate"], - qrate_fit_res, - geometry=scat_geometry, - uid=uid_, - path=data_dir, - ) - + taus = lag_steps * timeperframe + g2_pds = save_g2_general( g2, taus=taus,qr=np.array( list( qval_dict.values() ) )[:,0], + uid=uid_+'_g2.csv', path= data_dir, return_res=True ) + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables= g2_fit_variables, + guess_values= g2_guess_values, + guess_limits = g2_guess_limits) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + #if run_one_time: + #plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + 
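            # Note (a sketch, not the exact pyCHX implementation): the 'stretched' model
            # commonly used here is typically of the form
            #   g2(tau) = baseline + beta * exp( -2 * (relaxation_rate * tau)**alpha )
            # following the g2_fit_variables names above: 'beta' is the speckle contrast,
            # 'relaxation_rate' is Gamma in 1/s, 'alpha' the stretching exponent.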
+ plot_g2_general( g2_dict={1:g2, 2:g2_fit}, taus_dict={1:taus, 2:taus_fit},vlim=[0.95, 1.05], + qval_dict = qval_dict, fit_res= g2_fit_result, geometry=scat_geometry,filename=uid_ + '_g2', + path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_fit') + + D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], qrate_fit_res, + geometry= scat_geometry,uid=uid_ , path= data_dir ) + + else: - t0 = time.time() - g2_v, lag_steps_v = cal_g2p( - FD, roi_mask_v, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm - ) - g2_p, lag_steps_p = cal_g2p( - FD, roi_mask_p, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm - ) - run_time(t0) + t0 = time.time() + g2_v, lag_steps_v = cal_g2p( FD, roi_mask_v, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + g2_p, lag_steps_p = cal_g2p( FD, roi_mask_p, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + run_time(t0) + + taus_v = lag_steps_v * timeperframe + g2_pds_v = save_g2_general( g2_v, taus=taus_v,qr=np.array( list( qval_dict_v.values() ) )[:,0], + uid=uid_+'_g2v.csv', path= data_dir, return_res=True ) + + taus_p = lag_steps_p * timeperframe + g2_pds_p = save_g2_general( g2_p, taus=taus_p,qr=np.array( list( qval_dict_p.values() ) )[:,0], + uid=uid_+'_g2p.csv', path= data_dir, return_res=True ) + + fit_g2_func_v = 'stretched' #for vertical + g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( g2_v, taus_v, + function = fit_g2_func_v, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + g2_fit_paras_v = save_g2_fit_para_tocsv(g2_fit_result_v, filename= uid_ +'_g2_fit_paras_v.csv', path=data_dir ) + + fit_g2_func_p ='flow_para' #for parallel + g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( g2_p, taus_p, + function = fit_g2_func_p, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True,'flow_velocity':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,'flow_velocity':1}) + g2_fit_paras_p = save_g2_fit_para_tocsv(g2_fit_result_p, filename= uid_ +'_g2_fit_paras_p.csv', path=data_dir ) + + + + plot_g2_general( g2_dict={1:g2_v, 2:g2_fit_v}, taus_dict={1:taus_v, 2:taus_fit_v},vlim=[0.95, 1.05], + qval_dict = qval_dict_v, fit_res= g2_fit_result_v, geometry=scat_geometry,filename= uid_+'_g2_v', + path= data_dir, function= fit_g2_func_v, ylabel='g2_v', append_name= '_fit') + + plot_g2_general( g2_dict={1:g2_p, 2:g2_fit_p}, taus_dict={1:taus_p, 2:taus_fit_p},vlim=[0.95, 1.05], + qval_dict = qval_dict_p, fit_res= g2_fit_result_p, geometry=scat_geometry,filename= uid_+'_g2_p', + path= data_dir, function= fit_g2_func_p, ylabel='g2_p', append_name= '_fit') + + combine_images( [data_dir + uid_+'_g2_v_fit.png', data_dir + uid_+'_g2_p_fit.png'], data_dir + uid_+'_g2_fit.png', outsize=(2000, 2400) ) + + + D0_v, qrate_fit_res_v = get_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], qrate_fit_res_v, + geometry= scat_geometry,uid=uid_ +'_vert' , path= data_dir ) + + D0_p, qrate_fit_res_p = get_q_rate_fit_general( qval_dict_p, 
g2_fit_paras_p['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], qrate_fit_res_p, + geometry= scat_geometry,uid=uid_ +'_para' , path= data_dir ) + + + combine_images( [data_dir + uid_+ '_vert_Q_Rate_fit.png', data_dir + uid_+ '_para_Q_Rate_fit.png'], data_dir + uid_+'_Q_Rate_fit.png', outsize=(2000, 2400) ) - taus_v = lag_steps_v * timeperframe - g2_pds_v = save_g2_general( - g2_v, - taus=taus_v, - qr=np.array(list(qval_dict_v.values()))[:, 0], - uid=uid_ + "_g2v.csv", - path=data_dir, - return_res=True, - ) - - taus_p = lag_steps_p * timeperframe - g2_pds_p = save_g2_general( - g2_p, - taus=taus_p, - qr=np.array(list(qval_dict_p.values()))[:, 0], - uid=uid_ + "_g2p.csv", - path=data_dir, - return_res=True, - ) - - fit_g2_func_v = "stretched" # for vertical - g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( - g2_v, - taus_v, - function=fit_g2_func_v, - vlim=[0.95, 1.05], - fit_range=None, - fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, - guess_values={ - "baseline": 1.0, - "beta": 0.05, - "alpha": 1.0, - "relaxation_rate": 0.01, - }, - ) - g2_fit_paras_v = save_g2_fit_para_tocsv( - g2_fit_result_v, filename=uid_ + "_g2_fit_paras_v.csv", path=data_dir - ) - - fit_g2_func_p = "flow_para" # for parallel - g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( - g2_p, - taus_p, - function=fit_g2_func_p, - vlim=[0.95, 1.05], - fit_range=None, - fit_variables={ - "baseline": True, - "beta": True, - "alpha": False, - "relaxation_rate": True, - "flow_velocity": True, - }, - guess_values={ - "baseline": 1.0, - "beta": 0.05, - "alpha": 1.0, - "relaxation_rate": 0.01, - "flow_velocity": 1, - }, - ) - g2_fit_paras_p = save_g2_fit_para_tocsv( - g2_fit_result_p, filename=uid_ + "_g2_fit_paras_p.csv", path=data_dir - ) - - plot_g2_general( - g2_dict={1: g2_v, 2: g2_fit_v}, - taus_dict={1: taus_v, 2: taus_fit_v}, - vlim=[0.95, 1.05], - qval_dict=qval_dict_v, - fit_res=g2_fit_result_v, - geometry=scat_geometry, - filename=uid_ + "_g2_v", - path=data_dir, - function=fit_g2_func_v, - ylabel="g2_v", - append_name="_fit", - ) - - plot_g2_general( - g2_dict={1: g2_p, 2: g2_fit_p}, - taus_dict={1: taus_p, 2: taus_fit_p}, - vlim=[0.95, 1.05], - qval_dict=qval_dict_p, - fit_res=g2_fit_result_p, - geometry=scat_geometry, - filename=uid_ + "_g2_p", - path=data_dir, - function=fit_g2_func_p, - ylabel="g2_p", - append_name="_fit", - ) - - combine_images( - [data_dir + uid_ + "_g2_v_fit.png", data_dir + uid_ + "_g2_p_fit.png"], - data_dir + uid_ + "_g2_fit.png", - outsize=(2000, 2400), - ) - - D0_v, qrate_fit_res_v = get_q_rate_fit_general( - qval_dict_v, g2_fit_paras_v["relaxation_rate"], geometry=scat_geometry - ) - plot_q_rate_fit_general( - qval_dict_v, - g2_fit_paras_v["relaxation_rate"], - qrate_fit_res_v, - geometry=scat_geometry, - uid=uid_ + "_vert", - path=data_dir, - ) - - D0_p, qrate_fit_res_p = get_q_rate_fit_general( - qval_dict_p, g2_fit_paras_p["relaxation_rate"], geometry=scat_geometry - ) - plot_q_rate_fit_general( - qval_dict_p, - g2_fit_paras_p["relaxation_rate"], - qrate_fit_res_p, - geometry=scat_geometry, - uid=uid_ + "_para", - path=data_dir, - ) - - combine_images( - [data_dir + uid_ + "_vert_Q_Rate_fit.png", data_dir + uid_ + "_para_Q_Rate_fit.png"], - data_dir + uid_ + "_Q_Rate_fit.png", - outsize=(2000, 2400), - ) # For two-time data_pixel = None - if run_two_time: - data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() - t0 = time.time() - g12b = 
auto_two_Arrayc(data_pixel, roi_mask, index=None) + if run_two_time: + + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() + t0=time.time() + g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None ) if run_dose: - np.save(data_dir + "uid=%s_g12b" % uid, g12b) - + np.save( data_dir + 'uid=%s_g12b'%uid, g12b) + + if lag_steps is None: - num_bufs = 8 + num_bufs=8 noframes = FD.end - FD.beg - num_levels = int(np.log(noframes / (num_bufs - 1)) / np.log(2) + 1) + 1 + num_levels = int(np.log( noframes/(num_bufs-1))/np.log(2) +1) +1 tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) - max_taus = lag_steps.max() - lag_steps = lag_steps[lag_steps < Nimg - good_start] - - run_time(t0) - - show_C12( - g12b, - q_ind=qth_interest, - N1=FD.beg, - N2=min(FD.end, 5000), - vmin=0.99, - vmax=1.3, - timeperframe=timeperframe, - save=True, - cmap=cmap_albula, - path=data_dir, - uid=uid_, - ) - - # print('here') - # show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, - # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) - max_taus = Nimg - t0 = time.time() - # g2b = get_one_time_from_two_time(g12b)[:max_taus] + max_taus= lag_steps.max() + lag_steps = lag_steps[ lag_steps < Nimg - good_start ] + + run_time( t0 ) + + show_C12(g12b, q_ind= qth_interest, N1= FD.beg, N2=min( FD.end,5000), vmin= 0.99, vmax=1.3, + timeperframe=timeperframe,save=True, cmap=cmap_albula, + path= data_dir, uid = uid_ ) + + #print('here') + #show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, + # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) + max_taus = Nimg + t0=time.time() + #g2b = get_one_time_from_two_time(g12b)[:max_taus] g2b = get_one_time_from_two_time(g12b)[lag_steps] - - tausb = lag_steps * timeperframe - run_time(t0) - - # tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe - g2b_pds = save_g2_general( - g2b, - taus=tausb, - qr=np.array(list(qval_dict.values()))[:, 0], - qz=None, - uid=uid_ + "_g2b.csv", - path=data_dir, - return_res=True, - ) - - g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( - g2b, - tausb, - function=fit_g2_func, - vlim=[0.95, 1.05], - fit_range=None, - fit_variables=g2_fit_variables, - guess_values=g2_guess_values, - guess_limits=g2_guess_limits, - ) - - g2b_fit_paras = save_g2_fit_para_tocsv( - g2_fit_resultb, filename=uid_ + "_g2b_fit_paras.csv", path=data_dir - ) - - D0b, qrate_fit_resb = get_q_rate_fit_general( - qval_dict, g2b_fit_paras["relaxation_rate"], fit_range=None, geometry=scat_geometry - ) - - # print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) - plot_q_rate_fit_general( - qval_dict, - g2b_fit_paras["relaxation_rate"], - qrate_fit_resb, - geometry=scat_geometry, - uid=uid_ + "_two_time", - path=data_dir, - ) - - plot_g2_general( - g2_dict={1: g2b, 2: g2_fitb}, - taus_dict={1: tausb, 2: taus_fitb}, - vlim=[0.95, 1.05], - qval_dict=qval_dict, - fit_res=g2_fit_resultb, - geometry=scat_geometry, - filename=uid_ + "_g2", - path=data_dir, - function=fit_g2_func, - ylabel="g2", - append_name="_b_fit", - ) - + + tausb = lag_steps *timeperframe + run_time(t0) + + + #tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe + g2b_pds = save_g2_general( g2b, taus=tausb, qr= np.array( list( qval_dict.values() ) )[:,0], + qz=None, uid=uid_ +'_g2b.csv', path= data_dir, return_res=True ) + + + g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( g2b, tausb, + function = fit_g2_func, vlim=[0.95, 
1.05], fit_range= None, + fit_variables=g2_fit_variables, guess_values=g2_guess_values, guess_limits =g2_guess_limits) + + g2b_fit_paras = save_g2_fit_para_tocsv(g2_fit_resultb, + filename= uid_ + '_g2b_fit_paras.csv', path=data_dir ) + + D0b, qrate_fit_resb = get_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], + fit_range=None, geometry= scat_geometry ) + + + #print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) + plot_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb, + geometry= scat_geometry,uid=uid_ +'_two_time' , path= data_dir ) + + + + plot_g2_general( g2_dict={1:g2b, 2:g2_fitb}, taus_dict={1:tausb, 2:taus_fitb},vlim=[0.95, 1.05], + qval_dict=qval_dict, fit_res= g2_fit_resultb, geometry=scat_geometry,filename=uid_+'_g2', + path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_b_fit') + if run_two_time and run_one_time: - plot_g2_general( - g2_dict={1: g2, 2: g2b}, - taus_dict={1: taus, 2: tausb}, - vlim=[0.95, 1.05], - qval_dict=qval_dict, - g2_labels=["from_one_time", "from_two_time"], - geometry=scat_geometry, - filename=uid_ + "_g2_two_g2", - path=data_dir, - ylabel="g2", - ) + plot_g2_general( g2_dict={1:g2, 2:g2b}, taus_dict={1:taus, 2:tausb},vlim=[0.95, 1.05], + qval_dict=qval_dict, g2_labels=['from_one_time', 'from_two_time'], + geometry=scat_geometry,filename=uid_+'_g2_two_g2', path= data_dir, ylabel='g2', ) + + # Four Time Correlation - if run_four_time: # have to run one and two first - t0 = time.time() + if run_four_time: #have to run one and two first + t0=time.time() g4 = get_four_time_from_two_time(g12b, g2=g2b)[:max_taus] run_time(t0) - taus4 = np.arange(g4.shape[0]) * timeperframe - g4_pds = save_g2_general( - g4, - taus=taus4, - qr=np.array(list(qval_dict.values()))[:, 0], - qz=None, - uid=uid_ + "_g4.csv", - path=data_dir, - return_res=True, - ) - plot_g2_general( - g2_dict={1: g4}, - taus_dict={1: taus4}, - vlim=[0.95, 1.05], - qval_dict=qval_dict, - fit_res=None, - geometry=scat_geometry, - filename=uid_ + "_g4", - path=data_dir, - ylabel="g4", - ) + taus4 = np.arange( g4.shape[0])*timeperframe + g4_pds = save_g2_general( g4, taus=taus4, qr=np.array( list( qval_dict.values() ) )[:,0], + qz=None, uid=uid_ +'_g4.csv', path= data_dir, return_res=True ) + plot_g2_general( g2_dict={1:g4}, taus_dict={1:taus4},vlim=[0.95, 1.05], qval_dict=qval_dict, fit_res= None, + geometry=scat_geometry,filename=uid_+'_g4',path= data_dir, ylabel='g4') if run_dose: - get_two_time_mulit_uids( - [uid], roi_mask, norm=norm, bin_frame_number=bin_frame_number, path=data_dir0, force_generate=False - ) + get_two_time_mulit_uids( [uid], roi_mask, norm= norm, bin_frame_number=bin_frame_number, + path= data_dir0, force_generate=False ) N = len(imgs) try: - tr = md["transmission"] + tr = md['transmission'] except: tr = 1 - if "dose_frame" in list(run_pargs.keys()): - dose_frame = run_pargs["dose_frame"] + if 'dose_frame' in list(run_pargs.keys()): + dose_frame = run_pargs['dose_frame'] else: - dose_frame = np.int_([N / 8, N / 4, N / 2, 3 * N / 4, N * 0.99]) - # N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 + dose_frame = np.int_([ N/8, N/4 ,N/2, 3*N/4, N*0.99 ] ) + #N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 exposure_dose = tr * exposuretime * dose_frame - taus_uids, g2_uids = get_series_one_time_mulit_uids( - [uid], - qval_dict, - good_start=good_start, - path=data_dir0, - exposure_dose=exposure_dose, - num_bufs=8, - save_g2=False, - dead_time=0, - trans=[tr], - ) - - plot_dose_g2( - taus_uids, - g2_uids, - ylim=[0.95, 1.2], - 
vshift=0.00, - qval_dict=qval_dict, - fit_res=None, - geometry=scat_geometry, - filename="%s_dose_analysis" % uid_, - path=data_dir, - function=None, - ylabel="g2_Dose", - g2_labels=None, - append_name="", - ) - + taus_uids, g2_uids = get_series_one_time_mulit_uids( [ uid ], qval_dict, good_start=good_start, + path= data_dir0, exposure_dose = exposure_dose, num_bufs =8, save_g2= False, + dead_time = 0, trans = [ tr ] ) + + plot_dose_g2( taus_uids, g2_uids, ylim=[0.95, 1.2], vshift= 0.00, + qval_dict = qval_dict, fit_res= None, geometry= scat_geometry, + filename= '%s_dose_analysis'%uid_, + path= data_dir, function= None, ylabel='g2_Dose', g2_labels= None, append_name= '' ) + # Speckel Visiblity - if run_xsvs: - max_cts = get_max_countc(FD, roi_mask) - qind, pixelist = roi.extract_label_indices(roi_mask) - noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs + 1))[1:] - # time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) - time_steps = [0, 1] # only run the first two levels - num_times = len(time_steps) - times_xsvs = exposuretime + (2 ** (np.arange(len(time_steps))) - 1) * timeperframe - print("The max counts are: %s" % max_cts) - - ### Do historam - if roi_avg is None: - times_roi, mean_int_sets = cal_each_ring_mean_intensityc( - FD, - roi_mask, - timeperframe=None, - ) - roi_avg = np.average(mean_int_sets, axis=0) - - t0 = time.time() - spec_bins, spec_his, spec_std = xsvsp( - FD, - np.int_(roi_mask), - norm=None, - max_cts=int(max_cts + 2), - bad_images=bad_frame_list, - only_two_levels=True, - ) - spec_kmean = np.array([roi_avg * 2**j for j in range(spec_his.shape[0])]) + if run_xsvs: + max_cts = get_max_countc(FD, roi_mask ) + qind, pixelist = roi.extract_label_indices( roi_mask ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + #time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) + time_steps = [0,1] #only run the first two levels + num_times = len(time_steps) + times_xsvs = exposuretime + (2**( np.arange( len(time_steps) ) ) -1 ) *timeperframe + print( 'The max counts are: %s'%max_cts ) + + ### Do historam + if roi_avg is None: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, ) + roi_avg = np.average( mean_int_sets, axis=0) + + t0=time.time() + spec_bins, spec_his, spec_std = xsvsp( FD, np.int_(roi_mask), norm=None, + max_cts=int(max_cts+2), bad_images=bad_frame_list, only_two_levels=True ) + spec_kmean = np.array( [roi_avg * 2**j for j in range( spec_his.shape[0] )] ) run_time(t0) - + run_xsvs_all_lags = False if run_xsvs_all_lags: - times_xsvs = exposuretime + lag_steps * acquisition_period + times_xsvs = exposuretime + lag_steps * acquisition_period if data_pixel is None: - data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() - t0 = time.time() - spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std( - data_pixel, np.int_(ro_mask), lag_steps - ) - run_time(t0) - spec_pds = save_bin_his_std( - spec_bins, spec_his, spec_std, filename=uid_ + "_spec_res.csv", path=data_dir - ) - - ML_val, KL_val, K_ = get_xsvs_fit( - spec_his, - spec_kmean, - spec_std, - max_bins=2, - varyK=False, - ) - - # print( 'The observed average photon counts are: %s'%np.round(K_mean,4)) - # print( 'The fitted average photon counts are: %s'%np.round(K_,4)) - print( - "The difference sum of average photon counts between fit and data are: %s" - % np.round(abs(np.sum(spec_kmean[0, :] - K_)), 4) - ) - print("#" * 30) - qth = 10 - print("The fitted M for Qth= %s are: %s" % 
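
# Note: np.int_(ro_mask) in the run_xsvs_all_lags branch above looks like a typo for
# np.int_(roi_mask). The fit performed by get_xsvs_fit is based on negative-binomial
# speckle statistics, with K the mean photon count and M the number of speckle modes
# (contrast = 1/M); a minimal sketch of that distribution (function name hypothetical):
import numpy as np
from scipy.special import gammaln

def nbinom_pmf(k, K, M):
    # P(k) = Gamma(k+M) / (Gamma(k+1) Gamma(M)) * (K/(K+M))**k * (M/(K+M))**M
    lnP = (gammaln(k + M) - gammaln(k + 1) - gammaln(M)
           + k * np.log(K / (K + M)) + M * np.log(M / (K + M)))
    return np.exp(lnP)

# e.g. nbinom_pmf(np.arange(5), 0.2, 2.0) gives the probabilities of detecting
# 0..4 photons per pixel at K=0.2 and contrast 1/M = 0.5
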
(qth, ML_val[qth])) - print(K_[qth]) - print("#" * 30) - - plot_xsvs_fit( - spec_his, - ML_val, - KL_val, - K_mean=spec_kmean, - spec_std=spec_std, - xlim=[0, 10], - vlim=[0.9, 1.1], - uid=uid_, - qth=qth_interest, - logy=True, - times=times_xsvs, - q_ring_center=qr, - path=data_dir, - ) - - plot_xsvs_fit( - spec_his, - ML_val, - KL_val, - K_mean=spec_kmean, - spec_std=spec_std, - xlim=[0, 15], - vlim=[0.9, 1.1], - uid=uid_, - qth=None, - logy=True, - times=times_xsvs, - q_ring_center=qr, - path=data_dir, - ) + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() + t0=time.time() + spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std(data_pixel, np.int_(ro_mask), lag_steps ) + run_time(t0) + spec_pds = save_bin_his_std( spec_bins, spec_his, spec_std, filename=uid_+'_spec_res.csv', path=data_dir ) + + ML_val, KL_val,K_ = get_xsvs_fit( spec_his, spec_kmean, spec_std, max_bins=2,varyK= False, ) + + #print( 'The observed average photon counts are: %s'%np.round(K_mean,4)) + #print( 'The fitted average photon counts are: %s'%np.round(K_,4)) + print( 'The difference sum of average photon counts between fit and data are: %s'%np.round( + abs(np.sum( spec_kmean[0,:] - K_ )),4)) + print( '#'*30) + qth= 10 + print( 'The fitted M for Qth= %s are: %s'%(qth, ML_val[qth]) ) + print( K_[qth]) + print( '#'*30) + + + plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std=spec_std, + xlim = [0,10], vlim =[.9, 1.1], + uid=uid_, qth= qth_interest, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir) + + plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std = spec_std, + xlim = [0,15], vlim =[.9, 1.1], + uid=uid_, qth= None, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir ) ### Get contrast - contrast_factorL = get_contrast(ML_val) - spec_km_pds = save_KM( - spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_, path=data_dir - ) - # print( spec_km_pds ) - - plot_g2_contrast( - contrast_factorL, - g2, - times_xsvs, - taus, - qr, - vlim=[0.8, 1.2], - qth=qth_interest, - uid=uid_, - path=data_dir, - legend_size=14, - ) - - plot_g2_contrast( - contrast_factorL, - g2, - times_xsvs, - taus, - qr, - vlim=[0.8, 1.2], - qth=None, - uid=uid_, - path=data_dir, - legend_size=4, - ) - - md["mask_file"] = mask_path + mask_name - md["mask"] = mask - md["NOTEBOOK_FULL_PATH"] = None - md["good_start"] = good_start - md["bad_frame_list"] = bad_frame_list - md["avg_img"] = avg_img - md["roi_mask"] = roi_mask - - if scat_geometry == "gi_saxs": - md["Qr"] = Qr - md["Qz"] = Qz - md["qval_dict"] = qval_dict - md["beam_center_x"] = inc_x0 - md["beam_center_y"] = inc_y0 - md["beam_refl_center_x"] = refl_x0 - md["beam_refl_center_y"] = refl_y0 - - elif scat_geometry == "saxs" or "gi_waxs": - md["qr"] = qr - # md['qr_edge'] = qr_edge - md["qval_dict"] = qval_dict - md["beam_center_x"] = center[1] - md["beam_center_y"] = center[0] - - elif scat_geometry == "ang_saxs": - md["qval_dict_v"] = qval_dict_v - md["qval_dict_p"] = qval_dict_p - md["beam_center_x"] = center[1] - md["beam_center_y"] = center[0] - - md["beg"] = FD.beg - md["end"] = FD.end - md["metadata_file"] = data_dir + "md.csv-&-md.pkl" - psave_obj(md, data_dir + "uid=%s_md" % uid[:6]) # save the setup parameters - # psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters - save_dict_csv(md, data_dir + "uid=%s_md.csv" % uid, "w") - - Exdt = {} - if scat_geometry == "gi_saxs": - for k, v in zip( - [ - "md", - "roi_mask", - "qval_dict", - "avg_img", - "mask", - 
"pixel_mask", - "imgsum", - "bad_frame_list", - "qr_1d_pds", - ], - [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list, qr_1d_pds], - ): - Exdt[k] = v - elif scat_geometry == "saxs": - for k, v in zip( - [ - "md", - "q_saxs", - "iq_saxs", - "iqst", - "qt", - "roi_mask", - "qval_dict", - "avg_img", - "mask", - "pixel_mask", - "imgsum", - "bad_frame_list", - ], - [ - md, - q_saxs, - iq_saxs, - iqst, - qt, - roi_mask, - qval_dict, - avg_img, - mask, - pixel_mask, - imgsum, - bad_frame_list, - ], - ): - Exdt[k] = v - elif scat_geometry == "gi_waxs": - for k, v in zip( - ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "bad_frame_list"], - [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list], - ): - Exdt[k] = v - elif scat_geometry == "ang_saxs": - for k, v in zip( - [ - "md", - "q_saxs", - "iq_saxs", - "roi_mask_v", - "roi_mask_p", - "qval_dict_v", - "qval_dict_p", - "avg_img", - "mask", - "pixel_mask", - "imgsum", - "bad_frame_list", - ], - [ - md, - q_saxs, - iq_saxs, - roi_mask_v, - roi_mask_p, - qval_dict_v, - qval_dict_p, - avg_img, - mask, - pixel_mask, - imgsum, - bad_frame_list, - ], - ): - Exdt[k] = v - - if run_waterfall: - Exdt["wat"] = wat - if run_t_ROI_Inten: - Exdt["times_roi"] = times_roi - Exdt["mean_int_sets"] = mean_int_sets + contrast_factorL = get_contrast( ML_val) + spec_km_pds = save_KM( spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_ , path = data_dir ) + #print( spec_km_pds ) + + plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, + vlim=[0.8,1.2], qth = qth_interest, uid=uid_,path = data_dir, legend_size=14) + + plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, + vlim=[0.8,1.2], qth = None, uid=uid_,path = data_dir, legend_size=4) + + + + + + md['mask_file']= mask_path + mask_name + md['mask'] = mask + md['NOTEBOOK_FULL_PATH'] = None + md['good_start'] = good_start + md['bad_frame_list'] = bad_frame_list + md['avg_img'] = avg_img + md['roi_mask'] = roi_mask + + if scat_geometry == 'gi_saxs': + md['Qr'] = Qr + md['Qz'] = Qz + md['qval_dict'] = qval_dict + md['beam_center_x'] = inc_x0 + md['beam_center_y']= inc_y0 + md['beam_refl_center_x'] = refl_x0 + md['beam_refl_center_y'] = refl_y0 + + elif scat_geometry == 'saxs' or 'gi_waxs': + md['qr']= qr + #md['qr_edge'] = qr_edge + md['qval_dict'] = qval_dict + md['beam_center_x'] = center[1] + md['beam_center_y']= center[0] + + elif scat_geometry == 'ang_saxs': + md['qval_dict_v'] = qval_dict_v + md['qval_dict_p'] = qval_dict_p + md['beam_center_x'] = center[1] + md['beam_center_y']= center[0] + + + md['beg'] = FD.beg + md['end'] = FD.end + md['metadata_file'] = data_dir + 'md.csv-&-md.pkl' + psave_obj( md, data_dir + 'uid=%s_md'%uid[:6] ) #save the setup parameters + #psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters + save_dict_csv( md, data_dir + 'uid=%s_md.csv'%uid, 'w') + + Exdt = {} + if scat_geometry == 'gi_saxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): + Exdt[ k ] = v + elif scat_geometry == 'saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, q_saxs, iq_saxs, iqst, qt,roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + elif scat_geometry == 'gi_waxs': + for k,v in zip( 
['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + elif scat_geometry == 'ang_saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', + 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, + qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + + if run_waterfall:Exdt['wat'] = wat + if run_t_ROI_Inten:Exdt['times_roi'] = times_roi;Exdt['mean_int_sets']=mean_int_sets if run_one_time: - if scat_geometry != "ang_saxs": - for k, v in zip(["taus", "g2", "g2_fit_paras"], [taus, g2, g2_fit_paras]): - Exdt[k] = v + if scat_geometry != 'ang_saxs': + for k,v in zip( ['taus','g2','g2_fit_paras'], [taus,g2,g2_fit_paras] ):Exdt[ k ] = v else: - for k, v in zip(["taus_v", "g2_v", "g2_fit_paras_v"], [taus_v, g2_v, g2_fit_paras_v]): - Exdt[k] = v - for k, v in zip(["taus_p", "g2_p", "g2_fit_paras_p"], [taus_p, g2_p, g2_fit_paras_p]): - Exdt[k] = v + for k,v in zip( ['taus_v','g2_v','g2_fit_paras_v'], [taus_v,g2_v,g2_fit_paras_v] ):Exdt[ k ] = v + for k,v in zip( ['taus_p','g2_p','g2_fit_paras_p'], [taus_p,g2_p,g2_fit_paras_p] ):Exdt[ k ] = v if run_two_time: - for k, v in zip(["tausb", "g2b", "g2b_fit_paras", "g12b"], [tausb, g2b, g2b_fit_paras, g12b]): - Exdt[k] = v + for k,v in zip( ['tausb','g2b','g2b_fit_paras', 'g12b'], [tausb,g2b,g2b_fit_paras,g12b] ):Exdt[ k ] = v if run_four_time: - for k, v in zip(["taus4", "g4"], [taus4, g4]): - Exdt[k] = v + for k,v in zip( ['taus4','g4'], [taus4,g4] ):Exdt[ k ] = v if run_xsvs: - for k, v in zip( - ["spec_kmean", "spec_pds", "times_xsvs", "spec_km_pds", "contrast_factorL"], - [spec_kmean, spec_pds, times_xsvs, spec_km_pds, contrast_factorL], - ): - Exdt[k] = v - - export_xpcs_results_to_h5("uid=%s_Res.h5" % md["uid"], data_dir, export_dict=Exdt) - # extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) + for k,v in zip( ['spec_kmean','spec_pds','times_xsvs','spec_km_pds','contrast_factorL'], + [ spec_kmean,spec_pds,times_xsvs,spec_km_pds,contrast_factorL] ):Exdt[ k ] = v + + + export_xpcs_results_to_h5( 'uid=%s_Res.h5'%md['uid'], data_dir, export_dict = Exdt ) + #extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) # Creat PDF Report - pdf_out_dir = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") - pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf" % (uid, pdf_version) + pdf_out_dir = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') + pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) if run_xsvs: - pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf" % (uid, pdf_version) - # pdf_filename - - print(data_dir, uid[:6], pdf_out_dir, pdf_filename, username) - - make_pdf_report( - data_dir, - uid[:6], - pdf_out_dir, - pdf_filename, - username, - run_fit_form, - run_one_time, - run_two_time, - run_four_time, - run_xsvs, - run_dose=run_dose, - report_type=scat_geometry, - ) - ## Attach the PDF report to Olog + pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) + #pdf_filename + + print( data_dir, uid[:6], pdf_out_dir, pdf_filename, username ) + + make_pdf_report( data_dir, uid[:6], pdf_out_dir, pdf_filename, username, + run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, 
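
# Caveat for the geometry branches above: `scat_geometry == 'saxs' or 'gi_waxs'`
# does not test membership; the non-empty string 'gi_waxs' is always truthy, so that
# branch fires for any geometry reaching it (leaving the 'ang_saxs' branch dead).
# A small demonstration of the difference:
scat_geometry_test = 'ang_saxs'
print(scat_geometry_test == 'saxs' or 'gi_waxs')   # -> 'gi_waxs' (truthy): branch taken
print(scat_geometry_test in ('saxs', 'gi_waxs'))   # -> False: the intended test
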
run_dose=run_dose, + report_type= scat_geometry + ) + ## Attach the PDF report to Olog if att_pdf_report: - os.environ["HTTPS_PROXY"] = "https://proxy:8888" - os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" - pname = pdf_out_dir + pdf_filename - atch = [Attachment(open(pname, "rb"))] + os.environ['HTTPS_PROXY'] = 'https://proxy:8888' + os.environ['no_proxy'] = 'cs.nsls2.local,localhost,127.0.0.1' + pname = pdf_out_dir + pdf_filename + atch=[ Attachment(open(pname, 'rb')) ] try: - update_olog_uid(uid=md["uid"], text="Add XPCS Analysis PDF Report", attachments=atch) + update_olog_uid( uid= md['uid'], text='Add XPCS Analysis PDF Report', attachments= atch ) except: - print( - "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." - % pname - ) + print("I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file."%pname) if show_plot: - plt.show() - # else: + plt.show() + #else: # plt.close('all') if clear_plot: - plt.close("all") + plt.close('all') if return_res: res = {} - if scat_geometry == "saxs": - for k, v in zip( - [ - "md", - "q_saxs", - "iq_saxs", - "iqst", - "qt", - "avg_img", - "mask", - "imgsum", - "bad_frame_list", - "roi_mask", - "qval_dict", - ], - [md, q_saxs, iq_saxs, iqst, qt, avg_img, mask, imgsum, bad_frame_list, roi_mask, qval_dict], - ): - res[k] = v - - elif scat_geometry == "ang_saxs": - for k, v in zip( - [ - "md", - "q_saxs", - "iq_saxs", - "roi_mask_v", - "roi_mask_p", - "qval_dict_v", - "qval_dict_p", - "avg_img", - "mask", - "pixel_mask", - "imgsum", - "bad_frame_list", - ], - [ - md, - q_saxs, - iq_saxs, - roi_mask_v, - roi_mask_p, - qval_dict_v, - qval_dict_p, - avg_img, - mask, - pixel_mask, - imgsum, - bad_frame_list, - ], - ): - res[k] = v - - elif scat_geometry == "gi_saxs": - for k, v in zip( - [ - "md", - "roi_mask", - "qval_dict", - "avg_img", - "mask", - "pixel_mask", - "imgsum", - "bad_frame_list", - "qr_1d_pds", - ], - [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list, qr_1d_pds], - ): - res[k] = v - - elif scat_geometry == "gi_waxs": - for k, v in zip( - ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "bad_frame_list"], - [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list], - ): - res[k] = v - + if scat_geometry == 'saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','avg_img','mask', 'imgsum','bad_frame_list','roi_mask', 'qval_dict'], + [ md, q_saxs, iq_saxs, iqst, qt, avg_img,mask,imgsum,bad_frame_list,roi_mask, qval_dict ] ): + res[ k ] = v + + elif scat_geometry == 'ang_saxs': + for k,v in zip( [ 'md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', + 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [ md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, + qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + res[ k ] = v + + elif scat_geometry == 'gi_saxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): + res[ k ] = v + + elif scat_geometry == 'gi_waxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + res[ k ] = v + if run_waterfall: - res["wat"] = wat + res['wat'] = wat if run_t_ROI_Inten: - res["times_roi"] = times_roi 
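
# The exported HDF5 file can be read back with extract_xpcs_results_from_h5 (see the
# commented call above) or directly with h5py; a minimal sketch, assuming the datasets
# mirror the export_dict keys:
import h5py
with h5py.File(data_dir + 'uid=%s_Res.h5' % md['uid'], 'r') as f:
    print(list(f.keys()))    # e.g. 'avg_img', 'g2', 'taus', 'md', ...
    g2_loaded = f['g2'][:]   # pull one dataset into memory
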
- res["mean_int_sets"] = mean_int_sets + res['times_roi'] = times_roi; + res['mean_int_sets']=mean_int_sets if run_one_time: - if scat_geometry != "ang_saxs": - res["g2"] = g2 - res["taus"] = taus + if scat_geometry != 'ang_saxs': + res['g2'] = g2 + res['taus']=taus else: - res["g2_p"] = g2_p - res["taus_p"] = taus_p - res["g2_v"] = g2_v - res["taus_v"] = taus_v - + res['g2_p'] = g2_p + res['taus_p']=taus_p + res['g2_v'] = g2_v + res['taus_v']=taus_v + if run_two_time: - res["tausb"] = tausb - res["g12b"] = g12b - res["g2b"] = g2b + res['tausb'] = tausb + res['g12b'] = g12b + res['g2b'] = g2b if run_four_time: - res["g4"] = g4 - res["taus4"] = taus4 + res['g4']= g4 + res['taus4']=taus4 if run_xsvs: - res["spec_kmean"] = spec_kmean - res["spec_pds"] = spec_pds - res["contrast_factorL"] = contrast_factorL - res["times_xsvs"] = times_xsvs + res['spec_kmean']=spec_kmean + res['spec_pds']= spec_pds + res['contrast_factorL'] = contrast_factorL + res['times_xsvs']= times_xsvs return res + +#uid = '3ff4ee' +#run_xpcs_xsvs_single( uid, run_pargs ) + + + -# uid = '3ff4ee' -# run_xpcs_xsvs_single( uid, run_pargs ) diff --git a/pyCHX/xpcs_timepixel.py b/pyCHX/xpcs_timepixel.py index 85080c5..286141e 100644 --- a/pyCHX/xpcs_timepixel.py +++ b/pyCHX/xpcs_timepixel.py @@ -1,907 +1,830 @@ -import os +from numpy import pi,sin,arctan,sqrt,mgrid,where,shape,exp,linspace,std,arange +from numpy import power,log,log10,array,zeros,ones,reshape,mean,histogram,round,int_ +from numpy import indices,hypot,digitize,ma,histogramdd,apply_over_axes,sum +from numpy import around,intersect1d, ravel, unique,hstack,vstack,zeros_like +from numpy import save, load, dot +from numpy.linalg import lstsq +from numpy import polyfit,poly1d; +import sys,os import pickle as pkl -import struct -import sys -# from Init_for_Timepix import * # the setup file +import matplotlib.pyplot as plt +#from Init_for_Timepix import * # the setup file import time - -import matplotlib.pyplot as plt -import numpy as np + +import struct +import numpy as np +from tqdm import tqdm import pandas as pds -from numpy import ( - apply_over_axes, - arange, - arctan, - around, - array, - digitize, - dot, - exp, - histogram, - histogramdd, - hstack, - hypot, - indices, - int_, - intersect1d, - linspace, - load, - log, - log10, - ma, - mean, - mgrid, - ones, - pi, - poly1d, - polyfit, - power, - ravel, - reshape, - round, - save, - shape, - sin, - sqrt, - std, - sum, - unique, - vstack, - where, - zeros, - zeros_like, -) -from numpy.linalg import lstsq -from tqdm import tqdm - -from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD from pyCHX.chx_libs import multi_tau_lags +from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD + -def get_timepixel_data(data_dir, filename, time_unit=1): - """give a csv file of a timepixel data, return x,y,t + +def get_timepixel_data( data_dir, filename, time_unit= 1 ): + '''give a csv file of a timepixel data, return x,y,t x, pos_x in pixel y, pos_y in pixel t, arrival time - time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 - return x,y,t (in second, starting from zero) - - """ - data = pds.read_csv(data_dir + filename) + time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 + return x,y,t (in second, starting from zero) + + ''' + data = pds.read_csv( data_dir + filename ) #'#Col', ' #Row', ' #ToA', - # return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps - if time_unit != 1: + #return np.array( 
data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps + if time_unit !=1: try: - x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) * time_unit + x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) * time_unit except: - x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) * time_unit + x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) * time_unit else: try: - x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) + x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) except: - x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) - return x, y, t - t.min() # * 25/4096. #in ns - - -def get_pvlist_from_post(p, t, binstep=100, detx=256, dety=256): - """YG.DEV@CHX Nov, 2017 to get a pos, val list of phonton hitting detector by giving - p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin - The most important function for timepix - Input: - p: array, int64, coordinate-x * det_x + coordinate-y - t: list, int64, photon hit time - binstep: int, binstep (in t unit) period - detx,dety: int/int, the detector size in x and y - Output: - positions: int array, (x*detx +y) - vals: int array, counts of that positions - counts: int array, counts of that positions in each binstep - """ - v = (t - t[0]) // binstep - L = np.max(v) + 1 - arr = np.ravel_multi_index([p, v], [detx * dety, L]) - uval, ind, count = np.unique(arr, return_counts=True, return_index=True) - ind2 = np.lexsort((p[ind], v[ind])) + x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) + return x,y, t-t.min() #* 25/4096. 
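
# Minimal usage sketch for the CSV reader above (path and file name hypothetical).
# Keep t in integer clock ticks (time_unit=1) when the result feeds the integer
# binning helpers below. Note the docstrings below disagree on the flattening
# convention (256*x + y vs 256*y + x); either works as long as it matches how
# roi_mask/pixelist are raveled.
import numpy as np
x, y, t = get_timepixel_data('/XF11ID/analysis/timepix/', 'run1.csv')
p = np.int64(x) * 256 + np.int64(y)   # flattened hit position, one convention
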
#in ns + + +def get_pvlist_from_post( p, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Nov, 2017 to get a pos, val list of phonton hitting detector by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: array, int64, coordinate-x * det_x + coordinate-y + t: list, int64, photon hit time + binstep: int, binstep (in t unit) period + detx,dety: int/int, the detector size in x and y + Output: + positions: int array, (x*detx +y) + vals: int array, counts of that positions + counts: int array, counts of that positions in each binstep + ''' + v = ( t - t[0])//binstep + L= np.max( v ) + 1 + arr = np.ravel_multi_index( [ p, v ], [detx * dety,L ] ) + uval, ind, count = np.unique( arr, return_counts=True, return_index=True) + ind2 = np.lexsort( ( p[ind], v[ind] ) ) ps = (p[ind])[ind2] vs = count[ind2] cs = np.bincount(v[ind]) - return ps, vs, cs - - -def histogram_pt(p, t, binstep=100, detx=256, dety=256): - """YG.DEV@CHX Nov, 2017 to get a histogram of phonton counts by giving - p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin - The most important function for timepix - Input: - p: coordinate-x * det_x + coordinate-y - t: photon hit time - bin t in binstep (in t unit) period - detx,dety: the detector size in x and y - Output: - the hitorgram of photons with bins as binstep (in time unit) - """ - L = np.max((t - t[0]) // binstep) + 1 - # print(L,x,y, (t-t[0])//binstep) - arr = np.ravel_multi_index([p, (t - t[0]) // binstep], [detx * dety, L]) - M, N = arr.max(), arr.min() - da = np.zeros([detx * dety, L]) - da.flat[np.arange(N, M)] = np.bincount(arr - N) + return ps,vs,cs + + + +def histogram_pt( p, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Nov, 2017 to get a histogram of phonton counts by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: coordinate-x * det_x + coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + ''' + L= np.max( (t-t[0])//binstep ) + 1 + #print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index( [ p, (t-t[0])//binstep ], [detx * dety,L ] ) + M,N = arr.max(),arr.min() + da = np.zeros( [detx * dety, L ] ) + da.flat[np.arange(N, M ) ] = np.bincount( arr- N ) + return da + +def histogram_xyt( x, y, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Mar, 2017 to get a histogram of phonton counts by giving + x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + x: coordinate-x + y: coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + + + ''' + L= np.max( (t-t[0])//binstep ) + 1 + #print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index( [x, y, (t-t[0])//binstep ], [detx, dety,L ] ) + M,N = arr.max(),arr.min() + da = np.zeros( [detx, dety, L ] ) + da.flat[np.arange(N, M ) ] = np.bincount( arr- N ) return da -def histogram_xyt(x, y, t, binstep=100, detx=256, dety=256): - """YG.DEV@CHX Mar, 2017 to get a histogram of phonton counts by giving - x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin - The most important function for timepix - Input: 
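
# A toy example to make the unique/lexsort bookkeeping above concrete
# (three photons, binstep=100, i.e. two time bins):
import numpy as np
p = np.array([5, 5, 7], dtype=np.int64)    # two hits on pixel 5, one on pixel 7
t = np.array([0, 40, 130], dtype=np.int64)
ps, vs, cs = get_pvlist_from_post(p, t, binstep=100)
# ps -> [5, 7]: occupied (flattened) positions, frame by frame
# vs -> [2, 1]: photon counts at those positions
# cs -> [1, 1]: number of occupied positions in each frame
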
- x: coordinate-x - y: coordinate-y - t: photon hit time - bin t in binstep (in t unit) period - detx,dety: the detector size in x and y - Output: - the hitorgram of photons with bins as binstep (in time unit) - - - """ - L = np.max((t - t[0]) // binstep) + 1 - # print(L,x,y, (t-t[0])//binstep) - arr = np.ravel_multi_index([x, y, (t - t[0]) // binstep], [detx, dety, L]) - M, N = arr.max(), arr.min() - da = np.zeros([detx, dety, L]) - da.flat[np.arange(N, M)] = np.bincount(arr - N) - return da - def get_FD_end_num(FD, maxend=1e10): N = maxend - for i in range(0, int(maxend)): + for i in range(0,int(maxend)): try: FD.seekimg(i) except: - N = i + N = i break FD.seekimg(0) return N - -def compress_timepix_data( - pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2, with_pickle=True -): - """YG.Dev@CHX Nov 20, 2017 - Compress the timepixeldata, in a format of x, y, t - x: pos_x in pixel - y: pos_y in pixel - timepix3 det size 256, 256 - TODOLIST: mask is not working now - Input: - pos: 256 * y + x - t: arrival time in sec - filename: the output filename - md: a dict to describle the data info - force_compress: if False, - if already compressed, just it - else: compress - if True, compress and, if exist, overwrite the already-coompress data - Return: - avg_img, imgsum, N (frame number) - - """ +def compress_timepix_data( pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2, + with_pickle=True ): + + ''' YG.Dev@CHX Nov 20, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * y + x + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + force_compress: if False, + if already compressed, just it + else: compress + if True, compress and, if exist, overwrite the already-coompress data + Return: + avg_img, imgsum, N (frame number) + + ''' if filename is None: - filename = "/XF11ID/analysis/Compressed_Data" + "/timpix_uid_%s.cmp" % md["uid"] - + filename= '/XF11ID/analysis/Compressed_Data' +'/timpix_uid_%s.cmp'%md['uid'] + if force_compress: - print("Create a new compress file with filename as :%s." % filename) - return init_compress_timepix_data( - pos, t, tbins, filename=filename, md=md, nobytes=nobytes, with_pickle=with_pickle - ) + print ("Create a new compress file with filename as :%s."%filename) + return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes, + with_pickle=with_pickle ) else: - if not os.path.exists(filename): - print("Create a new compress file with filename as :%s." % filename) - return init_compress_timepix_data( - pos, t, tbins, filename=filename, md=md, nobytes=nobytes, with_pickle=with_pickle - ) - else: - print("Using already created compressed file with filename as :%s." 
% filename) - return pkl.load(open(filename + ".pkl", "rb")) - - # FD = Multifile(filename, 0, int(1e25) ) - # return get_FD_end_num(FD) - - -def create_timepix_compress_header(md, filename, nobytes=2, bins=1): - """ + if not os.path.exists( filename ): + print ("Create a new compress file with filename as :%s."%filename) + return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes, + with_pickle=with_pickle ) + else: + print ("Using already created compressed file with filename as :%s."%filename) + return pkl.load( open(filename + '.pkl', 'rb' ) ) + + #FD = Multifile(filename, 0, int(1e25) ) + #return get_FD_end_num(FD) + + + + + +def create_timepix_compress_header( md, filename, nobytes=2, bins=1 ): + ''' Create the head for a compressed eiger data, this function is for parallel compress - """ - fp = open(filename, "wb") - # Make Header 1024 bytes - # md = images.md - if bins != 1: - nobytes = 8 - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMPtpx1", - md["beam_center_x"], - md["beam_center_y"], - md["count_time"], - md["detector_distance"], - md["frame_time"], - md["incident_wavelength"], - md["x_pixel_size"], - md["y_pixel_size"], - nobytes, - md["sy"], - md["sx"], - 0, - 256, - 0, - 256, - ) - fp.write(Header) - fp.close() - - -def init_compress_timepix_data(pos, t, binstep, filename, mask=None, md=None, nobytes=2, with_pickle=True): - """YG.Dev@CHX Nov 19, 2017 with optimal algorithm by using complex index techniques - - Compress the timepixeldata, in a format of x, y, t - x: pos_x in pixel - y: pos_y in pixel - timepix3 det size 256, 256 - TODOLIST: mask is not working now - Input: - pos: 256 * x + y #can't be 256*x + y - t: arrival time in sec - binstep: int, binstep (in t unit) period - filename: the output filename - md: a dict to describle the data info - Return: - N (frame number) - - """ - fp = open(filename, "wb") + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + fp.close() + + +def init_compress_timepix_data( pos, t, binstep, filename, mask=None, + md = None, nobytes=2,with_pickle=True ): + ''' YG.Dev@CHX Nov 19, 2017 with optimal algorithm by using complex index techniques + + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + binstep: int, binstep (in t unit) period + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + ''' + fp = open( filename,'wb' ) if md is None: - md = {} - md["beam_center_x"] = 0 - md["beam_center_y"] = 0 - md["count_time"] = 0 - md["detector_distance"] = 0 - md["frame_time"] = 0 - md["incident_wavelength"] = 0 - md["x_pixel_size"] = 45 - md["y_pixel_size"] = 45 - # nobytes = 2 - md["sx"] = 256 - md["sy"] = 256 - - # TODList: for different detector using different md structure, March 2, 2017, - - # 8d include, + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] = 45 + md['y_pixel_size'] = 45 + 
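
# The 1024-byte header written by create_timepix_compress_header above (and by the
# init_ functions below) can be read back with the same struct format; a sketch,
# with filename as above:
import struct
with open(filename, 'rb') as f:
    fields = struct.unpack('@16s8d7I916x', f.read(1024))
# fields[0]    -> b'Version-COMPtpx1'
# fields[1:9]  -> beam_center_x/y, count_time, detector_distance, frame_time,
#                 incident_wavelength, x_pixel_size, y_pixel_size
# fields[9:12] -> nobytes, sy, sx;  fields[12:16] -> roi row/col begin/end
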
#nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMPtpx1", - md["beam_center_x"], - md["beam_center_y"], - md["count_time"], - md["detector_distance"], - md["frame_time"], - md["incident_wavelength"], - md["x_pixel_size"], - md["y_pixel_size"], - nobytes, - md["sy"], - md["sx"], - 0, - 256, - 0, - 256, - ) - fp.write(Header) - - N_ = np.int(np.ceil((t.max() - t.min()) / binstep)) - print("There are %s frames to be compressed..." % (N_ - 1)) - - ps, vs, cs = get_pvlist_from_post(pos, t, binstep, detx=md["sx"], dety=md["sy"]) - N = len(cs) - 1 # the last one might don't have full number for bings, so kick off + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + + N_ = np.int( np.ceil( (t.max() -t.min()) / binstep ) ) + print('There are %s frames to be compressed...'%(N_-1)) + + ps,vs,cs = get_pvlist_from_post( pos, t, binstep, detx= md['sx'], dety= md['sy'] ) + N = len(cs) - 1 #the last one might don't have full number for bings, so kick off css = np.cumsum(cs) - imgsum = np.zeros(N) + imgsum = np.zeros( N ) good_count = 0 - avg_img = np.zeros( - [md["sy"], md["sx"]], dtype=np.float64 - ) # changed depreciated np.float to np.float64 LW @06/11/2023 - - for i in tqdm(range(0, N)): - if i == 0: + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 + + for i in tqdm( range(0,N) ): + if i ==0: ind1 = 0 ind2 = css[i] else: - ind1 = css[i - 1] - ind2 = css[i] - # print( ind1, ind2 ) - good_count += 1 - psi = ps[ind1:ind2] - vsi = vs[ind1:ind2] - dlen = cs[i] - imgsum[i] = vsi.sum() - np.ravel(avg_img)[psi] += vsi - # print(vs.sum()) - fp.write(struct.pack("@I", dlen)) - fp.write(struct.pack("@{}i".format(dlen), *psi)) - fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *vsi)) + ind1 = css[i-1] + ind2 = css[i] + #print( ind1, ind2 ) + good_count +=1 + psi = ps[ ind1:ind2 ] + vsi = vs[ ind1:ind2 ] + dlen = cs[i] + imgsum[i] = vsi.sum() + np.ravel(avg_img )[psi] += vsi + #print(vs.sum()) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *psi)) + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vsi)) fp.close() avg_img /= good_count - # return N -1 - if with_pickle: - pkl.dump([avg_img, imgsum, N], open(filename + ".pkl", "wb")) - return avg_img, imgsum, N - - -def init_compress_timepix_data_light_duty( - pos, t, binstep, filename, mask=None, md=None, nobytes=2, with_pickle=True -): - """YG.Dev@CHX Nov 19, 2017 - Compress the timepixeldata, in a format of x, y, t - x: pos_x in pixel - y: pos_y in pixel - timepix3 det size 256, 256 - TODOLIST: mask is not working now - Input: - pos: 256 * x + y #can't be 256*x + y - t: arrival time in sec - filename: the output filename - md: a dict to describle the data info - Return: - N (frame number) - - """ - fp = open(filename, "wb") + #return N -1 + if with_pickle: + pkl.dump( [ avg_img, imgsum, N ], open(filename + '.pkl', 'wb' ) ) + return avg_img, imgsum, N + + + + + +def 
init_compress_timepix_data_light_duty( pos, t, binstep, filename, mask=None, + md = None, nobytes=2,with_pickle=True ): + ''' YG.Dev@CHX Nov 19, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + ''' + fp = open( filename,'wb' ) if md is None: - md = {} - md["beam_center_x"] = 0 - md["beam_center_y"] = 0 - md["count_time"] = 0 - md["detector_distance"] = 0 - md["frame_time"] = 0 - md["incident_wavelength"] = 0 - md["x_pixel_size"] = 45 - md["y_pixel_size"] = 45 - # nobytes = 2 - md["sx"] = 256 - md["sy"] = 256 - - # TODList: for different detector using different md structure, March 2, 2017, - - # 8d include, + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] = 45 + md['y_pixel_size'] = 45 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMPtpx1", - md["beam_center_x"], - md["beam_center_y"], - md["count_time"], - md["detector_distance"], - md["frame_time"], - md["incident_wavelength"], - md["x_pixel_size"], - md["y_pixel_size"], - nobytes, - md["sy"], - md["sx"], - 0, - 256, - 0, - 256, - ) - fp.write(Header) - - tx = np.arange(t.min(), t.max(), binstep) - N = len(tx) - imgsum = np.zeros(N - 1) - print("There are %s frames to be compressed..." 
% (N - 1)) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + + tx = np.arange( t.min(), t.max(), binstep ) + N = len(tx) + imgsum = np.zeros( N-1 ) + print('There are %s frames to be compressed...'%(N-1)) good_count = 0 - avg_img = np.zeros( - [md["sy"], md["sx"]], dtype=np.float64 - ) # changed depreciated np.float to np.float64 LW @06/11/2023 - for i in tqdm(range(N - 1)): - ind1 = np.argmin(np.abs(tx[i] - t)) - ind2 = np.argmin(np.abs(tx[i + 1] - t)) - # print( 'N=%d:'%i, ind1, ind2 ) - p_i = pos[ind1:ind2] - ps, vs = np.unique(p_i, return_counts=True) - np.ravel(avg_img)[ps] += vs - good_count += 1 - dlen = len(ps) - imgsum[i] = vs.sum() - # print(vs.sum()) - fp.write(struct.pack("@I", dlen)) - fp.write(struct.pack("@{}i".format(dlen), *ps)) - fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *vs)) + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 + for i in tqdm( range(N-1) ): + ind1 = np.argmin( np.abs( tx[i] - t) ) + ind2 = np.argmin( np.abs( tx[i+1] - t ) ) + #print( 'N=%d:'%i, ind1, ind2 ) + p_i = pos[ind1: ind2] + ps,vs = np.unique( p_i, return_counts= True ) + np.ravel(avg_img )[ps] += vs + good_count +=1 + dlen = len(ps) + imgsum[i] = vs.sum() + #print(vs.sum()) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *ps)) + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vs)) fp.close() avg_img /= good_count - # return N -1 - if with_pickle: - pkl.dump([avg_img, imgsum, N - 1], open(filename + ".pkl", "wb")) - return avg_img, imgsum, N - 1 - - -def compress_timepix_data_old(data_pixel, filename, rois=None, md=None, nobytes=2): - """ - Compress the timepixeldata - md: a dict to describle the data info - rois: [y1,y2, x1, x2] - - """ - fp = open(filename, "wb") + #return N -1 + if with_pickle: + pkl.dump( [ avg_img, imgsum, N-1 ], open(filename + '.pkl', 'wb' ) ) + return avg_img, imgsum, N-1 + + + + + + +def compress_timepix_data_old( data_pixel, filename, rois=None, + md = None, nobytes=2 ): + ''' + Compress the timepixeldata + md: a dict to describle the data info + rois: [y1,y2, x1, x2] + + ''' + fp = open( filename,'wb' ) if md is None: - md = {} - md["beam_center_x"] = 0 - md["beam_center_y"] = 0 - md["count_time"] = 0 - md["detector_distance"] = 0 - md["frame_time"] = 0 - md["incident_wavelength"] = 0 - md["x_pixel_size"] = 25 - md["y_pixel_size"] = 25 - # nobytes = 2 - md["sx"] = 256 - md["sy"] = 256 - md["roi_rb"] = 0 - md["roi_re"] = md["sy"] - md["roi_cb"] = 0 - md["roi_ce"] = md["sx"] + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] =25 + md['y_pixel_size'] =25 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + md['roi_rb']= 0 + md['roi_re']= md['sy'] + md['roi_cb']= 0 + md['roi_ce']= md['sx'] if rois is not None: - md["roi_rb"] = rois[2] - md["roi_re"] = rois[3] - md["roi_cb"] = rois[1] - md["roi_ce"] = rois[0] - - md["sy"] = md["roi_cb"] - md["roi_ce"] - md["sx"] = md["roi_re"] - md["roi_rb"] - - # TODList: for different detector using different md structure, March 2, 2017, - - # 8d 
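
# Aside on the binning above: the two np.argmin scans per frame are each O(N_events);
# since arrival times are sorted, one vectorized np.searchsorted yields all bin edges
# at once (up to the nearest-neighbour vs left-edge convention of the argmin version):
import numpy as np
tx = np.arange(t.min(), t.max(), binstep)
edges = np.searchsorted(t, tx)    # index of the first event at/after each bin edge
# events of frame i are then pos[edges[i]:edges[i+1]]
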
include, + md['roi_rb']= rois[2] + md['roi_re']= rois[3] + md['roi_cb']= rois[1] + md['roi_ce']= rois[0] + + md['sy'] = md['roi_cb'] - md['roi_ce'] + md['sx'] = md['roi_re'] - md['roi_rb'] + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) - Header = struct.pack( - "@16s8d7I916x", - b"Version-COMPtpx1", - md["beam_center_x"], - md["beam_center_y"], - md["count_time"], - md["detector_distance"], - md["frame_time"], - md["incident_wavelength"], - md["x_pixel_size"], - md["y_pixel_size"], - nobytes, - md["sy"], - md["sx"], - md["roi_rb"], - md["roi_re"], - md["roi_cb"], - md["roi_ce"], - ) - - fp.write(Header) - fp.write(data_pixel) - - + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + md['roi_rb'], md['roi_re'],md['roi_cb'],md['roi_ce'] + ) + + fp.write( Header) + fp.write( data_pixel ) + + + class Get_TimePixel_Arrayc(object): - """ - a class to get intested pixels from a images sequence, - load ROI of all images into memory + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory get_data: to get a 2-D array, shape as (len(images), len(pixellist)) - - One example: + + One example: data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() - """ - - def __init__( - self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None, flat_correction=None, detx=256, dety=256 - ): - """ + ''' + + def __init__(self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None,flat_correction=None, + detx = 256, dety = 256): + ''' indexable: a images sequences pixelist: 1-D array, interest pixel list #flat_correction, normalized by flatfield #norm, normalized by total intensity, like a incident beam intensity - """ + ''' self.hitime = hitime - self.tbins = tbins - self.tx = np.arange(self.hitime.min(), self.hitime.max(), self.tbins) - N = len(self.tx) + self.tbins = tbins + self.tx = np.arange( self.hitime.min(), self.hitime.max(), self.tbins ) + N = len(self.tx) if beg is None: beg = 0 if end is None: end = N - + self.beg = beg - self.end = end - self.length = self.end - self.beg + self.end = end + self.length = self.end - self.beg self.pos = pos - self.pixelist = pixelist - self.norm = norm + self.pixelist = pixelist + self.norm = norm self.flat_correction = flat_correction self.detx = detx self.dety = dety - - def get_data(self): - """ + + def get_data(self ): + ''' To get intested pixels array Return: 2-D array, shape as (len(images), len(pixellist)) - """ + ''' norm = self.norm - data_array = np.zeros([self.length - 1, len(self.pixelist)]) - print(data_array.shape) - - # fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros(self.detx * self.dety, dtype=np.int32) - timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) - n = 0 + data_array = np.zeros([ self.length-1,len(self.pixelist)]) + print( data_array.shape) + + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.detx * self.dety, dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + n=0 tx = self.tx N = len(self.tx) - print("The Produced Array Length is %d." 
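
# Minimal usage sketch for Get_TimePixel_Arrayc above, following its docstring
# (roi_mask and the bin width are hypothetical; p, t as from get_timepixel_data):
from skbeam.core import roi
qind, pixelist = roi.extract_label_indices(roi_mask)
data_pixel = Get_TimePixel_Arrayc(p, t, tbins=100, pixelist=pixelist).get_data()
# -> 2-D array of shape (n_frames - 1, len(pixelist)), one row per time bin
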
% (N - 1)) + print( 'The Produced Array Length is %d.'%(N-1) ) flat_correction = self.flat_correction - # imgsum = np.zeros( N ) - for i in tqdm(range(N - 1)): - ind1 = np.argmin(np.abs(tx[i] - self.hitime)) - ind2 = np.argmin(np.abs(tx[i + 1] - self.hitime)) - # print( 'N=%d:'%i, ind1, ind2 ) - p_i = self.pos[ind1:ind2] - pos, val = np.unique(p_i, return_counts=True) - # print( val.sum() ) - w = np.where(timg[pos])[0] - pxlist = timg[pos[w]] - 1 - # print( val[w].sum() ) - # fra_pix[ pxlist] = v[w] + #imgsum = np.zeros( N ) + for i in tqdm( range(N-1) ): + ind1 = np.argmin( np.abs( tx[i] - self.hitime ) ) + ind2 = np.argmin( np.abs( tx[i+1] - self.hitime ) ) + #print( 'N=%d:'%i, ind1, ind2 ) + p_i = self.pos[ind1: ind2] + pos,val = np.unique( p_i, return_counts= True ) + #print( val.sum() ) + w = np.where( timg[pos] )[0] + pxlist = timg[ pos[w] ] -1 + #print( val[w].sum() ) + #fra_pix[ pxlist] = v[w] if flat_correction is not None: - # normalized by flatfield - data_array[n][pxlist] = val[w] + #normalized by flatfield + data_array[n][ pxlist] = val[w] else: - data_array[n][pxlist] = val[w] / flat_correction[pxlist] # -1.0 - if norm is not None: - # normalized by total intensity, like a incident beam intensity - data_array[n][pxlist] /= norm[i] - n += 1 - return data_array - - -def apply_timepix_mask(x, y, t, roi): - y1, y2, x1, x2 = roi - w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1) - return x[w], y[w], t[w] - - -def get_timepixel_data_from_series(data_dir, filename_prefix, total_filenum=72, colms=int(1e5)): - x = np.zeros(total_filenum * colms) - y = np.zeros(total_filenum * colms) - t = zeros(total_filenum * colms) - for n in range(total_filenum): - filename = filename_prefix + "_%s.csv" % n - data = get_timepixel_data(data_dir, filename) - if n != total_filenum - 1: - (x[n * colms : (n + 1) * colms], y[n * colms : (n + 1) * colms], t[n * colms : (n + 1) * colms]) = ( - data[0], - data[1], - data[2], - ) + data_array[n][ pxlist] = val[w] / flat_correction[pxlist] #-1.0 + if norm is not None: + #normalized by total intensity, like a incident beam intensity + data_array[n][ pxlist] /= norm[i] + n += 1 + return data_array + + + +def apply_timepix_mask( x,y,t, roi ): + y1,y2, x1,x2 = roi + w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1) + return x[w],y[w], t[w] + + + + + + +def get_timepixel_data_from_series( data_dir, filename_prefix, + total_filenum = 72, colms = int(1e5) ): + x = np.zeros( total_filenum * colms ) + y = np.zeros( total_filenum * colms ) + t = zeros( total_filenum * colms ) + for n in range( total_filenum): + filename = filename_prefix + '_%s.csv'%n + data = get_timepixel_data( data_dir, filename ) + if n!=total_filenum-1: + ( x[n*colms: (n+1)*colms ], y[n*colms: (n+1)*colms ], t[n*colms: (n+1)*colms ] )= ( + data[0], data[1], data[2]) else: - # print( filename_prefix + '_%s.csv'%n ) + #print( filename_prefix + '_%s.csv'%n ) ln = len(data[0]) - # print( ln ) - (x[n * colms : n * colms + ln], y[n * colms : n * colms + ln], t[n * colms : n * colms + ln]) = ( - data[0], - data[1], - data[2], - ) - - return x[: n * colms + ln], y[: n * colms + ln], t[: n * colms + ln] - - -def get_timepixel_avg_image(x, y, t, det_shape=[256, 256], delta_time=None): - """YG.Dev@CHX, 2016 + #print( ln ) + ( x[n*colms: n*colms + ln ], y[n*colms: n*colms + ln ], t[n*colms: n*colms + ln ] )= ( + data[0], data[1], data[2]) + + return x[:n*colms + ln] ,y[:n*colms + ln],t[:n*colms + ln] + + + +def get_timepixel_avg_image( x,y,t, det_shape = [256, 256], delta_time = None ): + '''YG.Dev@CHX, 2016 
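
# Note: the flat-field branch in get_data above appears inverted; the division by
# flat_correction sits in the branch where flat_correction is None (where it would
# raise a TypeError). The intended logic is presumably:
#     if flat_correction is not None:
#         data_array[n][pxlist] = val[w] / flat_correction[pxlist]  # flat-field norm
#     else:
#         data_array[n][pxlist] = val[w]
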
give x,y, t data to get image in a period of delta_time (in second) x, pos_x in pixel y, pos_y in pixel t, arrival time - - - """ - t0 = t.min() - tm = t.max() - + + + ''' + t0 = t.min() + tm = t.max() + if delta_time is not None: - delta_time *= 1e12 + delta_time *=1e12 if delta_time > tm: - delta_time = tm + delta_time = tm else: delta_time = t.max() - # print( delta_time) - t_ = t[t < delta_time] - x_ = x[: len(t_)] - y_ = y[: len(t_)] - - img = np.zeros(det_shape, dtype=np.int32) - pixlist = x_ * det_shape[0] + y_ - his = np.histogram(pixlist, bins=np.arange(det_shape[0] * det_shape[1] + 1))[0] - np.ravel(img)[:] = his - print("The max photon count is %d." % img.max()) + #print( delta_time) + t_ = t[t 10: # print progress... - if n % (noframes / 10) == 0: + '''Do correlation for xyt file, + noframes is the frame number to be correlated + ''' + start_time = time.time() + for n in range(1,noframes +1 ): # the main loop for correlator + gg2 = self.autocor_xytframe( n ) + if n==1:g2=zeros_like( gg2 ) + g2 += ( gg2 - g2 )/ float( n ) #average g2 + #print n + if noframes>10: #print progress... + if n %( noframes / 10) ==0: sys.stdout.write("#") - sys.stdout.flush() + sys.stdout.flush() elapsed_time = time.time() - start_time - print("Total time: %.2f min" % (elapsed_time / 60.0)) + print ( 'Total time: %.2f min' %(elapsed_time/60.) ) return g2 - def plot(self, y, x=None): - """a simple plot""" - if x is None: - x = arange(len(y)) - plt.plot(x, y, "ro", ls="-") + + def plot(self, y,x=None): + '''a simple plot''' + if x is None:x=arange( len(y)) + plt.plot(x,y,'ro', ls='-') plt.show() - def g2_to_pds(self, dly, g2, tscale=None): - """convert g2 to a pandas frame""" - if len(g2.shape) == 1: - g2 = g2.reshape([len(g2), 1]) + + def g2_to_pds(self, dly, g2, tscale = None): + '''convert g2 to a pandas frame''' + if len(g2.shape)==1:g2=g2.reshape( [len(g2),1] ) tn, qn = g2.shape - tindex = xrange(tn) - qcolumns = ["t"] + ["g2"] - if tscale is None: - tscale = 1.0 - g2t = hstack([dly[:tn].reshape(tn, 1) * tscale, g2]) - g2p = pd.DataFrame(data=g2t, index=tindex, columns=qcolumns) + tindex=xrange( tn ) + qcolumns = ['t'] + [ 'g2' ] + if tscale is None:tscale = 1.0 + g2t = hstack( [dly[:tn].reshape(tn,1) * tscale, g2 ]) + g2p = pd.DataFrame(data=g2t, index=tindex,columns=qcolumns) return g2p - def show(self, g2p, title): - t = g2p.t - N = len(g2p) - ylim = [g2p.g2.min(), g2p[1:N].g2.max()] - g2p.plot(x=t, y="g2", marker="o", ls="--", logx=T, ylim=ylim) - plt.xlabel("time delay, ns", fontsize=12) + def show(self,g2p,title): + t = g2p.t + N = len( g2p ) + ylim = [g2p.g2.min(),g2p[1:N].g2.max()] + g2p.plot(x=t,y='g2',marker='o',ls='--',logx=T,ylim=ylim); + plt.xlabel('time delay, ns',fontsize=12) plt.title(title) - plt.savefig(RES_DIR + title + ".png") + plt.savefig( RES_DIR + title +'.png' ) plt.show() + ###################################################### - + if False: - xp = xpcs() - # use the xpcs class + xp=xpcs(); #use the xpcs class dly = xp.delays() if T: fnum = 100 - g2 = xp.autocor(fnum) - filename = "g2_-%s-" % (fnum) - save(RES_DIR + FOUT + filename, g2) + g2=xp.autocor( fnum ) + filename='g2_-%s-'%(fnum) + save( RES_DIR + FOUT + filename, g2) ##g2= load(RES_DIR + FOUT + filename +'.npy') - g2p = xp.g2_to_pds(dly, g2, tscale=20) - xp.show(g2p, "g2_run_%s" % fnum) + g2p = xp.g2_to_pds(dly,g2, tscale = 20) + xp.show(g2p,'g2_run_%s'%fnum) From fc459286cb261672c761dc548eeec23c8c097a50 Mon Sep 17 00:00:00 2001 From: Andrei Fluerasu Date: Wed, 1 May 2024 11:24:06 -0400 Subject: [PATCH 2/6] Updates 
from Lutz on May 1, 2024 --- pyCHX/chx_generic_functions.py | 148 ++++++++++++++++++++---------- pyCHX/chx_outlier_detection.py | 98 ++++++++++++++++++++ pyCHX/chx_xpcs_xsvs_jupyter_V1.py | 6 +- 3 files changed, 201 insertions(+), 51 deletions(-) create mode 100644 pyCHX/chx_outlier_detection.py diff --git a/pyCHX/chx_generic_functions.py b/pyCHX/chx_generic_functions.py index d0a88e0..5a04f5f 100644 --- a/pyCHX/chx_generic_functions.py +++ b/pyCHX/chx_generic_functions.py @@ -3135,73 +3135,123 @@ def get_full_data_path( uid ): #print(p,p2) return p + '_' + str(p2) + '_master.h5' - - -def get_sid_filenames(header): - """YG. Dev Jan, 2016 - Get a bluesky scan_id, unique_id, filename by giveing uid - - Parameters - ---------- - header: a header of a bluesky scan, e.g. db[-1] - - Returns - ------- - scan_id: integer - unique_id: string, a full string of a uid - filename: sring - - Usuage: - sid,uid, filenames = get_sid_filenames(db[uid]) - +def get_sid_filenames(hdr,verbose=False): """ - from collections import defaultdict - from glob import glob - from pathlib import Path - - filepaths = [] - resources = {} # uid: document - datums = defaultdict(list) # uid: List(document) - for name, doc in header.documents(): - if name == "resource": - resources[doc["uid"]] = doc - elif name == "datum": - datums[doc["resource"]].append(doc) - elif name == "datum_page": - for datum in event_model.unpack_datum_page(doc): - datums[datum["resource"]].append(datum) - for resource_uid, resource in resources.items(): - file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) - if 'eiger' not in resource['spec'].lower(): - continue - for datum in datums[resource_uid]: - dm_kw = datum["datum_kwargs"] - seq_id = dm_kw['seq_id'] - new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') - filepaths.extend(new_filepaths) - return header.start['scan_id'], header.start['uid'], filepaths - -def load_dask_data(uid,detector,reverse=False,rot90=False): + get scan_id, uid and detector filename from databroker + get_sid_filenames(hdr,verbose=False) + hdr = db[uid] + returns (scan_id, uid, filepath) + LW 04/30/2024 + """ + import glob + from time import strftime, localtime + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5")) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2])==0: + if verbose: print('could not find detector filename from "data_path" in metadata: %s'%start_doc['data path']) + else: + if verbose: print('Found detector filename from "data_path" in metadata!');success=True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(start_doc['time'])) + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('could not find detector filename in %s'%data_path) + else: + if verbose: print('Found detector filename in %s'%data_path);success=True + + if not success: # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(stop_doc['time'])) + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if 
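
# Minimal usage sketch for the glob-based lookup defined here (uid hypothetical;
# assumes the usual databroker handle `db`):
hdr = db['af8f66']
scan_id, uid, filepaths = get_sid_filenames(hdr, verbose=True)
if not filepaths:
    print('no *_master.h5 file found for %s' % uid)
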
len(ret[2])==0: + if verbose: print('Sorry, could not find detector filename....') + else: + if verbose: print('Found detector filename in %s'%data_path);success=True + return ret + + +# def get_sid_filenames(header): +# """YG. Dev Jan, 2016 +# Get a bluesky scan_id, unique_id, filename by giveing uid + +# Parameters +# ---------- +# header: a header of a bluesky scan, e.g. db[-1] + +# Returns +# ------- +# scan_id: integer +# unique_id: string, a full string of a uid +# filename: sring + +# Usuage: +# sid,uid, filenames = get_sid_filenames(db[uid]) + +# """ +# from collections import defaultdict +# from glob import glob +# from pathlib import Path + +# filepaths = [] +# resources = {} # uid: document +# datums = defaultdict(list) # uid: List(document) +# for name, doc in header.documents(): +# if name == "resource": +# resources[doc["uid"]] = doc +# elif name == "datum": +# datums[doc["resource"]].append(doc) +# elif name == "datum_page": +# for datum in event_model.unpack_datum_page(doc): +# datums[datum["resource"]].append(datum) +# for resource_uid, resource in resources.items(): +# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) +# if 'eiger' not in resource['spec'].lower(): +# continue +# for datum in datums[resource_uid]: +# dm_kw = datum["datum_kwargs"] +# seq_id = dm_kw['seq_id'] +# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') +# filepaths.extend(new_filepaths) +# return header.start['scan_id'], header.start['uid'], filepaths + +def load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False): """ load data as dask-array get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) + get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...) 
+def load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False):
     """
     load data as dask-array
     get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image)
+    get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...)
-    load_dask_data(uid,detector,reverse=False,rot90=False)
+    load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False)
+    uid: uid (str)
+    detector: md['detector']
+    mask_path_full: current standard would be _mask_path_+'pixel_masks/'
     returns detector_images(dask-array), image_md
     LW 04/26/2024
     """
     import dask
     hdr=db[uid]
     det=detector.split('_image')[0]
-    # collect image metadata
+    # collect image metadata from loading single image
     img_md_dict={'detector_distance':'det_distance','incident_wavelength':'wavelength','frame_time':'cam_acquire_period','count_time':'cam_acquire_time','num_images':'cam_num_images','beam_center_x':'beam_center_x','beam_center_y':'beam_center_y'}
     img_md={}
     for k in list(img_md_dict.keys()):
         img_md[k]=hdr.config_data(det)['primary'][0]['%s_%s'%(det,img_md_dict[k])]
     if md['detector'] in ['eiger4m_single_image','eiger1m_single_image','eiger500K_single_image']:
         img_md.update({'y_pixel_size': 7.5e-05, 'x_pixel_size': 7.5e-05})
-    else: img_md.update({'y_pixel_size': None, 'x_pixel_size': None})
+        got_pixel_mask=True
+    else:
+        img_md.update({'y_pixel_size': None, 'x_pixel_size': None})
+        got_pixel_mask=False
+    # load pixel mask from static location (mask_path_full, see docstring above)
+    if got_pixel_mask:
+        json_open=open(mask_path_full+'pixel_mask_compression_%s.json'%detector.split('_')[0])
+        mask_dict=json.load(json_open)
+        img_md['pixel_mask']=np.array(mask_dict['pixel_mask'])
+        img_md['binary_mask']=np.array(mask_dict['binary_mask'])
+        del mask_dict
+
     # load image data as dask-array:
-    #raise Exception('this was supposed to break!')
     dimg=hdr.xarray_dask()[md['detector']][0]
     if reverse:
         dimg=dask.array.flip(dimg,axis=(0,1))
diff --git a/pyCHX/chx_outlier_detection.py b/pyCHX/chx_outlier_detection.py
new file mode 100644
index 0000000..e211742
--- /dev/null
+++ b/pyCHX/chx_outlier_detection.py
@@ -0,0 +1,98 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import skbeam.core.roi as roi  # provides roi.roi_pixel_values() used below
+
+def is_outlier(points, thresh=3.5, verbose=False):
+    """MAD test
+    """
+    points = np.asarray(points)
+    if len(points) == 1:
+        points = points[:, None]
+        if verbose:
+            print('input to is_outlier is a single point...')
+    median = np.median(points) * np.ones(np.shape(points))
+
+    diff = np.sqrt((points - median) ** 2)  # |x - median|
+    med_abs_deviation = np.median(diff)
+    modified_z_score = .6745 * diff / med_abs_deviation
+    return modified_z_score > thresh
+
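For reference, is_outlier() implements the modified z-score test: a point x is flagged when 0.6745*|x - median| / MAD > thresh, with MAD = median(|x - median|); thresh=3.5 is the commonly recommended cutoff for this statistic. A small runnable example with made-up intensities:

    import numpy as np

    points = np.array([1.0, 1.1, 0.9, 1.05, 0.95, 12.0])  # one obvious hot value
    median = np.median(points)
    mad = np.median(np.abs(points - median))
    modified_z = 0.6745 * np.abs(points - median) / mad
    print(modified_z > 3.5)  # -> [False False False False False  True]

Note that MAD is zero when more than half of the input values are identical, which makes the score divide by zero; for the masked ROI intensities this is applied to, that is unlikely but not impossible.
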
+def outlier_mask(avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=.1, verbose=False, plot=False):
+    """
+    outlier_mask(avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=.1, verbose=False, plot=False)
+    avg_img: average image data (2D)
+    mask: 2D array, same size as avg_img, with pixels that are already masked
+    roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5)
+    outlier_threshold: threshold for MAD test
+    maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classified as outliers. If the detected fraction is higher, no outliers will be masked for that ROI.
+    verbose: 'True' enables message output
+    plot: 'True' enables visualization of outliers
+    returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else
+    dependency: is_outlier()
+
+    function does outlier detection for each ROI separately, based on pixel intensity in avg_img*mask and the ROI specified by roi_mask, using the median-absolute-deviation (MAD) method
+
+    by LW 06/21/2023
+    """
+    hhmask = np.ones(np.shape(roi_mask))
+    pc = 1
+
+    for rn in np.arange(1, np.max(roi_mask)+1, 1):
+        # reset per ROI, so thresholds from a previous ROI cannot leak into one without outliers
+        upper_outlier_threshold = False
+        lower_outlier_threshold = False
+        rm = np.zeros(np.shape(roi_mask)); rm = rm-1; rm[np.where(roi_mask == rn)] = 1
+        pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn])
+        out_l = is_outlier((avg_img*mask*rm)[rm > -1], thresh=outlier_threshold)
+        if np.nanmax(out_l) > 0:  # did detect at least one outlier
+            ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1])
+            if verbose: print('ROI #%s\naverage ROI intensity: %s' % (rn, ave_roi_int))
+            try:
+                upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0] > ave_roi_int])
+                if verbose: print('upper outlier threshold: %s' % upper_outlier_threshold)
+            except:
+                upper_outlier_threshold = False
+                if verbose: print('no upper outlier threshold found')
+            ind1 = (out_l*pixel[0][0]) > 0; ind2 = (out_l*pixel[0][0]) < ave_roi_int
+            try:
+                lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2])
+            except:
+                lower_outlier_threshold = False
+                if verbose: print('no lower outlier threshold found')
+        else:
+            if verbose: print('ROI #%s: no outliers detected' % rn)
+
+        ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi
+        outlier_fraction = np.sum(out_l)/len(pixel[0][0])
+        if verbose: print('fraction of pixel values detected as outliers: %s' % np.round(outlier_fraction, 2))
+        if outlier_fraction > maximum_outlier_fraction:
+            if verbose: print('fraction of pixel values detected as outliers > maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed' % maximum_outlier_fraction)
+            upper_outlier_threshold = False; lower_outlier_threshold = False
+
+        if upper_outlier_threshold:
+            hhmask[avg_img*rm > upper_outlier_threshold] = 0
+        if lower_outlier_threshold:
+            hhmask[avg_img*rm < lower_outlier_threshold] = 0
+
+        if plot:
+            if pc == 1: fig, ax = plt.subplots(1, 5, figsize=(24, 4))
+            plt.subplot(1, 5, pc); pc += 1
+            if pc > 5: pc = 1
+            pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn])
+            plt.plot(pixel[0][0], 'bo', markersize=1.5)
+            if upper_outlier_threshold or lower_outlier_threshold:
+                x = np.arange(len(out_l))
+                plt.plot([x[0], x[-1]], [ave_roi_int, ave_roi_int], 'g--', label='ROI average: %s' % np.round(ave_roi_int, 4))
+            if upper_outlier_threshold:
+                ind = (out_l*pixel[0][0]) > upper_outlier_threshold
+                plt.plot(x[ind], (out_l*pixel[0][0])[ind], 'r+')
+                plt.plot([x[0], x[-1]], [upper_outlier_threshold, upper_outlier_threshold], 'r--', label='upper thresh.: %s' % np.round(upper_outlier_threshold, 4))
+            if lower_outlier_threshold:
+                ind = (out_l*pixel[0][0]) < lower_outlier_threshold
+                plt.plot(x[ind], (out_l*pixel[0][0])[ind], 'r+')
+                plt.plot([x[0], x[-1]], [lower_outlier_threshold, lower_outlier_threshold], 'r--', label='lower thresh.: %s' % np.round(lower_outlier_threshold, 4))
+            plt.ylabel('Intensity'); plt.xlabel('pixel'); plt.title('ROI #: %s' % rn); plt.legend(loc='best', fontsize=8)
+
+    if plot:
+        fig, ax = plt.subplots()
+        plt.imshow(hhmask)
+        hot_dark = np.nonzero(hhmask < 1)
+        cmap = plt.cm.get_cmap('viridis')
+        plt.plot(hot_dark[1], hot_dark[0], '+', color=cmap(0))
+        plt.xlabel('pixel'); plt.ylabel('pixel'); plt.title('masked pixels with outlier
threshold: %s'%outlier_threshold) + + return hhmask diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py index 6b10886..9755142 100644 --- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py @@ -3,9 +3,11 @@ #from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict #RUN_GUI = False #from pyCHX.chx_libs import markers +from IPython import get_ipython import pandas as pds -# temporary fix: get_data() uses depreciated np.float and gets imported from pyCHX/chx_correlationc.py -> clobber function with temporary fix: -%run /nsls2/data/chx/legacy/analysis/2022_3/lwiegart/development/chx_analysis_setup.ipynb + +ip = get_ipython() +ip.run_line_magic("run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb") def get_t_iqc_uids( uid_list, setup_pargs, slice_num= 10, slice_width= 1): '''Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids From 99c855660327e7676cdc1e70edff4e46a0519712 Mon Sep 17 00:00:00 2001 From: Max Rakitin Date: Wed, 1 May 2024 11:38:46 -0400 Subject: [PATCH 3/6] STY: Apply `pre-commit` checks --- .pre-commit-config.yaml | 9 +- pyCHX/Badpixels.py | 1 + pyCHX/Create_Report.py | 3050 +++++++------- pyCHX/chx_Fitters2D.py | 5 +- pyCHX/chx_compress.py | 2042 +++++---- pyCHX/chx_correlationc.py | 1793 ++++---- pyCHX/chx_correlationp.py | 1 + pyCHX/chx_correlationp2.py | 1 + pyCHX/chx_generic_functions.py | 6469 ++++++++++++++++------------- pyCHX/chx_libs.py | 1 + pyCHX/chx_outlier_detection.py | 147 +- pyCHX/chx_speckle.py | 4 +- pyCHX/chx_specklecp.py | 4 +- pyCHX/chx_xpcs_xsvs_jupyter_V1.py | 3289 +++++++++------ pyCHX/xpcs_timepixel.py | 1431 ++++--- 15 files changed, 10195 insertions(+), 8052 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f9a888a..9b86b19 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,22 +1,23 @@ +exclude: '(v2)/.*' default_language_version: python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.6.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/ambv/black - rev: 23.1.0 + rev: 24.4.2 hooks: - id: black - repo: https://github.com/pycqa/isort - rev: 5.12.0 + rev: 5.13.2 hooks: - id: isort args: ["--profile", "black"] - repo: https://github.com/kynan/nbstripout - rev: 0.6.1 + rev: 0.7.1 hooks: - id: nbstripout diff --git a/pyCHX/Badpixels.py b/pyCHX/Badpixels.py index c90714a..7b7dc5b 100644 --- a/pyCHX/Badpixels.py +++ b/pyCHX/Badpixels.py @@ -1,4 +1,5 @@ """Dev@Octo12,2017""" + import numpy as np damaged_4Mpixel = np.array( diff --git a/pyCHX/Create_Report.py b/pyCHX/Create_Report.py index f434328..bfb7b30 100644 --- a/pyCHX/Create_Report.py +++ b/pyCHX/Create_Report.py @@ -1,9 +1,9 @@ -''' +""" Yugang Created at Aug 08, 2016, CHX-NSLS-II Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline -How to use: +How to use: python Create_Report.py full_file_path uid output_dir (option) An exmplae to use: @@ -11,198 +11,202 @@ python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/ -''' +""" -def check_dict_keys( dicts, key): - if key not in list(dicts.keys()): - dicts[key] = 'unknown' - - -import h5py +def check_dict_keys(dicts, key): + if key not in list(dicts.keys()): + dicts[key] = "unknown" -from reportlab.pdfgen import 
canvas -from reportlab.lib.units import inch, cm , mm -from reportlab.lib.colors import pink, green, brown, white, black, red, blue +import os +import sys +from datetime import datetime +from time import time +import h5py +import numpy as np +import pandas as pds +from PIL import Image +from reportlab.lib.colors import black, blue, brown, green, pink, red, white +from reportlab.lib.pagesizes import A4, letter from reportlab.lib.styles import getSampleStyleSheet -#from reportlab.platypus import Image, Paragraph, Table +from reportlab.lib.units import cm, inch, mm +from reportlab.pdfgen import canvas -from reportlab.lib.pagesizes import letter, A4 -from pyCHX.chx_generic_functions import (pload_obj ) +from pyCHX.chx_generic_functions import pload_obj +# from reportlab.platypus import Image, Paragraph, Table -from PIL import Image -from time import time -from datetime import datetime -import sys,os -import pandas as pds -import numpy as np +def add_one_line_string(c, s, top, left=30, fontsize=11): + if (fontsize * len(s)) > 1000: + fontsize = 1000.0 / (len(s)) + c.setFont("Helvetica", fontsize) + c.drawString(left, top, s) -def add_one_line_string( c, s, top, left=30, fontsize = 11 ): - if (fontsize*len(s )) >1000: - fontsize = 1000./(len(s)) - c.setFont("Helvetica", fontsize ) - c.drawString(left, top, s) - - - -def add_image_string( c, imgf, data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top, return_ = False ): - - image = data_dir + imgf +def add_image_string( + c, imgf, data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top, return_=False +): + + image = data_dir + imgf if os.path.exists(image): - im = Image.open( image ) - ratio = float(im.size[1])/im.size[0] - height= img_height - width = height/ratio - #if width>400: + im = Image.open(image) + ratio = float(im.size[1]) / im.size[0] + height = img_height + width = height / ratio + # if width>400: # width = 350 # height = width*ratio - c.drawImage( image, img_left, img_top, width= width,height=height,mask=None) + c.drawImage(image, img_left, img_top, width=width, height=height, mask=None) c.setFont("Helvetica", 16) - c.setFillColor( blue ) - c.drawString(str1_left, str1_top,str1 ) + c.setFillColor(blue) + c.drawString(str1_left, str1_top, str1) c.setFont("Helvetica", 12) - c.setFillColor(red) - c.drawString(str2_left, str2_top, 'filename: %s'%imgf ) + c.setFillColor(red) + c.drawString(str2_left, str2_top, "filename: %s" % imgf) if return_: - return height/ratio - + return height / ratio + else: - c.setFillColor( blue ) - c.drawString( str1_left, str1_top, str1) - c.setFillColor(red) - c.drawString( str1_left, str1_top -40, '-->Not Calculated!' 
) - - - -class create_pdf_report( object ): - - '''Aug 16, YG@CHX-NSLS-II - Create a pdf report by giving data_dir, uid, out_dir - data_dir: the input data directory, including all necessary images - the images names should be: - meta_file = 'uid=%s-md'%uid - avg_img_file = 'uid=%s--img-avg-.png'%uid - ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid - qiq_file = 'uid=%s--Circular-Average-.png'%uid - ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid - - Iq_t_file = 'uid=%s--Iq-t-.png'%uid - img_sum_t_file = 'uid=%s--img-sum-t.png'%uid - wat_file= 'uid=%s--Waterfall-.png'%uid - Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid - - g2_file = 'uid=%s--g2-.png'%uid - g2_fit_file = 'uid=%s--g2--fit-.png'%uid - q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid - - two_time_file = 'uid=%s--Two-time-.png'%uid - two_g2_file = 'uid=%s--g2--two-g2-.png'%uid - - uid: the unique id - out_dir: the output directory - report_type: - 'saxs': report saxs results - 'gisaxs': report gisaxs results - - - Output: - A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder - ''' - - def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None, - report_type='saxs',md=None, res_h5_filename=None ): + c.setFillColor(blue) + c.drawString(str1_left, str1_top, str1) + c.setFillColor(red) + c.drawString(str1_left, str1_top - 40, "-->Not Calculated!") + + +class create_pdf_report(object): + """Aug 16, YG@CHX-NSLS-II + Create a pdf report by giving data_dir, uid, out_dir + data_dir: the input data directory, including all necessary images + the images names should be: + meta_file = 'uid=%s-md'%uid + avg_img_file = 'uid=%s--img-avg-.png'%uid + ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid + qiq_file = 'uid=%s--Circular-Average-.png'%uid + ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid + + Iq_t_file = 'uid=%s--Iq-t-.png'%uid + img_sum_t_file = 'uid=%s--img-sum-t.png'%uid + wat_file= 'uid=%s--Waterfall-.png'%uid + Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid + + g2_file = 'uid=%s--g2-.png'%uid + g2_fit_file = 'uid=%s--g2--fit-.png'%uid + q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid + + two_time_file = 'uid=%s--Two-time-.png'%uid + two_g2_file = 'uid=%s--g2--two-g2-.png'%uid + + uid: the unique id + out_dir: the output directory + report_type: + 'saxs': report saxs results + 'gisaxs': report gisaxs results + + + Output: + A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder + """ + + def __init__( + self, + data_dir, + uid, + out_dir=None, + filename=None, + load=True, + user=None, + report_type="saxs", + md=None, + res_h5_filename=None, + ): from datetime import datetime + self.data_dir = data_dir self.uid = uid - self.md = md - #print(md) + self.md = md + # print(md) if user is None: - user = 'chx' + user = "chx" self.user = user if out_dir is None: - out_dir = data_dir + out_dir = data_dir if not os.path.exists(out_dir): os.makedirs(out_dir) - self.out_dir=out_dir - + self.out_dir = out_dir + self.styles = getSampleStyleSheet() self.width, self.height = letter - - self.report_type = report_type - dt =datetime.now() - CurTime = '%02d/%02d/%s/-%02d/%02d/' % ( dt.month, dt.day, dt.year,dt.hour,dt.minute) + + self.report_type = report_type + dt = datetime.now() + CurTime = "%02d/%02d/%s/-%02d/%02d/" % (dt.month, dt.day, dt.year, dt.hour, dt.minute) self.CurTime = CurTime if filename is None: - filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid - filename=out_dir + filename - c = canvas.Canvas( filename, pagesize=letter) 
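
For readers unfamiliar with reportlab, the class above drives its low-level canvas API directly. A minimal, self-contained sketch of that pattern, with a placeholder filename and text rather than pipeline output:

    from reportlab.lib.pagesizes import letter
    from reportlab.pdfgen import canvas

    c = canvas.Canvas("XPCS_Analysis_Report_for_uid=demo.pdf", pagesize=letter)
    c.setTitle("XPCS Analysis Report for uid=demo")
    c.setFont("Helvetica", 14)
    c.drawString(50, 760, "XPCS Analysis Report for uid=demo")  # coordinates in points, origin at bottom-left
    c.showPage()  # close the current page
    c.save()      # write the PDF to disk

The report methods below all follow this sequence: position text and images on the canvas, then showPage()/save() once a page (or the report) is complete.
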
- self.filename= filename + filename = "XPCS_Analysis_Report_for_uid=%s.pdf" % uid + filename = out_dir + filename + c = canvas.Canvas(filename, pagesize=letter) + self.filename = filename self.res_h5_filename = res_h5_filename - #c.setTitle("XPCS Analysis Report for uid=%s"%uid) + # c.setTitle("XPCS Analysis Report for uid=%s"%uid) c.setTitle(filename) self.c = c if load: self.load_metadata() - + def load_metadata(self): - uid=self.uid + uid = self.uid data_dir = self.data_dir - #load metadata - meta_file = 'uid=%s_md'%uid - self.metafile = data_dir + meta_file - if self.md is None: - md = pload_obj( data_dir + meta_file ) + # load metadata + meta_file = "uid=%s_md" % uid + self.metafile = data_dir + meta_file + if self.md is None: + md = pload_obj(data_dir + meta_file) self.md = md - else: + else: md = self.md - #print('Get md from giving md') - #print(md) - self.sub_title_num = 0 + # print('Get md from giving md') + # print(md) + self.sub_title_num = 0 uid_g2 = None uid_c12 = None - if 'uid_g2' in list(md.keys()): - uid_g2 = md['uid_g2'] - if 'uid_c12' in list(md.keys()): - uid_c12 = md['uid_c12'] - - '''global definition''' - - if 'beg_OneTime' in list( md.keys()): - beg_OneTime = md['beg_OneTime'] - end_OneTime = md['end_OneTime'] + if "uid_g2" in list(md.keys()): + uid_g2 = md["uid_g2"] + if "uid_c12" in list(md.keys()): + uid_c12 = md["uid_c12"] + + """global definition""" + + if "beg_OneTime" in list(md.keys()): + beg_OneTime = md["beg_OneTime"] + end_OneTime = md["end_OneTime"] else: beg_OneTime = None end_OneTime = None - - if 'beg_TwoTime' in list( md.keys()): - beg_TwoTime = md['beg_TwoTime'] - end_TwoTime = md['end_TwoTime'] + + if "beg_TwoTime" in list(md.keys()): + beg_TwoTime = md["beg_TwoTime"] + end_TwoTime = md["end_TwoTime"] else: beg_TwoTime = None - end_TwoTime = None - - + end_TwoTime = None + try: - beg = md['beg'] - end= md['end'] - uid_ = uid + '_fra_%s_%s'%(beg, end) + beg = md["beg"] + end = md["end"] + uid_ = uid + "_fra_%s_%s" % (beg, end) if beg_OneTime is None: - uid_OneTime = uid + '_fra_%s_%s'%(beg, end) + uid_OneTime = uid + "_fra_%s_%s" % (beg, end) else: - uid_OneTime = uid + '_fra_%s_%s'%(beg_OneTime, end_OneTime) + uid_OneTime = uid + "_fra_%s_%s" % (beg_OneTime, end_OneTime) if beg_TwoTime is None: - uid_TwoTime = uid + '_fra_%s_%s'%(beg, end) + uid_TwoTime = uid + "_fra_%s_%s" % (beg, end) else: - uid_TwoTime = uid + '_fra_%s_%s'%(beg_TwoTime, end_TwoTime) - + uid_TwoTime = uid + "_fra_%s_%s" % (beg_TwoTime, end_TwoTime) + except: uid_ = uid uid_OneTime = uid @@ -210,223 +214,227 @@ def load_metadata(self): uid_ = uid uid_OneTime = uid - self.avg_img_file = 'uid=%s_img_avg.png'%uid - self.ROI_on_img_file = 'uid=%s_ROI_on_Image.png'%uid - - self.qiq_file = 'uid=%s_q_Iq.png'%uid - self.qiq_fit_file = 'uid=%s_form_factor_fit.png'%uid - #self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid - if self.report_type =='saxs' or self.report_type =='ang_saxs': - self.ROI_on_Iq_file = 'uid=%s_ROI_on_Iq.png'%uid - - elif self.report_type =='gi_saxs': - self.ROI_on_Iq_file = 'uid=%s_Qr_ROI.png'%uid - - self.Iq_t_file = 'uid=%s_q_Iqt.png'%uid - self.img_sum_t_file = 'uid=%s_img_sum_t.png'%uid - self.wat_file= 'uid=%s_waterfall.png'%uid - self.Mean_inten_t_file= 'uid=%s_t_ROIs.png'%uid - self.oavs_file = 'uid=%s_OAVS.png'%uid - - if uid_g2 is None: - uid_g2 = uid_OneTime - self.g2_file = 'uid=%s_g2.png'%uid_g2 - self.g2_fit_file = 'uid=%s_g2_fit.png'%uid_g2 - #print( self.g2_fit_file ) + self.avg_img_file = "uid=%s_img_avg.png" % uid + self.ROI_on_img_file = 
"uid=%s_ROI_on_Image.png" % uid + + self.qiq_file = "uid=%s_q_Iq.png" % uid + self.qiq_fit_file = "uid=%s_form_factor_fit.png" % uid + # self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid + if self.report_type == "saxs" or self.report_type == "ang_saxs": + self.ROI_on_Iq_file = "uid=%s_ROI_on_Iq.png" % uid + + elif self.report_type == "gi_saxs": + self.ROI_on_Iq_file = "uid=%s_Qr_ROI.png" % uid + + self.Iq_t_file = "uid=%s_q_Iqt.png" % uid + self.img_sum_t_file = "uid=%s_img_sum_t.png" % uid + self.wat_file = "uid=%s_waterfall.png" % uid + self.Mean_inten_t_file = "uid=%s_t_ROIs.png" % uid + self.oavs_file = "uid=%s_OAVS.png" % uid + + if uid_g2 is None: + uid_g2 = uid_OneTime + self.g2_file = "uid=%s_g2.png" % uid_g2 + self.g2_fit_file = "uid=%s_g2_fit.png" % uid_g2 + # print( self.g2_fit_file ) self.g2_new_page = False self.g2_fit_new_page = False - if self.report_type =='saxs': - jfn = 'uid=%s_g2.png'%uid_g2 - if os.path.exists( data_dir + jfn): + if self.report_type == "saxs": + jfn = "uid=%s_g2.png" % uid_g2 + if os.path.exists(data_dir + jfn): self.g2_file = jfn else: - jfn = 'uid=%s_g2__joint.png'%uid_g2 - if os.path.exists( data_dir + jfn): + jfn = "uid=%s_g2__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): self.g2_file = jfn - self.g2_new_page = True - #self.g2_new_page = True - jfn = 'uid=%s_g2_fit.png'%uid_g2 - if os.path.exists(data_dir + jfn ): + self.g2_new_page = True + # self.g2_new_page = True + jfn = "uid=%s_g2_fit.png" % uid_g2 + if os.path.exists(data_dir + jfn): self.g2_fit_file = jfn - #self.g2_fit_new_page = True + # self.g2_fit_new_page = True else: - jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 - if os.path.exists(data_dir + jfn ): + jfn = "uid=%s_g2_fit__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): self.g2_fit_file = jfn - self.g2_fit_new_page = True - - else: - jfn = 'uid=%s_g2__joint.png'%uid_g2 - if os.path.exists( data_dir + jfn): + self.g2_fit_new_page = True + + else: + jfn = "uid=%s_g2__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): self.g2_file = jfn - self.g2_new_page = True - jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 - if os.path.exists(data_dir + jfn ): + self.g2_new_page = True + jfn = "uid=%s_g2_fit__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): self.g2_fit_file = jfn - self.g2_fit_new_page = True - - self.q_rate_file = 'uid=%s_Q_Rate_fit.png'%uid_g2 - self.q_rate_loglog_file = 'uid=%s_Q_Rate_loglog.png'%uid_g2 - self.g2_q_fitpara_file = 'uid=%s_g2_q_fitpara_plot.png'%uid_g2 - - - #print( self.q_rate_file ) + self.g2_fit_new_page = True + + self.q_rate_file = "uid=%s_Q_Rate_fit.png" % uid_g2 + self.q_rate_loglog_file = "uid=%s_Q_Rate_loglog.png" % uid_g2 + self.g2_q_fitpara_file = "uid=%s_g2_q_fitpara_plot.png" % uid_g2 + + # print( self.q_rate_file ) if uid_c12 is None: - uid_c12 = uid_ - self.q_rate_two_time_fit_file = 'uid=%s_two_time_Q_Rate_fit.png'%uid_c12 - #print( self.q_rate_two_time_fit_file ) - - self.two_time_file = 'uid=%s_Two_time.png'%uid_c12 - self.two_g2_file = 'uid=%s_g2_two_g2.png'%uid_c12 - - if self.report_type =='saxs': - - jfn = 'uid=%s_g2_two_g2.png'%uid_c12 + uid_c12 = uid_ + self.q_rate_two_time_fit_file = "uid=%s_two_time_Q_Rate_fit.png" % uid_c12 + # print( self.q_rate_two_time_fit_file ) + + self.two_time_file = "uid=%s_Two_time.png" % uid_c12 + self.two_g2_file = "uid=%s_g2_two_g2.png" % uid_c12 + + if self.report_type == "saxs": + + jfn = "uid=%s_g2_two_g2.png" % uid_c12 self.two_g2_new_page = False - if os.path.exists( data_dir + jfn ): - #print( 'Here we go') + if os.path.exists(data_dir + 
jfn): + # print( 'Here we go') self.two_g2_file = jfn - #self.two_g2_new_page = True - else: - jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 + # self.two_g2_new_page = True + else: + jfn = "uid=%s_g2_two_g2__joint.png" % uid_c12 self.two_g2_new_page = False - if os.path.exists( data_dir + jfn ): - #print( 'Here we go') + if os.path.exists(data_dir + jfn): + # print( 'Here we go') self.two_g2_file = jfn - self.two_g2_new_page = True - else: - jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 + self.two_g2_new_page = True + else: + jfn = "uid=%s_g2_two_g2__joint.png" % uid_c12 self.two_g2_new_page = False - if os.path.exists( data_dir + jfn ): - #print( 'Here we go') + if os.path.exists(data_dir + jfn): + # print( 'Here we go') self.two_g2_file = jfn - self.two_g2_new_page = True - - - self.four_time_file = 'uid=%s_g4.png'%uid_ - jfn = 'uid=%s_g4__joint.png'%uid_ + self.two_g2_new_page = True + + self.four_time_file = "uid=%s_g4.png" % uid_ + jfn = "uid=%s_g4__joint.png" % uid_ self.g4_new_page = False - if os.path.exists( data_dir + jfn ): + if os.path.exists(data_dir + jfn): self.four_time_file = jfn - self.g4_new_page = True - - self.xsvs_fit_file = 'uid=%s_xsvs_fit.png'%uid_ - self.contrast_file = 'uid=%s_contrast.png'%uid_ - self.dose_file = 'uid=%s_dose_analysis.png'%uid_ - - jfn = 'uid=%s_dose_analysis__joint.png'%uid_ + self.g4_new_page = True + + self.xsvs_fit_file = "uid=%s_xsvs_fit.png" % uid_ + self.contrast_file = "uid=%s_contrast.png" % uid_ + self.dose_file = "uid=%s_dose_analysis.png" % uid_ + + jfn = "uid=%s_dose_analysis__joint.png" % uid_ self.dose_file_new_page = False - if os.path.exists( data_dir + jfn ): - self.dose_file = jfn + if os.path.exists(data_dir + jfn): + self.dose_file = jfn self.dose_file_new_page = True - - #print( self.dose_file ) + + # print( self.dose_file ) if False: - self.flow_g2v = 'uid=%s_1a_mqv_g2_v_fit.png'%uid_ - self.flow_g2p = 'uid=%s_1a_mqp_g2_p_fit.png'%uid_ - self.flow_g2v_rate_fit = 'uid=%s_v_fit_rate_Q_Rate_fit.png'%uid_ - self.flow_g2p_rate_fit = 'uid=%s_p_fit_rate_Q_Rate_fit.png'%uid_ - - if True: - self.two_time = 'uid=%s_pv_two_time.png'%uid_ - #self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ - - #self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_ - #self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ - self.flow_g2_g2b_p = 'uid=%s_g2_two_g2_p.png'%uid_ - self.flow_g2_g2b_v = 'uid=%s_g2_two_g2_v.png'%uid_ - - self.flow_g2bv_rate_fit = 'uid=%s_vertb_Q_Rate_fit.png'%uid_ - self.flow_g2bp_rate_fit = 'uid=%s_parab_Q_Rate_fit.png'%uid_ - - self.flow_g2v = 'uid=%s_g2_v_fit.png'%uid_ - self.flow_g2p = 'uid=%s_g2_p_fit.png'%uid_ - self.flow_g2v_rate_fit = 'uid=%s_vert_Q_Rate_fit.png'%uid_ - self.flow_g2p_rate_fit = 'uid=%s_para_Q_Rate_fit.png'%uid_ - - #self.report_header(page=1, top=730, new_page=False) - #self.report_meta(new_page=False) - - self.q2Iq_file = 'uid=%s_q2_iq.png'%uid - self.iq_invariant_file = 'uid=%s_iq_invariant.png'%uid - - def report_invariant( self, top= 300, new_page=False): - '''create the invariant analysis report - two images: - ROI on average intensity image - ROI on circular average - ''' - uid=self.uid - c= self.c - #add sub-title, static images + self.flow_g2v = "uid=%s_1a_mqv_g2_v_fit.png" % uid_ + self.flow_g2p = "uid=%s_1a_mqp_g2_p_fit.png" % uid_ + self.flow_g2v_rate_fit = "uid=%s_v_fit_rate_Q_Rate_fit.png" % uid_ + self.flow_g2p_rate_fit = "uid=%s_p_fit_rate_Q_Rate_fit.png" % uid_ + + if True: + self.two_time = "uid=%s_pv_two_time.png" % uid_ + # self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ + + # self.flow_g2bv = 
'uid=%s_g2b_v_fit.png'%uid_ + # self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ + self.flow_g2_g2b_p = "uid=%s_g2_two_g2_p.png" % uid_ + self.flow_g2_g2b_v = "uid=%s_g2_two_g2_v.png" % uid_ + + self.flow_g2bv_rate_fit = "uid=%s_vertb_Q_Rate_fit.png" % uid_ + self.flow_g2bp_rate_fit = "uid=%s_parab_Q_Rate_fit.png" % uid_ + + self.flow_g2v = "uid=%s_g2_v_fit.png" % uid_ + self.flow_g2p = "uid=%s_g2_p_fit.png" % uid_ + self.flow_g2v_rate_fit = "uid=%s_vert_Q_Rate_fit.png" % uid_ + self.flow_g2p_rate_fit = "uid=%s_para_Q_Rate_fit.png" % uid_ + + # self.report_header(page=1, top=730, new_page=False) + # self.report_meta(new_page=False) + + self.q2Iq_file = "uid=%s_q2_iq.png" % uid + self.iq_invariant_file = "uid=%s_iq_invariant.png" % uid + + def report_invariant(self, top=300, new_page=False): + """create the invariant analysis report + two images: + ROI on average intensity image + ROI on circular average + """ + uid = self.uid + c = self.c + # add sub-title, static images c.setFillColor(black) - c.setFont("Helvetica", 20) + c.setFont("Helvetica", 20) ds = 230 - self.sub_title_num +=1 - c.drawString(10, top, "%s. I(q) Invariant Analysis"%self.sub_title_num ) #add title - #add q2Iq - c.setFont("Helvetica", 14) - imgf = self.q2Iq_file - #print( imgf ) - label = 'q^2*I(q)' - add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=180, - str1_left=110, str1_top = top-35,str1=label, - str2_left = 60, str2_top = top -320 ) - - #add iq_invariant + self.sub_title_num += 1 + c.drawString(10, top, "%s. I(q) Invariant Analysis" % self.sub_title_num) # add title + # add q2Iq + c.setFont("Helvetica", 14) + imgf = self.q2Iq_file + # print( imgf ) + label = "q^2*I(q)" + add_image_string( + c, + imgf, + self.data_dir, + img_left=60, + img_top=top - ds * 1.15, + img_height=180, + str1_left=110, + str1_top=top - 35, + str1=label, + str2_left=60, + str2_top=top - 320, + ) + + # add iq_invariant imgf = self.iq_invariant_file - img_height= 180 - img_left,img_top =320, top - ds*1.15 - str1_left, str1_top,str1= 420, top- 35, 'I(q) Invariant' - str2_left, str2_top = 350, top- 320 + img_height = 180 + img_left, img_top = 320, top - ds * 1.15 + str1_left, str1_top, str1 = 420, top - 35, "I(q) Invariant" + str2_left, str2_top = 350, top - 320 - #print ( imgf ) + # print ( imgf ) - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) - if new_page: c.showPage() - c.save() - - - + c.save() + def report_header(self, page=1, new_page=False): - '''create headers, including title/page number''' - c= self.c + """create headers, including title/page number""" + c = self.c CurTime = self.CurTime - uid=self.uid - user=self.user + uid = self.uid + user = self.user c.setFillColor(black) c.setFont("Helvetica", 14) - #add page number - c.drawString(250, 10, "Page--%s--"%( page ) ) - #add time stamp - - #c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) - s_ = "Created at %s@CHX-By-%s"%( CurTime,user ) - add_one_line_string( c, s_, 10, left=350,fontsize = 11 ) - - #add title - #c.setFont("Helvetica", 22) - title = "XPCS Analysis Report for uid=%s"%uid - c.setFont("Helvetica", 1000/( len(title) ) ) - #c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title - c.drawString(50,760, "XPCS Analysis Report for uid=%s"%uid ) #add title - #add a line under title - c.setStrokeColor( red ) - 
c.setLineWidth(width=1.5) - c.line( 50, 750, 550, 750 ) + # add page number + c.drawString(250, 10, "Page--%s--" % (page)) + # add time stamp + + # c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) + s_ = "Created at %s@CHX-By-%s" % (CurTime, user) + add_one_line_string(c, s_, 10, left=350, fontsize=11) + + # add title + # c.setFont("Helvetica", 22) + title = "XPCS Analysis Report for uid=%s" % uid + c.setFont("Helvetica", 1000 / (len(title))) + # c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title + c.drawString(50, 760, "XPCS Analysis Report for uid=%s" % uid) # add title + # add a line under title + c.setStrokeColor(red) + c.setLineWidth(width=1.5) + c.line(50, 750, 550, 750) if new_page: c.showPage() c.save() - def report_meta(self, top=740, new_page=False): - '''create the meta data report, - the meta data include: + """create the meta data report, + the meta data include: uid Sample: Measurement @@ -435,1506 +443,1724 @@ def report_meta(self, top=740, new_page=False): Beam Center Mask file Data dir - Pipeline notebook - ''' + Pipeline notebook + """ - c=self.c - #load metadata + c = self.c + # load metadata md = self.md try: - uid = md['uid'] + uid = md["uid"] except: - uid=self.uid - #add sub-title, metadata - c.setFont("Helvetica", 20) + uid = self.uid + # add sub-title, metadata + c.setFont("Helvetica", 20) ds = 15 self.sub_title_num += 1 - c.drawString(10, top, "%s. Metadata"%self.sub_title_num ) #add title - top = top - 5 + c.drawString(10, top, "%s. Metadata" % self.sub_title_num) # add title + top = top - 5 fontsize = 11 - c.setFont("Helvetica", fontsize) - - nec_keys = [ 'sample', 'start_time', 'stop_time','Measurement' ,'exposure time' ,'incident_wavelength', 'cam_acquire_t', - 'frame_time','detector_distance', 'feedback_x', 'feedback_y', 'shutter mode', - 'beam_center_x', 'beam_center_y', 'beam_refl_center_x', 'beam_refl_center_y','mask_file','bad_frame_list', 'transmission', 'roi_mask_file'] + c.setFont("Helvetica", fontsize) + + nec_keys = [ + "sample", + "start_time", + "stop_time", + "Measurement", + "exposure time", + "incident_wavelength", + "cam_acquire_t", + "frame_time", + "detector_distance", + "feedback_x", + "feedback_y", + "shutter mode", + "beam_center_x", + "beam_center_y", + "beam_refl_center_x", + "beam_refl_center_y", + "mask_file", + "bad_frame_list", + "transmission", + "roi_mask_file", + ] for key in nec_keys: check_dict_keys(md, key) - - try:#try exp time from detector - exposuretime= md['count_time'] #exposure time in sec - except: - exposuretime= md['cam_acquire_time'] #exposure time in sec - - try:#try acq time from detector - acquisition_period = md['frame_time'] + + try: # try exp time from detector + exposuretime = md["count_time"] # exposure time in sec + except: + exposuretime = md["cam_acquire_time"] # exposure time in sec + + try: # try acq time from detector + acquisition_period = md["frame_time"] except: try: - acquisition_period = md['acquire period'] - except: - uid = md['uid'] - acquisition_period = float( db[uid]['start']['acquire period'] ) - - + acquisition_period = md["acquire period"] + except: + uid = md["uid"] + acquisition_period = float(db[uid]["start"]["acquire period"]) + s = [] - s.append( 'UID: %s'%uid ) ###line 1, for uid - s.append('Sample: %s'%md['sample'] ) ####line 2 sample - s.append('Data Acquisition From: %s To: %s'%(md['start_time'], md['stop_time']))####line 3 Data Acquisition time - s.append( 'Measurement: %s'%md['Measurement'] ) ####line 4 'Measurement - - #print( 
md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) - #print(acquisition_period) - s.append( 'Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms'%( md['incident_wavelength'], int(md['number of images']),round(float(exposuretime)*1000,4), round(float( acquisition_period )*1000,4) ) ) ####line 5 'lamda... - - s.append( 'Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s'%( - md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) ) ####line 6 'Detector-Sample Distance.. - if self.report_type == 'saxs': - s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) - elif self.report_type == 'gi_saxs': - s7= ('Incident Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + - ' || ' + - 'Reflect Center: [%s, %s] (pixel)'%(md['beam_refl_center_x'], md['beam_refl_center_y']) ) - elif self.report_type == 'ang_saxs' or self.report_type == 'gi_waxs' : - s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + s.append("UID: %s" % uid) ###line 1, for uid + s.append("Sample: %s" % md["sample"]) ####line 2 sample + s.append( + "Data Acquisition From: %s To: %s" % (md["start_time"], md["stop_time"]) + ) ####line 3 Data Acquisition time + s.append("Measurement: %s" % md["Measurement"]) ####line 4 'Measurement + + # print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) + # print(acquisition_period) + s.append( + "Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms" + % ( + md["incident_wavelength"], + int(md["number of images"]), + round(float(exposuretime) * 1000, 4), + round(float(acquisition_period) * 1000, 4), + ) + ) ####line 5 'lamda... + + s.append( + "Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s" + % (md["detector_distance"], md["feedback_x"], md["feedback_y"], md["shutter mode"]) + ) ####line 6 'Detector-Sample Distance.. + if self.report_type == "saxs": + s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) + elif self.report_type == "gi_saxs": + s7 = ( + "Incident Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) + + " || " + + "Reflect Center: [%s, %s] (pixel)" % (md["beam_refl_center_x"], md["beam_refl_center_y"]) + ) + elif self.report_type == "ang_saxs" or self.report_type == "gi_waxs": + s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) else: - s7 = '' - - s7 += ' || ' + 'BadLen: %s'%len(md['bad_frame_list']) - s7 += ' || ' + 'Transmission: %s'%md['transmission'] - s.append( s7 ) ####line 7 'Beam center... - m = 'Mask file: %s'%md['mask_file'] + ' || ' + 'ROI mask file: %s'%md['roi_mask_file'] - #s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename - #s.append( ) ####line 8 mask filename + s7 = "" + + s7 += " || " + "BadLen: %s" % len(md["bad_frame_list"]) + s7 += " || " + "Transmission: %s" % md["transmission"] + s.append(s7) ####line 7 'Beam center... 
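
The nec_keys loop above (via check_dict_keys) is what keeps these '%s' format lines from raising KeyError on sparse metadata. A tiny illustration with a made-up dict, mirroring what check_dict_keys does:

    # backfill missing keys with "unknown", as check_dict_keys() does
    md = {"sample": "silica NPs", "transmission": 0.92}  # hypothetical, incomplete metadata
    for key in ["sample", "Measurement", "transmission"]:
        if key not in list(md.keys()):
            md[key] = "unknown"
    print("Measurement: %s" % md["Measurement"])  # -> Measurement: unknown
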
+ m = "Mask file: %s" % md["mask_file"] + " || " + "ROI mask file: %s" % md["roi_mask_file"] + # s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename + # s.append( ) ####line 8 mask filename s.append(m) - + if self.res_h5_filename is not None: self.data_dir_ = self.data_dir + self.res_h5_filename else: - self.data_dir_ = self.data_dir - s.append( 'Analysis Results Dir: %s'%self.data_dir_ ) ####line 9 results folder - - - s.append( 'Metadata Dir: %s.csv-&.pkl'%self.metafile ) ####line 10 metadata folder + self.data_dir_ = self.data_dir + s.append("Analysis Results Dir: %s" % self.data_dir_) ####line 9 results folder + + s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) ####line 10 metadata folder try: - s.append( 'Pipeline notebook: %s'%md['NOTEBOOK_FULL_PATH'] ) ####line 11 notebook folder + s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) ####line 11 notebook folder except: pass - #print( 'here' ) - line =1 - for s_ in s: - add_one_line_string( c, s_, top -ds*line , left=30,fontsize = fontsize ) - line += 1 - + # print( 'here' ) + line = 1 + for s_ in s: + add_one_line_string(c, s_, top - ds * line, left=30, fontsize=fontsize) + line += 1 + if new_page: c.showPage() c.save() - - def report_static( self, top=560, new_page=False, iq_fit=False): - '''create the static analysis report - two images: - average intensity image - circular average - - ''' - #add sub-title, static images - - c= self.c + + def report_static(self, top=560, new_page=False, iq_fit=False): + """create the static analysis report + two images: + average intensity image + circular average + + """ + # add sub-title, static images + + c = self.c c.setFont("Helvetica", 20) - uid=self.uid - - ds = 220 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Static Analysis"%self.sub_title_num ) #add title + uid = self.uid - #add average image + ds = 220 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Static Analysis" % self.sub_title_num) # add title + + # add average image c.setFont("Helvetica", 14) - - imgf = self.avg_img_file - - if self.report_type == 'saxs': + + imgf = self.avg_img_file + + if self.report_type == "saxs": ipos = 60 - dshift=0 - elif self.report_type == 'gi_saxs': + dshift = 0 + elif self.report_type == "gi_saxs": ipos = 200 - dshift= 140 - elif self.report_type == 'ang_saxs': + dshift = 140 + elif self.report_type == "ang_saxs": ipos = 200 - dshift= 140 + dshift = 140 else: ipos = 200 - dshift= 140 - - - add_image_string( c, imgf, self.data_dir, img_left= ipos, img_top=top-ds, img_height=180, - str1_left=90 + dshift, str1_top = top-35,str1='Average Intensity Image', - str2_left = 80 + dshift, str2_top = top -230 ) - - #add q_Iq - if self.report_type == 'saxs': - imgf = self.qiq_file - #print(imgf) + dshift = 140 + + add_image_string( + c, + imgf, + self.data_dir, + img_left=ipos, + img_top=top - ds, + img_height=180, + str1_left=90 + dshift, + str1_top=top - 35, + str1="Average Intensity Image", + str2_left=80 + dshift, + str2_top=top - 230, + ) + + # add q_Iq + if self.report_type == "saxs": + imgf = self.qiq_file + # print(imgf) if iq_fit: - imgf = self.qiq_fit_file - label = 'Circular Average' + imgf = self.qiq_fit_file + label = "Circular Average" lab_pos = 390 fn_pos = 320 - add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, - str1_left=lab_pos, str1_top = top-35,str1=label, - str2_left = fn_pos, str2_top = top -230 ) + add_image_string( + c, + imgf, + self.data_dir, + img_left=320, + img_top=top - ds, + img_height=180, + str1_left=lab_pos, + str1_top=top - 35, + str1=label, + str2_left=fn_pos, + str2_top=top - 230, + ) else: if False: - imgf = self.ROI_on_Iq_file #self.qr_1d_file - label = 'Qr-1D' + imgf = self.ROI_on_Iq_file # self.qr_1d_file + label = "Qr-1D" lab_pos = 420 - fn_pos = 350 - - add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, - str1_left=lab_pos, str1_top = top-35,str1=label, - str2_left = fn_pos, str2_top = top -230 ) + fn_pos = 350 + + add_image_string( + c, + imgf, + self.data_dir, + img_left=320, + img_top=top - ds, + img_height=180, + str1_left=lab_pos, + str1_top=top - 35, + str1=label, + str2_left=fn_pos, + str2_top=top - 230, + ) if new_page: c.showPage() - c.save() - - def report_ROI( self, top= 300, new_page=False): - '''create the static analysis report - two images: - ROI on average intensity image - ROI on circular average - ''' - uid=self.uid - c= self.c - #add sub-title, static images + c.save() + + def report_ROI(self, top=300, new_page=False): + """create the static analysis report + two images: + ROI on average intensity image + ROI on circular average + """ + uid = self.uid + c = self.c + # add sub-title, static images c.setFillColor(black) - c.setFont("Helvetica", 20) + c.setFont("Helvetica", 20) ds = 230 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Define of ROI"%self.sub_title_num ) #add title - #add ROI on image - c.setFont("Helvetica", 14) + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Define of ROI" % self.sub_title_num) # add title + # add ROI on image + c.setFont("Helvetica", 14) imgf = self.ROI_on_img_file - label = 'ROI on Image' - add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=240, - str1_left=110, str1_top = top-35,str1=label, - str2_left = 60, str2_top = top -260 ) - - #add q_Iq - if self.report_type == 'saxs' or self.report_type == 'gi_saxs' or self.report_type == 'ang_saxs': - imgf = self.ROI_on_Iq_file - img_height=180 - img_left,img_top =320, top - ds - str1_left, str1_top,str1= 420, top- 35, 'ROI on Iq' - str2_left, str2_top = 350, top- 260 - - #print ( imgf ) - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - + label = "ROI on Image" + add_image_string( + c, + imgf, + self.data_dir, + img_left=60, + img_top=top - ds * 1.15, + img_height=240, + str1_left=110, + str1_top=top - 35, + str1=label, + str2_left=60, + str2_top=top - 260, + ) + + # add q_Iq + if self.report_type == "saxs" or self.report_type == "gi_saxs" or self.report_type == "ang_saxs": + imgf = self.ROI_on_Iq_file + img_height = 180 + img_left, img_top = 320, top - ds + str1_left, str1_top, str1 = 420, top - 35, "ROI on Iq" + str2_left, str2_top = 350, top - 260 + + # print ( imgf ) + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + if new_page: c.showPage() - c.save() - - - def report_time_analysis( self, top= 720,new_page=False): - '''create the time dependent analysis report - four images: - each image total intensity as a function of time - iq~t - waterfall - mean intensity of each ROI as a function of time - ''' - c= self.c - uid=self.uid - #add sub-title, Time-dependent plot + c.save() + + def report_time_analysis(self, top=720, new_page=False): + """create the time dependent analysis report + four images: + each image total intensity as a function of time + iq~t + waterfall + mean intensity of each ROI as a function of time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - top1=top + top1 = top ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Time Dependent Plot"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Time Dependent Plot" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - - + top = top1 - 160 - - #add img_sum_t - if self.report_type == 'saxs': + + # add img_sum_t + if self.report_type == "saxs": ipos = 80 - elif self.report_type == 'gi_saxs': + elif self.report_type == "gi_saxs": + ipos = 200 + elif self.report_type == "ang_saxs": ipos = 200 - elif self.report_type == 'ang_saxs': - ipos = 200 else: - ipos = 200 - - imgf = self.img_sum_t_file - img_height=140 - img_left,img_top = ipos, top - str1_left, str1_top,str1= ipos + 60, top1 - 20 , 'img sum ~ t' - str2_left, str2_top = ipos, top- 5 - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - #plot iq~t - if self.report_type == 'saxs': + ipos = 200 + + imgf = self.img_sum_t_file + img_height = 140 + img_left, img_top = ipos, top + str1_left, str1_top, str1 = ipos + 60, top1 - 20, "img sum ~ t" + str2_left, str2_top = ipos, top - 5 + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + # plot iq~t + if self.report_type == "saxs": imgf = self.Iq_t_file image = self.data_dir + imgf - - - img_height=140 - img_left,img_top = 350, top - str1_left, str1_top,str1= 420, top1-20 , 'iq ~ t' - str2_left, str2_top = 360, top- 5 - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - elif self.report_type == 'gi_saxs': + + img_height = 140 + img_left, img_top = 350, top + str1_left, str1_top, str1 = 420, top1 - 20, "iq ~ t" + str2_left, str2_top = 360, top - 5 + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + elif self.report_type == "gi_saxs": pass - + top = top1 - 340 - #add waterfall plot + # add waterfall plot imgf = self.wat_file - - img_height=160 - img_left,img_top = 80, top - str1_left, str1_top,str1= 140, top + img_height, 'waterfall plot' - str2_left, str2_top = 80, top- 5 - - if self.report_type != 'ang_saxs': - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) + + img_height = 160 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 140, top + img_height, "waterfall plot" + str2_left, str2_top = 80, top - 5 + + if self.report_type != "ang_saxs": + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) else: pass - #add mean-intensity of each roi + # add mean-intensity of each roi imgf = self.Mean_inten_t_file - - img_height=160 - img_left,img_top = 360, top - str1_left, str1_top,str1= 330, top + img_height, 'Mean-intensity-of-each-ROI' - str2_left, str2_top = 310, top- 5 - if self.report_type != 'ang_saxs': - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) + + img_height = 160 + img_left, img_top = 360, top + str1_left, str1_top, str1 = 330, top + img_height, "Mean-intensity-of-each-ROI" + str2_left, str2_top = 310, top - 5 + if self.report_type != "ang_saxs": + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) else: pass - + if new_page: c.showPage() c.save() - - def report_oavs( self, top= 350, oavs_file=None, new_page=False): - '''create the oavs images 
report - - ''' - - c= self.c - uid=self.uid - #add sub-title, One Time Correlation Function + + def report_oavs(self, top=350, oavs_file=None, new_page=False): + """create the oavs images report""" + + c = self.c + uid = self.uid + # add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. OAVS Images"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. OAVS Images" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - #add g2 plot + # add g2 plot if oavs_file is None: imgf = self.oavs_file else: - imgf = oavs_file - #print(self.data_dir + imgf) + imgf = oavs_file + # print(self.data_dir + imgf) if os.path.exists(self.data_dir + imgf): - im = Image.open( self.data_dir+imgf ) - ratio = float(im.size[1])/im.size[0] - img_width = 600 - img_height= img_width * ratio #img_height - #width = height/ratio - + im = Image.open(self.data_dir + imgf) + ratio = float(im.size[1]) / im.size[0] + img_width = 600 + img_height = img_width * ratio # img_height + # width = height/ratio + if not new_page: - #img_height= 550 + # img_height= 550 top = top - 600 - str2_left, str2_top = 80, top - 400 - img_left,img_top = 1, top - - if new_page: - #img_height= 150 + str2_left, str2_top = 80, top - 400 + img_left, img_top = 1, top + + if new_page: + # img_height= 150 top = top - img_height - 50 - str2_left, str2_top = 80, top - 50 - img_left,img_top = 10, top - - str1_left, str1_top, str1= 150, top + img_height, 'OAVS images' - img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top, return_=True ) - #print( imgf,self.data_dir ) + str2_left, str2_top = 80, top - 50 + img_left, img_top = 10, top + + str1_left, str1_top, str1 = 150, top + img_height, "OAVS images" + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + # print( imgf,self.data_dir ) print(img_width, img_height) - - - - def report_one_time( self, top= 350, g2_fit_file=None, q_rate_file=None, new_page=False): - '''create the one time correlation function report - Two images: - One Time Correlation Function with fit - q-rate fit - ''' - - c= self.c - uid=self.uid - #add sub-title, One Time Correlation Function + + def report_one_time(self, top=350, g2_fit_file=None, q_rate_file=None, new_page=False): + """create the one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + """ + + c = self.c + uid = self.uid + # add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
One Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - #add g2 plot + # add g2 plot if g2_fit_file is None: imgf = self.g2_fit_file else: - imgf = g2_fit_file - - if self.report_type != 'ang_saxs': - img_height= 300 - top = top - 320 - str2_left, str2_top = 80, top- 0 - + imgf = g2_fit_file + + if self.report_type != "ang_saxs": + img_height = 300 + top = top - 320 + str2_left, str2_top = 80, top - 0 + else: - img_height= 550 + img_height = 550 top = top - 600 - str2_left, str2_top = 80, top - 400 - #add one_time caculation - img_left,img_top = 1, top + str2_left, str2_top = 80, top - 400 + # add one_time caculation + img_left, img_top = 1, top if self.g2_fit_new_page or self.g2_new_page: - - img_height= 550 + + img_height = 550 top = top - 250 - str2_left, str2_top = 80, top - 0 - img_left,img_top = 60, top - - str1_left, str1_top,str1= 150, top + img_height, 'g2 fit plot' - img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top, return_=True ) - #print( imgf,self.data_dir ) - #add g2 plot fit - #print(self.q_rate_file ) - if os.path.isfile( self.data_dir + self.q_rate_file ): - #print('here') - #print(self.q_rate_file ) - top = top + 70 # + str2_left, str2_top = 80, top - 0 + img_left, img_top = 60, top + + str1_left, str1_top, str1 = 150, top + img_height, "g2 fit plot" + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + # print( imgf,self.data_dir ) + # add g2 plot fit + # print(self.q_rate_file ) + if os.path.isfile(self.data_dir + self.q_rate_file): + # print('here') + # print(self.q_rate_file ) + top = top + 70 # if q_rate_file is None: imgf = self.q_rate_file else: - imgf = q_rate_file - if self.report_type != 'ang_saxs': - #print(img_width) + imgf = q_rate_file + if self.report_type != "ang_saxs": + # print(img_width) if img_width > 400: - img_height = 90 + img_height = 90 else: - img_height= 180 - img_left,img_top = img_width-10, top #350, top - str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 - str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + img_height = 180 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" else: - img_height= 300 - img_left,img_top = 350, top - 150 - str2_left, str2_top = 380, top - 5 - str1_left, str1_top,str1= 450, top + 180, 'q-rate fit plot' + img_height = 300 + img_left, img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 180, "q-rate fit plot" if self.g2_fit_new_page or self.g2_new_page: - top = top - 200 - img_height= 180 - img_left,img_top = 350, top + top = top - 200 + img_height = 180 + img_left, img_top = 350, top str2_left, str2_top = 380, top - 5 - str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - + str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + else: top = top + 320 # if q_rate_file is None: imgf = self.q_rate_loglog_file else: - imgf = q_rate_file - #print(imgf) - if self.report_type != 'ang_saxs': - 
#print(img_width) + imgf = q_rate_file + # print(imgf) + if self.report_type != "ang_saxs": + # print(img_width) if img_width > 400: - img_height = 90/2 + img_height = 90 / 2 else: - img_height= 180 /2 - img_left,img_top = img_width-10, top #350, top - str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 - str1_left, str1_top,str1= 450, top + 230, 'q-rate loglog plot' + img_height = 180 / 2 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate loglog plot" else: - img_height= 300/2 - img_left,img_top = 350, top - 150 - str2_left, str2_top = 380, top - 5 - str1_left, str1_top,str1= 450, top + 180, 'q-rate loglog plot' + img_height = 300 / 2 + img_left, img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 180, "q-rate loglog plot" if self.g2_fit_new_page or self.g2_new_page: - top = top - 200 + 50 - img_height= 180 / 1.5 - img_left,img_top = 350, top + top = top - 200 + 50 + img_height = 180 / 1.5 + img_left, img_top = 350, top str2_left, str2_top = 380, top - 5 - str1_left, str1_top,str1= 450, top + 120, 'q-rate loglog plot' - - #print('here') - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - - top = top - 100 # + str1_left, str1_top, str1 = 450, top + 120, "q-rate loglog plot" + + # print('here') + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + top = top - 100 # if q_rate_file is None: imgf = self.g2_q_fitpara_file else: - imgf = q_rate_file - if self.report_type != 'ang_saxs': - #print(img_width) + imgf = q_rate_file + if self.report_type != "ang_saxs": + # print(img_width) if img_width > 400: - img_height = 90 + img_height = 90 else: - img_height= 180 - img_left,img_top = img_width-10, top #350, top - str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 - str1_left, str1_top,str1= 450, top + 230, 'g2 fit para' + img_height = 180 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "g2 fit para" else: - img_height= 300 - img_left,img_top = 350, top - 150 - str2_left, str2_top = 380, top - 5 - str1_left, str1_top,str1= 450, top + 180, 'g2 fit para' + img_height = 300 + img_left, img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 180, "g2 fit para" if self.g2_fit_new_page or self.g2_new_page: - top = top - 200 - img_height= 180 * 1.5 - img_left,img_top = 350, top + top = top - 200 + img_height = 180 * 1.5 + img_left, img_top = 350, top str2_left, str2_top = 380, top - 5 - str1_left, str1_top,str1= 450, top + 280, 'g2 fit para' - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - + str1_left, str1_top, str1 = 450, top + 280, "g2 fit para" + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + if new_page: c.showPage() c.save() - - - def report_mulit_one_time( self, top= 720,new_page=False): - '''create the mulit one time correlation function report - Two images: - One Time Correlation Function with fit - q-rate fit - ''' - c= self.c - uid=self.uid - #add sub-title, One Time Correlation Function + def 
report_mulit_one_time(self, top=720, new_page=False):
+        """create the multi one-time correlation function report
+        Two images:
+        One Time Correlation Function with fit
+        q-rate fit
+        """
+        c = self.c
+        uid = self.uid
+        # add sub-title, One Time Correlation Function
         c.setFillColor(black)
         c.setFont("Helvetica", 20)
         ds = 20
-        self.sub_title_num +=1
-        c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num )  #add title
+        self.sub_title_num += 1
+        c.drawString(10, top, "%s. One Time Correlation Function" % self.sub_title_num)  # add title
         c.setFont("Helvetica", 14)

-        #add g2 plot
+        # add g2 plot
         top = top - 320
         imgf = self.g2_fit_file
         image = self.data_dir + imgf
         if not os.path.exists(image):
             image = self.data_dir + self.g2_file
-        im = Image.open( image )
-        ratio = float(im.size[1])/im.size[0]
-        height= 300
-        c.drawImage( image, 1, top, width= height/ratio,height=height, mask= 'auto')
-        #c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None )
+        im = Image.open(image)
+        ratio = float(im.size[1]) / im.size[0]
+        height = 300
+        c.drawImage(image, 1, top, width=height / ratio, height=height, mask="auto")
+        # c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None )
         c.setFont("Helvetica", 16)
-        c.setFillColor( blue)
-        c.drawString( 150, top + height , 'g2 fit plot' )
+        c.setFillColor(blue)
+        c.drawString(150, top + height, "g2 fit plot")
         c.setFont("Helvetica", 12)
-        c.setFillColor(red)
-        c.drawString( 80, top- 0, 'filename: %s'%imgf )
+        c.setFillColor(red)
+        c.drawString(80, top - 0, "filename: %s" % imgf)

-        #add g2 plot fit
-        top = top + 70 #
+        # add g2 plot fit
+        top = top + 70  #
         imgf = self.q_rate_file
         image = self.data_dir + imgf
-        if os.path.exists(image):
+        if os.path.exists(image):
-            im = Image.open( image )
-            ratio = float(im.size[1])/im.size[0]
-            height= 180
-            c.drawImage( image, 350, top, width= height/ratio,height=height,mask= 'auto')
+            im = Image.open(image)
+            ratio = float(im.size[1]) / im.size[0]
+            height = 180
+            c.drawImage(image, 350, top, width=height / ratio, height=height, mask="auto")
         c.setFont("Helvetica", 16)
-        c.setFillColor( blue)
-        c.drawString( 450, top + 230, 'q-rate fit plot' )
+        c.setFillColor(blue)
+        c.drawString(450, top + 230, "q-rate fit plot")
         c.setFont("Helvetica", 12)
-        c.setFillColor(red)
-        c.drawString( 380, top- 5, 'filename: %s'%imgf )
-
+        c.setFillColor(red)
+        c.drawString(380, top - 5, "filename: %s" % imgf)
+
         if new_page:
             c.showPage()
             c.save()

-    def report_two_time( self, top= 720, new_page=False):
-        '''create the one time correlation function report
-        Two images:
-        Two Time Correlation Function
-        two one-time correlatoin function from multi-one-time and from diagonal two-time
-        '''
-        c= self.c
-        uid=self.uid
-        #add sub-title, Time-dependent plot
+    def report_two_time(self, top=720, new_page=False):
+        """create the two-time correlation function report
+        Two images:
+        Two Time Correlation Function
+        two one-time correlation functions from multi-one-time and from diagonal two-time
+        """
+        c = self.c
+        uid = self.uid
+        # add sub-title, Time-dependent plot
         c.setFont("Helvetica", 20)
-
+
         ds = 20
-        self.sub_title_num +=1
-        c.drawString(10, top, "%s. Two Time Correlation Function"%self.sub_title_num )  #add title
+        self.sub_title_num += 1
+        c.drawString(10, top, "%s. 
Two Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - - top1=top + + top1 = top top = top1 - 330 - #add q_Iq_t + # add q_Iq_t imgf = self.two_time_file - - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 180, top + 300, 'two time correlation function' + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 300, "two time correlation function" str2_left, str2_top = 180, top - 10 - img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top, return_=True ) + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) - - top = top - 340 - #add q_Iq_t + # add q_Iq_t imgf = self.two_g2_file - - if True:#not self.two_g2_new_page: - - img_height= 300 - img_left,img_top = 100 -70, top - str1_left, str1_top,str1= 210-70, top + 310, 'compared g2' - str2_left, str2_top = 180-70, top - 10 - + + if True: # not self.two_g2_new_page: + + img_height = 300 + img_left, img_top = 100 - 70, top + str1_left, str1_top, str1 = 210 - 70, top + 310, "compared g2" + str2_left, str2_top = 180 - 70, top - 10 + if self.two_g2_new_page: - img_left,img_top = 100, top - print(imgf ) - img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top,return_=True ) - #print(imgf) - top = top + 50 + img_left, img_top = 100, top + print(imgf) + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + # print(imgf) + top = top + 50 imgf = self.q_rate_two_time_fit_file - #print(imgf, img_width, top) + # print(imgf, img_width, top) if img_width < 400: - img_height= 140 - img_left,img_top = 350, top + 30 + img_height = 140 + img_left, img_top = 350, top + 30 str2_left, str2_top = 380 - 80, top - 5 - str1_left, str1_top,str1= 450 -80 , top + 230, 'q-rate fit from two-time' + str1_left, str1_top, str1 = 450 - 80, top + 230, "q-rate fit from two-time" else: - img_height = 90 - img_left,img_top = img_width-10, top #350, top - str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 - str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' - - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - - - - + img_height = 90 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + if new_page: c.showPage() - c.save() - - def report_four_time( self, top= 720, new_page=False): - '''create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - ''' - - c= self.c - uid=self.uid - #add sub-title, Time-dependent plot + c.save() + + def report_four_time(self, top=720, new_page=False): + """create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + + c = self.c + uid = self.uid + # 
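# The report methods above all size figures the same way: fix the height, then
# derive the width from the image aspect ratio so the plot is not distorted.
# A minimal sketch of that arithmetic (PIL's Image.size is (width, height);
# the helper name and target height below are illustrative only):
from PIL import Image


def scaled_size(image_path, target_height=300.0):
    """Return (width, height) preserving aspect ratio at a fixed height."""
    im = Image.open(image_path)
    ratio = float(im.size[1]) / im.size[0]  # height / width
    return target_height / ratio, target_height


# usage: w, h = scaled_size(path); c.drawImage(path, x, y, width=w, height=h)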
add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Four Time Correlation Function"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. Four Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - - top1=top + + top1 = top top = top1 - 330 - #add q_Iq_t + # add q_Iq_t imgf = self.four_time_file - + if not self.g4_new_page: - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 180, top + 300, 'four time correlation function' - str2_left, str2_top = 180, top - 10 + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 300, "four time correlation function" + str2_left, str2_top = 180, top - 10 else: - img_height= 600 + img_height = 600 top -= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 180, top + 300-250, 'four time correlation function' - str2_left, str2_top = 180, top - 10 - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 300 - 250, "four time correlation function" + str2_left, str2_top = 180, top - 10 + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) if new_page: c.showPage() - c.save() - - def report_dose( self, top= 720, new_page=False): - - c= self.c - uid=self.uid - #add sub-title, Time-dependent plot - c.setFont("Helvetica", 20) + c.save() + + def report_dose(self, top=720, new_page=False): + + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Dose Analysis"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Dose Analysis" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - - top1=top + + top1 = top top = top1 - 530 - #add q_Iq_t + # add q_Iq_t imgf = self.dose_file - - img_height= 500 - img_left,img_top = 80, top - str1_left, str1_top,str1= 180, top + 500, 'dose analysis' + + img_height = 500 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 500, "dose analysis" str2_left, str2_top = 180, top - 10 - - #print( self.data_dir + self.dose_file) - if os.path.exists( self.data_dir + imgf): - #print( self.dose_file) - im = Image.open( self.data_dir + imgf ) - ratio = float(im.size[1])/im.size[0] - width = img_height/ratio - #print(width) - if width >450: - img_height = 450*ratio - + + # print( self.data_dir + self.dose_file) + if os.path.exists(self.data_dir + imgf): + # print( self.dose_file) + im = Image.open(self.data_dir + imgf) + ratio = float(im.size[1]) / im.size[0] + width = img_height / ratio + # print(width) + if width > 450: + img_height = 450 * ratio + if self.dose_file_new_page: - #img_left,img_top = 180, top - img_left,img_top = 100, top - - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - + # img_left,img_top = 180, top + img_left, img_top = 100, top + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) if new_page: c.showPage() - c.save() - - - - def report_flow_pv_g2( self, top= 720, new_page=False): - '''create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - ''' - c= self.c - uid=self.uid - #add sub-title, Time-dependent plot + c.save() + + def report_flow_pv_g2(self, top=720, new_page=False): + """create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Flow One Time Analysis"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Flow One Time Analysis" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - - top1=top + + top1 = top top = top1 - 330 - #add xsvs fit - + # add xsvs fit + imgf = self.flow_g2v image = self.data_dir + imgf - - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow' + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Vertical Flow" str2_left, str2_top = 180, top - 10 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - imgf = self.flow_g2v_rate_fit - img_height= 200 - img_left,img_top = 350, top +50 - str1_left, str1_top,str1= 210, top + 300, '' + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2v_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" str2_left, str2_top = 350, top - 10 + 50 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - - + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + top = top - 340 - #add contrast fit - imgf = self.flow_g2p - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow' + # add contrast fit + imgf = self.flow_g2p + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Parallel Flow" str2_left, str2_top = 180, top - 10 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + imgf = self.flow_g2p_rate_fit - img_height= 200 - img_left,img_top = 350, top +50 - str1_left, str1_top,str1= 210, top + 300, '' + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" str2_left, str2_top = 350, top - 10 + 50 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) if new_page: c.showPage() - c.save() - - - def report_flow_pv_two_time( self, top= 720, new_page=False): - '''create the two time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - ''' - c= self.c - uid=self.uid - #add sub-title, Time-dependent plot + c.save() + + def report_flow_pv_two_time(self, top=720, new_page=False): + """create the two time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - + ds = 20 - self.sub_title_num +=1 - c.drawString(10, top, "%s. Flow One &Two Time Comparison"%self.sub_title_num ) #add title + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Flow One &Two Time Comparison" % self.sub_title_num) # add title c.setFont("Helvetica", 14) - - top1=top + + top1 = top top = top1 - 330 - #add xsvs fit + # add xsvs fit - if False: imgf = self.two_time image = self.data_dir + imgf - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 210, top + 300, 'Two_time' + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "Two_time" str2_left, str2_top = 180, top - 10 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - - imgf = self.flow_g2_g2b_p - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow by two-time' + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + imgf = self.flow_g2_g2b_p + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Vertical Flow by two-time" str2_left, str2_top = 180, top - 10 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - imgf = self.flow_g2bp_rate_fit - img_height= 200 - img_left,img_top = 350, top +50 - str1_left, str1_top,str1= 210, top + 300, '' + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2bp_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" str2_left, str2_top = 350, top - 10 + 50 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - - + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + top = top - 340 - #add contrast fit - imgf = self.flow_g2_g2b_v - - img_height= 300 - img_left,img_top = 80, top - str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow by two-time' + # add contrast fit + imgf = self.flow_g2_g2b_v + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Parallel Flow by two-time" str2_left, str2_top = 180, top - 10 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) - - imgf = self.flow_g2bv_rate_fit - img_height= 200 - img_left,img_top = 350, top +50 - str1_left, str1_top,str1= 210, top + 300, '' + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2bv_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" str2_left, str2_top = 350, top - 10 + 50 - add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, - str1_left, str1_top,str1, - str2_left, str2_top ) + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) if new_page: c.showPage() - c.save() - - def report_xsvs( self, top= 720, new_page=False): - '''create the one time correlation function report - Two images: - Two Time Correlation Function - two one-time correlatoin function from multi-one-time and from diagonal two-time - ''' - c= self.c - uid=self.uid - #add sub-title, Time-dependent plot + c.save() + + def report_xsvs(self, top=720, 
new_page=False):
+        """create the visibility (XSVS) analysis report
+        Two images:
+        XSVS fit by the negative binomial distribution
+        contrast factors from XSVS and XPCS
+        """
+        c = self.c
+        uid = self.uid
+        # add sub-title, Time-dependent plot
         c.setFont("Helvetica", 20)
-
+
         ds = 20
-        self.sub_title_num +=1
-        c.drawString(10, top, "%s. Visibility Analysis"%self.sub_title_num )  #add title
+        self.sub_title_num += 1
+        c.drawString(10, top, "%s. Visibility Analysis" % self.sub_title_num)  # add title
         c.setFont("Helvetica", 14)
         top = top - 330
-        #add xsvs fit
-        imgf = self.xsvs_fit_file
-        add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300,
-
-                         str1_left=210, str1_top = top +300,str1='XSVS_Fit_by_Negtive_Binomal Function',
-                         str2_left = 180, str2_top = top -10 )
-
-        #add contrast fit
-        top = top -340
+        # add xsvs fit
+        imgf = self.xsvs_fit_file
+        add_image_string(
+            c,
+            imgf,
+            self.data_dir,
+            img_left=100,
+            img_top=top,
+            img_height=300,
+            str1_left=210,
+            str1_top=top + 300,
+            str1="XSVS_Fit_by_Negative_Binomial Function",
+            str2_left=180,
+            str2_top=top - 10,
+        )
+
+        # add contrast fit
+        top = top - 340
         imgf = self.contrast_file
-        add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300,
-
-                         str1_left=210, str1_top = top + 310,str1='contrast get from xsvs and xpcs',
-                         str2_left = 180, str2_top = top -10 )
-
+        add_image_string(
+            c,
+            imgf,
+            self.data_dir,
+            img_left=100,
+            img_top=top,
+            img_height=300,
+            str1_left=210,
+            str1_top=top + 310,
+            str1="contrast from xsvs and xpcs",
+            str2_left=180,
+            str2_top=top - 10,
+        )
+
         if False:
-            top1=top
+            top1 = top
             top = top1 - 330
-            #add xsvs fit
+            # add xsvs fit
             imgf = self.xsvs_fit_file
             image = self.data_dir + imgf
-            im = Image.open( image )
-            ratio = float(im.size[1])/im.size[0]
-            height= 300
-            c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None)
+            im = Image.open(image)
+            ratio = float(im.size[1]) / im.size[0]
+            height = 300
+            c.drawImage(image, 100, top, width=height / ratio, height=height, mask=None)
             c.setFont("Helvetica", 16)
-            c.setFillColor( blue)
-            c.drawString( 210, top + 300 , 'XSVS_Fit_by_Negtive_Binomal Function' )
+            c.setFillColor(blue)
+            c.drawString(210, top + 300, "XSVS_Fit_by_Negative_Binomial Function")
             c.setFont("Helvetica", 12)
-            c.setFillColor(red)
-            c.drawString( 180, top- 10, 'filename: %s'%imgf )
+            c.setFillColor(red)
+            c.drawString(180, top - 10, "filename: %s" % imgf)

             top = top - 340
-            #add contrast fit
+            # add contrast fit
             imgf = self.contrast_file
             image = self.data_dir + imgf
-            im = Image.open( image )
-            ratio = float(im.size[1])/im.size[0]
-            height= 300
-            c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None)
+            im = Image.open(image)
+            ratio = float(im.size[1]) / im.size[0]
+            height = 300
+            c.drawImage(image, 100, top, width=height / ratio, height=height, mask=None)
             c.setFont("Helvetica", 16)
-            c.setFillColor( blue)
-            c.drawString( 210, top + 310, 'contrast get from xsvs and xpcs' )
+            c.setFillColor(blue)
+            c.drawString(210, top + 310, "contrast from xsvs and xpcs")
             c.setFont("Helvetica", 12)
-            c.setFillColor(red)
-            c.drawString( 180, top- 10, 'filename: %s'%imgf )
-
+            c.setFillColor(red)
+            c.drawString(180, top - 10, "filename: %s" % imgf)

         if new_page:
             c.showPage()
             c.save()
-
-
-
     def new_page(self):
-        c=self.c
+        c = self.c
         c.showPage()
-
+
     def save_page(self):
-        c=self.c
+        c = self.c
         c.save()
-
+
     def done(self):
         out_dir = self.out_dir
-        uid=self.uid
-
+        uid = self.uid
+
         print()
-        
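# A minimal sketch of driving the report builder page by page with the methods
# above; data_dir, uid and out_dir are placeholders, and create_pdf_report is
# the builder class used throughout this module.
c = create_pdf_report(data_dir, uid, out_dir)
c.report_header(page=1)
c.report_meta(top=730)
c.new_page()  # calls c.showPage()
c.report_header(page=2)
c.report_xsvs(top=720)
c.save_page()  # calls c.save()
c.done()  # prints the output PDF filename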
print('*'*40) - print ('The pdf report is created with filename as: %s'%(self.filename )) - print('*'*40) - - - - -def create_multi_pdf_reports_for_uids( uids, g2, data_dir, report_type='saxs', append_name='' ): - ''' Aug 16, YG@CHX-NSLS-II - Create multi pdf reports for each uid in uids - uids: a list of uids to be reported - g2: a dictionary, {run_num: sub_num: g2_of_each_uid} - data_dir: - Save pdf report in data dir - ''' - for key in list( g2.keys()): - i=1 - for sub_key in list( g2[key].keys() ): + print("*" * 40) + print("The pdf report is created with filename as: %s" % (self.filename)) + print("*" * 40) + + +def create_multi_pdf_reports_for_uids(uids, g2, data_dir, report_type="saxs", append_name=""): + """Aug 16, YG@CHX-NSLS-II + Create multi pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + """ + for key in list(g2.keys()): + i = 1 + for sub_key in list(g2[key].keys()): uid_i = uids[key][sub_key] - data_dir_ = os.path.join( data_dir, '%s/'%uid_i ) - if append_name!='': + data_dir_ = os.path.join(data_dir, "%s/" % uid_i) + if append_name != "": uid_name = uid_i + append_name else: uid_name = uid_i - c= create_pdf_report( data_dir_, uid_i,data_dir, - report_type=report_type, filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid_name ) - #Page one: Meta-data/Iq-Q/ROI + c = create_pdf_report( + data_dir_, + uid_i, + data_dir, + report_type=report_type, + filename="XPCS_Analysis_Report_for_uid=%s.pdf" % uid_name, + ) + # Page one: Meta-data/Iq-Q/ROI c.report_header(page=1) - c.report_meta( top=730) - #c.report_one_time( top= 500 ) - #c.new_page() - if report_type =='flow': - c.report_flow_pv_g2( top= 720) + c.report_meta(top=730) + # c.report_one_time( top= 500 ) + # c.new_page() + if report_type == "flow": + c.report_flow_pv_g2(top=720) c.save_page() - c.done() - - - - - -def create_one_pdf_reports_for_uids( uids, g2, data_dir, filename='all_in_one', report_type='saxs' ): - ''' Aug 16, YG@CHX-NSLS-II - Create one pdf reports for each uid in uids - uids: a list of uids to be reported - g2: a dictionary, {run_num: sub_num: g2_of_each_uid} - data_dir: - Save pdf report in data dir - ''' - c= create_pdf_report( data_dir, uid=filename, out_dir=data_dir, load=False, report_type= report_type) - page=1 - - for key in list( g2.keys()): - i=1 - for sub_key in list( g2[key].keys() ): + c.done() + + +def create_one_pdf_reports_for_uids(uids, g2, data_dir, filename="all_in_one", report_type="saxs"): + """Aug 16, YG@CHX-NSLS-II + Create one pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + """ + c = create_pdf_report(data_dir, uid=filename, out_dir=data_dir, load=False, report_type=report_type) + page = 1 + + for key in list(g2.keys()): + i = 1 + for sub_key in list(g2[key].keys()): uid_i = uids[key][sub_key] - data_dir_ = os.path.join( data_dir, '%s/'%uid_i) - + data_dir_ = os.path.join(data_dir, "%s/" % uid_i) + c.uid = uid_i c.data_dir = data_dir_ - c.load_metadata() - - #Page one: Meta-data/Iq-Q/ROI + c.load_metadata() + + # Page one: Meta-data/Iq-Q/ROI c.report_header(page=page) - c.report_meta( top=730) - c.report_one_time( top= 500 ) + c.report_meta(top=730) + c.report_one_time(top=500) c.new_page() page += 1 - c.uid = filename + c.uid = filename c.save_page() - c.done() - - -def save_res_h5( full_uid, data_dir, save_two_time=False ): - ''' - YG. 
Nov 10, 2016
-    save the results to a h5 file
-    will save meta data/avg_img/mask/roi (ring_mask or box_mask)/
-    will aslo save multi-tau calculated one-time correlation function g2/taus
-    will also save two-time derived one-time correlation function /g2b/taus2
-    if save_two_time if True, will save two-time correaltion function
-    '''
-    with h5py.File(data_dir + '%s.h5'%full_uid, 'w') as hf:
-        #write meta data
-        meta_data = hf.create_dataset("meta_data", (1,), dtype='i')
-        for key in md.keys():
+    c.done()
+
+
+def save_res_h5(full_uid, data_dir, save_two_time=False):
+    """
+    YG. Nov 10, 2016
+    save the results to an h5 file
+    will save meta data/avg_img/mask/roi (ring_mask or box_mask)/
+    will also save multi-tau calculated one-time correlation function g2/taus
+    will also save two-time derived one-time correlation function /g2b/taus2
+    if save_two_time is True, will save the two-time correlation function
+    """
+    with h5py.File(data_dir + "%s.h5" % full_uid, "w") as hf:
+        # write meta data
+        meta_data = hf.create_dataset("meta_data", (1,), dtype="i")
+        for key in md.keys():
             try:
                 meta_data.attrs[key] = md[key]
             except:
                 pass
-        shapes = md['avg_img'].shape
-        avg_h5 = hf.create_dataset("avg_img", data = md['avg_img'] )
-        mask_h5 = hf.create_dataset("mask", data = md['mask'] )
-        roi_h5 = hf.create_dataset("roi", data = md['ring_mask'] )
+        shapes = md["avg_img"].shape
+        avg_h5 = hf.create_dataset("avg_img", data=md["avg_img"])
+        mask_h5 = hf.create_dataset("mask", data=md["mask"])
+        roi_h5 = hf.create_dataset("roi", data=md["ring_mask"])

-        g2_h5 = hf.create_dataset("g2", data = g2 )
-        taus_h5 = hf.create_dataset("taus", data = taus )
+        g2_h5 = hf.create_dataset("g2", data=g2)
+        taus_h5 = hf.create_dataset("taus", data=taus)
         if save_two_time:
-            g12b_h5 = hf.create_dataset("g12b", data = g12b )
-            g2b_h5 = hf.create_dataset("g2b", data = g2b )
-            taus2_h5 = hf.create_dataset("taus2", data = taus2 )
+            g12b_h5 = hf.create_dataset("g12b", data=g12b)
+            g2b_h5 = hf.create_dataset("g2b", data=g2b)
+            taus2_h5 = hf.create_dataset("taus2", data=taus2)
+

 def printname(name):
-    print (name)
-#f.visit(printname)
-def load_res_h5( full_uid, data_dir ):
-    '''YG. Nov 10, 2016
-    load results from a h5 file
-    will load meta data/avg_img/mask/roi (ring_mask or box_mask)/
-    will aslo load multi-tau calculated one-time correlation function g2/taus
-    will also load two-time derived one-time correlation function /g2b/taus2
-    if save_two_time if True, will load two-time correaltion function
-
-    '''
-    with h5py.File(data_dir + '%s.h5'%full_uid, 'r') as hf:
-        meta_data_h5 = hf.get( "meta_data" )
+    print(name)
+
+
+# f.visit(printname)
+def load_res_h5(full_uid, data_dir):
+    """YG. 
Nov 10, 2016
+    load results from an h5 file
+    will load meta data/avg_img/mask/roi (ring_mask or box_mask)/
+    will also load multi-tau calculated one-time correlation function g2/taus
+    will also load two-time derived one-time correlation function /g2b/taus2
+    if save_two_time is True, will load the two-time correlation function
+
+    """
+    with h5py.File(data_dir + "%s.h5" % full_uid, "r") as hf:
+        meta_data_h5 = hf.get("meta_data")
         meta_data = {}
-        for att in meta_data_h5.attrs:
-            meta_data[att] = meta_data_h5.attrs[att]
-        avg_h5 = np.array( hf.get("avg_img" ) )
-        mask_h5 = np.array(hf.get("mask" ))
-        roi_h5 =np.array( hf.get("roi" ))
-        g2_h5 = np.array( hf.get("g2" ))
-        taus_h5 = np.array( hf.get("taus" ))
-        g2b_h5 = np.array( hf.get("g2b"))
-        taus2_h5 = np.array( hf.get("taus2"))
-        if 'g12b' in hf:
-            g12b_h5 = np.array( hf.get("g12b"))
-
-    if 'g12b' in hf:
-        return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b
-    else:
-        return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5
-
-
-
-
-def make_pdf_report( data_dir, uid, pdf_out_dir, pdf_filename, username,
-                    run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=None,
-                    oavs_report = False,report_type='saxs', md=None,report_invariant=False, return_class=False, res_h5_filename=None
-                   ):
-
+        for att in meta_data_h5.attrs:
+            meta_data[att] = meta_data_h5.attrs[att]
+        avg_h5 = np.array(hf.get("avg_img"))
+        mask_h5 = np.array(hf.get("mask"))
+        roi_h5 = np.array(hf.get("roi"))
+        g2_h5 = np.array(hf.get("g2"))
+        taus_h5 = np.array(hf.get("taus"))
+        g2b_h5 = np.array(hf.get("g2b"))
+        taus2_h5 = np.array(hf.get("taus2"))
+        has_g12b = "g12b" in hf
+        if has_g12b:
+            g12b_h5 = np.array(hf.get("g12b"))
+
+    if has_g12b:
+        return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b_h5
+    else:
+        return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5
+
+
+def make_pdf_report(
+    data_dir,
+    uid,
+    pdf_out_dir,
+    pdf_filename,
+    username,
+    run_fit_form,
+    run_one_time,
+    run_two_time,
+    run_four_time,
+    run_xsvs,
+    run_dose=None,
+    oavs_report=False,
+    report_type="saxs",
+    md=None,
+    report_invariant=False,
+    return_class=False,
+    res_h5_filename=None,
+):
+
     if uid.startswith("uid=") or uid.startswith("Uid="):
         uid = uid[4:]
-    c= create_pdf_report( data_dir, uid, pdf_out_dir, filename= pdf_filename, user= username, report_type=report_type, md = md, res_h5_filename=res_h5_filename )
-    #print( c.md)
-    #Page one: Meta-data/Iq-Q/ROI
+    c = create_pdf_report(
+        data_dir,
+        uid,
+        pdf_out_dir,
+        filename=pdf_filename,
+        user=username,
+        report_type=report_type,
+        md=md,
+        res_h5_filename=res_h5_filename,
+    )
+    # print( c.md)
+    # Page one: Meta-data/Iq-Q/ROI
     c.report_header(page=1)
-    c.report_meta( top=730)
-    c.report_static( top=540, iq_fit = run_fit_form )
-    c.report_ROI( top= 290)
-    page = 1
+    c.report_meta(top=730)
+    c.report_static(top=540, iq_fit=run_fit_form)
+    c.report_ROI(top=290)
+    page = 1
     ##Page Two for plot OVAS images if oavs_report is True
     if oavs_report:
         c.new_page()
-        c.report_header(page=2)
-        c.report_oavs( top= 720, oavs_file=None, new_page=True)
-        page +=1
-
-    #Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q
+        c.report_header(page=2)
+        c.report_oavs(top=720, oavs_file=None, new_page=True)
+        page += 1
+
+    # Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q
     c.new_page()
-    page +=1
+    page += 1
     c.report_header(page=page)
-
-    if c.report_type != 'ang_saxs':
-        c.report_time_analysis( top= 720)
-    if run_one_time:
-        if c.report_type != 'ang_saxs':
+
+    if c.report_type != 
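# Round-trip sketch for the two helpers above. Note that save_res_h5 reads md,
# g2, taus (and g12b/g2b/taus2 when save_two_time is True) from the enclosing
# scope rather than taking them as arguments, so they must already be defined;
# the uid and path below are placeholders.
full_uid = "af8f66"
data_dir = "/XF11ID/analysis/Results/"
save_res_h5(full_uid, data_dir, save_two_time=False)
# load_res_h5 returns 8 values, or 9 when a g12b dataset is present:
md_, avg_img, mask, roi, g2_, taus_, g2b, taus2 = load_res_h5(full_uid, data_dir)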
"ang_saxs": + c.report_time_analysis(top=720) + if run_one_time: + if c.report_type != "ang_saxs": top = 350 - else: + else: top = 500 if c.g2_fit_new_page: c.new_page() - page +=1 + page += 1 top = 720 - c.report_one_time( top= top ) - - - #self.two_g2_new_page = True - #self.g2_fit_new_page = True - - #Page Three: two-time/two g2 - + c.report_one_time(top=top) + + # self.two_g2_new_page = True + # self.g2_fit_new_page = True + + # Page Three: two-time/two g2 + if run_two_time: c.new_page() - page +=1 - c.report_header(page= page) - c.report_two_time( top= 720 ) + page += 1 + c.report_header(page=page) + c.report_two_time(top=720) if run_four_time: c.new_page() - page +=1 - c.report_header(page= page) - c.report_four_time( top= 720 ) + page += 1 + c.report_header(page=page) + c.report_four_time(top=720) if run_xsvs: c.new_page() - page +=1 - c.report_header(page= page) - c.report_xsvs( top= 720 ) + page += 1 + c.report_header(page=page) + c.report_xsvs(top=720) if run_dose: c.new_page() - page +=1 - c.report_header(page= page) - c.report_dose( top = 702) + page += 1 + c.report_header(page=page) + c.report_dose(top=702) if report_invariant: c.new_page() - page +=1 - c.report_header(page= page) - c.report_invariant( top = 702) - + page += 1 + c.report_header(page=page) + c.report_invariant(top=702) + else: - c.report_flow_pv_g2( top= 720, new_page= True) - c.report_flow_pv_two_time( top= 720, new_page= True ) + c.report_flow_pv_g2(top=720, new_page=True) + c.report_flow_pv_two_time(top=720, new_page=True) c.save_page() - c.done() + c.done() if return_class: return c - - + + ###################################### ###Deal with saving dict to hdf5 file def save_dict_to_hdf5(dic, filename): """ .... """ - with h5py.File(filename, 'w') as h5file: - recursively_save_dict_contents_to_group(h5file, '/', dic) + with h5py.File(filename, "w") as h5file: + recursively_save_dict_contents_to_group(h5file, "/", dic) + def load_dict_from_hdf5(filename): """ .... 
""" - with h5py.File(filename, 'r') as h5file: - return recursively_load_dict_contents_from_group(h5file, '/') - -def recursively_save_dict_contents_to_group( h5file, path, dic): + with h5py.File(filename, "r") as h5file: + return recursively_load_dict_contents_from_group(h5file, "/") + + +def recursively_save_dict_contents_to_group(h5file, path, dic): """...""" # argument type checking if not isinstance(dic, dict): - raise ValueError("must provide a dictionary") - + raise ValueError("must provide a dictionary") + if not isinstance(path, str): raise ValueError("path must be a string") if not isinstance(h5file, h5py._hl.files.File): raise ValueError("must be an open h5py file") # save items to the hdf5 file for key, item in dic.items(): - #print(key,item) + # print(key,item) key = str(key) if isinstance(item, list): item = np.array(item) - #print(item) + # print(item) if not isinstance(key, str): raise ValueError("dict keys must be strings to save to hdf5") # save strings, numpy.int64, and numpy.float64 types - if isinstance(item, (np.int64, np.float64, str, float, np.float32,int)): # removed depreciated np.float LW @06/11/2023 - #print( 'here' ) + if isinstance( + item, (np.int64, np.float64, str, float, np.float32, int) + ): # removed depreciated np.float LW @06/11/2023 + # print( 'here' ) h5file[path + key] = item if not h5file[path + key].value == item: - raise ValueError('The data representation in the HDF5 file does not match the original dict.') + raise ValueError("The data representation in the HDF5 file does not match the original dict.") # save numpy arrays - elif isinstance(item, np.ndarray): + elif isinstance(item, np.ndarray): try: h5file[path + key] = item except: - item = np.array(item).astype('|S9') + item = np.array(item).astype("|S9") h5file[path + key] = item if not np.array_equal(h5file[path + key].value, item): - raise ValueError('The data representation in the HDF5 file does not match the original dict.') + raise ValueError("The data representation in the HDF5 file does not match the original dict.") # save dictionaries elif isinstance(item, dict): - recursively_save_dict_contents_to_group(h5file, path + key + '/', item) + recursively_save_dict_contents_to_group(h5file, path + key + "/", item) # other types cannot be saved and will result in an error else: - #print(item) - raise ValueError('Cannot save %s type.' % type(item)) - - -def recursively_load_dict_contents_from_group( h5file, path): + # print(item) + raise ValueError("Cannot save %s type." % type(item)) + + +def recursively_load_dict_contents_from_group(h5file, path): """...""" ans = {} for key, item in h5file[path].items(): if isinstance(item, h5py._hl.dataset.Dataset): ans[key] = item.value elif isinstance(item, h5py._hl.group.Group): - ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/') - return ans - - -def export_xpcs_results_to_h5( filename, export_dir, export_dict ): - ''' - YG. May 10, 2017 - save the results to a h5 file - - YG. Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas - - filename: the h5 file name - export_dir: the exported file folder - export_dict: dict, with keys as md, g2, g4 et.al. - ''' - + ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + "/") + return ans + + +def export_xpcs_results_to_h5(filename, export_dir, export_dict): + """ + YG. May 10, 2017 + save the results to a h5 file + + YG. 
Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. + """ + fout = export_dir + filename - dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] - dict_nest=['taus_uids', 'g2_uids' ] - - with h5py.File(fout, 'w') as hf: - flag=False - for key in list(export_dict.keys()): - #print( key ) - if key in dicts: #=='md' or key == 'qval_dict': - md= export_dict[key] - meta_data = hf.create_dataset( key, (1,), dtype='i') - for key_ in md.keys(): + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p"] + dict_nest = ["taus_uids", "g2_uids"] + + with h5py.File(fout, "w") as hf: + flag = False + for key in list(export_dict.keys()): + # print( key ) + if key in dicts: # =='md' or key == 'qval_dict': + md = export_dict[key] + meta_data = hf.create_dataset(key, (1,), dtype="i") + for key_ in md.keys(): try: - meta_data.attrs[str(key_)] = md[key_] + meta_data.attrs[str(key_)] = md[key_] except: - pass + pass elif key in dict_nest: - #print(key) + # print(key) try: - recursively_save_dict_contents_to_group(hf, '/%s/'%key, export_dict[key] ) + recursively_save_dict_contents_to_group(hf, "/%s/" % key, export_dict[key]) except: - print("Can't export the key: %s in this dataset."%key) - - elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + print("Can't export the key: %s in this dataset." % key) + + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: try: - export_dict[key].to_hdf( fout, key=key, mode='a', ) + export_dict[key].to_hdf( + fout, + key=key, + mode="a", + ) except: - flag=True + flag = True else: - data = hf.create_dataset(key, data = export_dict[key] ) - #add this fill line at Octo 27, 2017 + data = hf.create_dataset(key, data=export_dict[key]) + # add this fill line at Octo 27, 2017 data.set_fill_value = np.nan - if flag: - for key in list(export_dict.keys()): - if key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: - export_dict[key].to_hdf( fout, key=key, mode='a', ) - - print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) - - - -def extract_xpcs_results_from_h5_debug( filename, import_dir, onekey=None, exclude_keys=None ): - ''' - YG. Dec 22, 2016 - extract data from a h5 file - - filename: the h5 file name - import_dir: the imported file folder - onekey: string, if not None, only extract that key - return: - extact_dict: dict, with keys as md, g2, g4 et.al. - ''' - - import pandas as pds + if flag: + for key in list(export_dict.keys()): + if key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + export_dict[key].to_hdf( + fout, + key=key, + mode="a", + ) + + print("The xpcs analysis results are exported to %s with filename as %s" % (export_dir, filename)) + + +def extract_xpcs_results_from_h5_debug(filename, import_dir, onekey=None, exclude_keys=None): + """ + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. 
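# A hedged sketch of assembling an export_dict for the exporter above; md,
# qval_dict, g2 and taus are assumed to exist from the analysis pipeline. Keys
# in `dicts` are stored as HDF5 attributes, keys in `dict_nest` as nested
# groups, pandas frames via to_hdf, and everything else as plain datasets.
export_dict = {"md": md, "qval_dict": qval_dict, "g2": g2, "taus": taus}
export_xpcs_results_to_h5("af8f66_Res.h5", "/XF11ID/analysis/Results/af8f66/", export_dict)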
+ """ + import numpy as np - extract_dict = {} + import pandas as pds + + extract_dict = {} fp = import_dir + filename pds_type_keys = [] - dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p", "taus_uids", "g2_uids"] if exclude_keys is None: - exclude_keys =[] + exclude_keys = [] if onekey is None: for k in dicts: extract_dict[k] = {} - with h5py.File( fp, 'r') as hf: - #print (list( hf.keys()) ) - for key in list( hf.keys()): + with h5py.File(fp, "r") as hf: + # print (list( hf.keys()) ) + for key in list(hf.keys()): if key not in exclude_keys: if key in dicts: - extract_dict[key] = recursively_load_dict_contents_from_group(hf, '/' + key + '/') - elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: - pds_type_keys.append( key ) - else: - extract_dict[key] = np.array( hf.get( key )) + extract_dict[key] = recursively_load_dict_contents_from_group(hf, "/" + key + "/") + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + pds_type_keys.append(key) + else: + extract_dict[key] = np.array(hf.get(key)) for key in pds_type_keys: if key not in exclude_keys: - extract_dict[key] = pds.read_hdf(fp, key= key ) + extract_dict[key] = pds.read_hdf(fp, key=key) else: - if onekey == 'md': - with h5py.File( fp, 'r') as hf: - md = hf.get('md') + if onekey == "md": + with h5py.File(fp, "r") as hf: + md = hf.get("md") for key in list(md.attrs): - extract_dict['md'][key] = md.attrs[key] - elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: - extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) + extract_dict["md"][key] = md.attrs[key] + elif onekey in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + extract_dict[onekey] = pds.read_hdf(fp, key=onekey) else: try: - with h5py.File( fp, 'r') as hf: - extract_dict[onekey] = np.array( hf.get( onekey )) + with h5py.File(fp, "r") as hf: + extract_dict[onekey] = np.array(hf.get(onekey)) except: - print("The %s dosen't have this %s value"%(fp, onekey) ) + print("The %s dosen't have this %s value" % (fp, onekey)) return extract_dict +def export_xpcs_results_to_h5_old(filename, export_dir, export_dict): + """ + YG. Dec 22, 2016 + save the results to a h5 file + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. + """ + import h5py - - - - -def export_xpcs_results_to_h5_old( filename, export_dir, export_dict ): - ''' - YG. Dec 22, 2016 - save the results to a h5 file - - filename: the h5 file name - export_dir: the exported file folder - export_dict: dict, with keys as md, g2, g4 et.al. 
- ''' - import h5py fout = export_dir + filename - dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] #{k1: { }} - dict_nest= ['taus_uids', 'g2_uids'] #{k1: {k2:}} - with h5py.File(fout, 'w') as hf: - for key in list(export_dict.keys()): - #print( key ) - if key in dicts: #=='md' or key == 'qval_dict': - md= export_dict[key] - meta_data = hf.create_dataset( key, (1,), dtype='i') - for key_ in md.keys(): + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p"] # {k1: { }} + dict_nest = ["taus_uids", "g2_uids"] # {k1: {k2:}} + with h5py.File(fout, "w") as hf: + for key in list(export_dict.keys()): + # print( key ) + if key in dicts: # =='md' or key == 'qval_dict': + md = export_dict[key] + meta_data = hf.create_dataset(key, (1,), dtype="i") + for key_ in md.keys(): try: - meta_data.attrs[str(key_)] = md[key_] + meta_data.attrs[str(key_)] = md[key_] except: - pass + pass elif key in dict_nest: k1 = export_dict[key] - v1 = hf.create_dataset( key, (1,), dtype='i') + v1 = hf.create_dataset(key, (1,), dtype="i") for k2 in k1.keys(): - - v2 = hf.create_dataset( k1, (1,), dtype='i') - - - elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: - export_dict[key].to_hdf( fout, key=key, mode='a', ) + + v2 = hf.create_dataset(k1, (1,), dtype="i") + + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + export_dict[key].to_hdf( + fout, + key=key, + mode="a", + ) else: - data = hf.create_dataset(key, data = export_dict[key] ) - print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) - - -def extract_xpcs_results_from_h5( filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex = None ): - ''' - YG. Dec 22, 2016 - extract data from a h5 file - - filename: the h5 file name - import_dir: the imported file folder - onekey: string, if not None, only extract that key - return: - extact_dict: dict, with keys as md, g2, g4 et.al. - ''' - - import pandas as pds + data = hf.create_dataset(key, data=export_dict[key]) + print("The xpcs analysis results are exported to %s with filename as %s" % (export_dir, filename)) + + +def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex=None): + """ + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. 
+    """
+
     import numpy as np
-    extract_dict = {}
+    import pandas as pds
+
+    extract_dict = {}
     fp = import_dir + filename
     pds_type_keys = []
-    dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids']
+    dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p", "taus_uids", "g2_uids"]
     if exclude_keys is None:
-        exclude_keys =[]
+        exclude_keys = []
     if onekey is None:
         for k in dicts:
             extract_dict[k] = {}
-        with h5py.File( fp, 'r') as hf:
-            #print (list( hf.keys()) )
-            for key in list( hf.keys()):
+        with h5py.File(fp, "r") as hf:
+            # print (list( hf.keys()) )
+            for key in list(hf.keys()):
                 if key not in exclude_keys:
                     if key in dicts:
                         md = hf.get(key)
                         for key_ in list(md.attrs):
-                            #print(key, key_)
-                            if key == 'qval_dict':
-                                extract_dict[key][int(key_)] = md.attrs[key_]
+                            # print(key, key_)
+                            if key == "qval_dict":
+                                extract_dict[key][int(key_)] = md.attrs[key_]
                             else:
-                                extract_dict[key][key_] = md.attrs[key_]
+                                extract_dict[key][key_] = md.attrs[key_]

-                    elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
-                        pds_type_keys.append( key )
-                    else:
-                        if key == 'g12b':
+                    elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]:
+                        pds_type_keys.append(key)
+                    else:
+                        if key == "g12b":
                             if two_time_qindex is not None:
-                                extract_dict[key] = hf.get( key )[:,:,two_time_qindex]
+                                extract_dict[key] = hf.get(key)[:, :, two_time_qindex]
                             else:
-                                extract_dict[key] = hf.get( key )[:]
-                        else:
-                            extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key ))
-
+                                extract_dict[key] = hf.get(key)[:]
+                        else:
+                            extract_dict[key] = hf.get(key)[:]  # np.array( hf.get( key ))
+
         for key in pds_type_keys:
             if key not in exclude_keys:
-                extract_dict[key] = pds.read_hdf(fp, key= key )
+                extract_dict[key] = pds.read_hdf(fp, key=key)
     else:
-        if onekey == 'md':
-            with h5py.File( fp, 'r') as hf:
-                md = hf.get('md')
+        if onekey == "md":
+            extract_dict["md"] = {}
+            with h5py.File(fp, "r") as hf:
+                md = hf.get("md")
                 for key in list(md.attrs):
-                    extract_dict['md'][key] = md.attrs[key]
-        elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
-            extract_dict[onekey] = pds.read_hdf(fp, key= onekey )
+                    extract_dict["md"][key] = md.attrs[key]
+        elif onekey in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]:
+            extract_dict[onekey] = pds.read_hdf(fp, key=onekey)
         else:
             try:
-                with h5py.File( fp, 'r') as hf:
-                    if key == 'g12b':
+                with h5py.File(fp, "r") as hf:
+                    if onekey == "g12b":
                         if two_time_qindex is not None:
-                            extract_dict[key] = hf.get( key )[:,:,two_time_qindex]
+                            extract_dict[onekey] = hf.get(onekey)[:, :, two_time_qindex]
                         else:
-                            extract_dict[key] = hf.get( key )[:]
-                    else:
-                        extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key ))
-                        #extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey ))
+                            extract_dict[onekey] = hf.get(onekey)[:]
+                    else:
+                        extract_dict[onekey] = hf.get(onekey)[:]  # np.array( hf.get( key ))
+                        # extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey ))
             except:
-                print("The %s dosen't have this %s value"%(fp, onekey) )
+                print("The %s doesn't have this %s value" % (fp, onekey))
     return extract_dict


+def read_contrast_from_multi_csv(uids, path, times=None, unit=20):
+    """Y.G. 2016, Dec 23, load contrast from multi csv file"""

-
-
-def read_contrast_from_multi_csv( uids, path, times=None, unit=20 ):
-    '''Y.G. 
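# Usage sketch for the extractor above (filename, path and q index are
# placeholders): load everything, a single key only, or just one q index of
# the large two-time array g12b to keep the memory footprint down.
path = "/XF11ID/analysis/Results/af8f66/"
res = extract_xpcs_results_from_h5("af8f66_Res.h5", path)
g2_only = extract_xpcs_results_from_h5("af8f66_Res.h5", path, onekey="g2")
one_q = extract_xpcs_results_from_h5("af8f66_Res.h5", path, two_time_qindex=3)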
2016, Dec 23, load contrast from multi csv file''' - - N = len(uids) + N = len(uids) if times is None: - times = np.array( [0] + [2**i for i in range(N)] )*unit - for i, uid in enumerate(uids): - fp = path + uid + '/uid=%s--contrast_factorL.csv'%uid - contri = pds.read_csv( fp ) - qs = np.array( contri[contri.columns[0]] ) - contri_ = np.array( contri[contri.columns[1]] ) - if i ==0: - contr = np.zeros( [ N, len(qs)]) + times = np.array([0] + [2**i for i in range(N)]) * unit + for i, uid in enumerate(uids): + fp = path + uid + "/uid=%s--contrast_factorL.csv" % uid + contri = pds.read_csv(fp) + qs = np.array(contri[contri.columns[0]]) + contri_ = np.array(contri[contri.columns[1]]) + if i == 0: + contr = np.zeros([N, len(qs)]) contr[i] = contri_ - #contr[0,:] = np.nan + # contr[0,:] = np.nan return times, contr -def read_contrast_from_multi_h5( uids, path, ): - '''Y.G. 2016, Dec 23, load contrast from multi h5 file''' - N = len(uids) - times_xsvs = np.zeros( N ) - for i, uid in enumerate(uids): - t = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, - import_dir = path + uid + '/' , onekey= 'times_xsvs') - times_xsvs[i] = t['times_xsvs'][0] - contri = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, - import_dir = path + uid + '/' , onekey= 'contrast_factorL') - if i ==0: - contr = np.zeros( [ N, contri['contrast_factorL'].shape[0] ]) - contr[i] = contri['contrast_factorL'][:,0] - return times_xsvs, contr - - - - - +def read_contrast_from_multi_h5( + uids, + path, +): + """Y.G. 2016, Dec 23, load contrast from multi h5 file""" + N = len(uids) + times_xsvs = np.zeros(N) + for i, uid in enumerate(uids): + t = extract_xpcs_results_from_h5( + filename="%s_Res.h5" % uid, import_dir=path + uid + "/", onekey="times_xsvs" + ) + times_xsvs[i] = t["times_xsvs"][0] + contri = extract_xpcs_results_from_h5( + filename="%s_Res.h5" % uid, import_dir=path + uid + "/", onekey="contrast_factorL" + ) + if i == 0: + contr = np.zeros([N, contri["contrast_factorL"].shape[0]]) + contr[i] = contri["contrast_factorL"][:, 0] + return times_xsvs, contr diff --git a/pyCHX/chx_Fitters2D.py b/pyCHX/chx_Fitters2D.py index 852502e..a2f27ab 100644 --- a/pyCHX/chx_Fitters2D.py +++ b/pyCHX/chx_Fitters2D.py @@ -11,10 +11,7 @@ def gauss_func(x, xc, amp, sigma, baseline): def gauss2D_func(x, y, xc, amp, sigmax, yc, sigmay, baseline): - return ( - amp * np.exp(-((x - xc) ** 2) / 2.0 / sigmax**2) * np.exp(-((y - yc) ** 2) / 2.0 / sigmay**2) - + baseline - ) + return amp * np.exp(-((x - xc) ** 2) / 2.0 / sigmax**2) * np.exp(-((y - yc) ** 2) / 2.0 / sigmay**2) + baseline def extract_param(bestfits, key): diff --git a/pyCHX/chx_compress.py b/pyCHX/chx_compress.py index 706cf7e..16e9881 100644 --- a/pyCHX/chx_compress.py +++ b/pyCHX/chx_compress.py @@ -1,610 +1,858 @@ -import os,shutil +import gc +import os +import pickle as pkl +import shutil +import struct +import sys +from contextlib import closing from glob import iglob +from multiprocessing import Pool +import dill import matplotlib.pyplot as plt -from pyCHX.chx_libs import (np, roi, time, datetime, os, getpass, db, - LogNorm, RUN_GUI) -from pyCHX.chx_generic_functions import (create_time_slice,get_detector, get_sid_filenames, - load_data,reverse_updown,rot90_clockwise, get_eigerImage_per_file,copy_data,delete_data, ) - -import struct -from tqdm import tqdm -from contextlib import closing - -from multiprocessing import Pool -import dill -import sys -import gc -import pickle as pkl # imports handler from CHX # this is where the decision is made whether or not to 
use dask -#from chxtools.handlers import EigerImages, EigerHandler -from eiger_io.fs_handler import EigerHandler,EigerImages +# from chxtools.handlers import EigerImages, EigerHandler +from eiger_io.fs_handler import EigerHandler, EigerImages +from tqdm import tqdm -def run_dill_encoded(what): +from pyCHX.chx_generic_functions import ( + copy_data, + create_time_slice, + delete_data, + get_detector, + get_eigerImage_per_file, + get_sid_filenames, + load_data, + reverse_updown, + rot90_clockwise, +) +from pyCHX.chx_libs import RUN_GUI, LogNorm, datetime, db, getpass, np, os, roi, time + + +def run_dill_encoded(what): fun, args = dill.loads(what) return fun(*args) -def apply_async(pool, fun, args, callback=None): - return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback) + +def apply_async(pool, fun, args, callback=None): + return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),), callback=callback) -def map_async(pool, fun, args ): - return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) +def map_async(pool, fun, args): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) - -def pass_FD(FD,n): - #FD.rdframe(n) + +def pass_FD(FD, n): + # FD.rdframe(n) try: FD.seekimg(n) except: pass return False -def go_through_FD(FD): - if not pass_FD(FD,FD.beg): + + +def go_through_FD(FD): + if not pass_FD(FD, FD.beg): for i in range(FD.beg, FD.end): - pass_FD(FD,i) + pass_FD(FD, i) else: pass - - - - - -def compress_eigerdata( images, mask, md, filename=None, force_compress=False, - bad_pixel_threshold=1e15, bad_pixel_low_threshold=0, - hot_pixel_threshold=2**30, nobytes=2,bins=1, bad_frame_list=None, - para_compress= False, num_sub=100, dtypes='uid',reverse =True, rot90=False, - num_max_para_process=500, with_pickle=False, direct_load_data=True, data_path=None, - images_per_file=100, copy_rawdata=True,new_path = '/tmp_data/data/'): - ''' + + +def compress_eigerdata( + images, + mask, + md, + filename=None, + force_compress=False, + bad_pixel_threshold=1e15, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + nobytes=2, + bins=1, + bad_frame_list=None, + para_compress=False, + num_sub=100, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + with_pickle=False, + direct_load_data=True, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + """ Init 2016, YG@CHX DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data Add copy_rawdata opt. - - ''' - - end= len(images)//bins + + """ + + end = len(images) // bins if filename is None: - filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid'] - if dtypes!= 'uid': - para_compress= False + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"] + if dtypes != "uid": + para_compress = False else: if para_compress: - images='foo' - #para_compress= True - #print( dtypes ) - if direct_load_data: - images_per_file = get_eigerImage_per_file( data_path ) + images = "foo" + # para_compress= True + # print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file(data_path) if data_path is None: sud = get_sid_filenames(db[uid]) data_path = sud[2][0] if force_compress: - print ("Create a new compress file with filename as :%s."%filename) + print("Create a new compress file with filename as :%s." % filename) if para_compress: # stop connection to be before forking... 
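# Why the dill wrappers above exist: multiprocessing's default pickler cannot
# ship closures or interactively defined functions to workers, so the callable
# and its arguments are serialized with dill and unpacked by run_dill_encoded
# inside the worker. A minimal sketch (the worker function is illustrative):
from multiprocessing import Pool


def _add(x, y):
    return x + y


if __name__ == "__main__":
    with Pool(2) as pool:
        res = apply_async(pool, _add, (1, 2))  # returns an AsyncResult
        print(res.get())  # -> 3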
(let it reset again) db.reg.disconnect() db.mds.reset_connection() - print( 'Using a multiprocess to compress the data.') - return para_compress_eigerdata( images, mask, md, filename, - bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, - bins=bins, num_sub=num_sub, dtypes=dtypes, rot90=rot90, - reverse=reverse, num_max_para_process=num_max_para_process, - with_pickle= with_pickle, direct_load_data= direct_load_data, - data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata,new_path=new_path) + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + rot90=rot90, + reverse=reverse, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + new_path=new_path, + ) else: - return init_compress_eigerdata( images, mask, md, filename, - bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, - images_per_file=images_per_file) + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) else: - if not os.path.exists( filename ): - print ("Create a new compress file with filename as :%s."%filename) + if not os.path.exists(filename): + print("Create a new compress file with filename as :%s." 
% filename) if para_compress: - print( 'Using a multiprocess to compress the data.') - return para_compress_eigerdata( images, mask, md, filename, - bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins, - num_sub=num_sub, dtypes=dtypes, reverse=reverse,rot90=rot90, - num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata) + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + reverse=reverse, + rot90=rot90, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + ) else: - return init_compress_eigerdata( images, mask, md, filename, - bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) - else: - print ("Using already created compressed file with filename as :%s."%filename) - beg=0 - return read_compressed_eigerdata( mask, filename, beg, end, - bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) - - - -def read_compressed_eigerdata( mask, filename, beg, end, - bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False, - direct_load_data=False,data_path=None,images_per_file=100): - ''' - Read already compress eiger data - Return - mask - avg_img - imsum - bad_frame_list - - ''' - #should use try and except instead of with_pickle in the future! + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + print("Using already created compressed file with filename as :%s." 
% filename) + beg = 0 + return read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + bad_frame_list=bad_frame_list, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + + +def read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + bad_frame_list=None, + with_pickle=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + """ + # should use try and except instead of with_pickle in the future! CAL = False if not with_pickle: CAL = True else: try: - mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) ) + mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) except: CAL = True - if CAL: - FD = Multifile( filename, beg, end) - imgsum = np.zeros( FD.end- FD.beg, dtype= np.float64 ) - avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float64 ) - imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1, - bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold, - hot_pixel_threshold=hot_pixel_threshold, plot_ = False, - bad_frame_list=bad_frame_list) - avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ ) - FD.FID.close() - - return mask, avg_img, imgsum, bad_frame_list_ - -def para_compress_eigerdata( images, mask, md, filename, num_sub=100, - bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,rot90=False, - num_max_para_process=500, cpu_core_number=72, with_pickle=True, - direct_load_data=False, data_path=None,images_per_file=100, - copy_rawdata=True,new_path = '/tmp_data/data/'): - - data_path_ = data_path - if dtypes=='uid': - uid= md['uid'] #images + if CAL: + FD = Multifile(filename, beg, end) + imgsum = np.zeros(FD.end - FD.beg, dtype=np.float64) + avg_img = np.zeros([FD.md["ncols"], FD.md["nrows"]], dtype=np.float64) + imgsum, bad_frame_list_ = get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=bad_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, + plot_=False, + bad_frame_list=bad_frame_list, + ) + avg_img = get_avg_imgc(FD, beg=None, end=None, sampling=1, plot_=False, bad_frame_list=bad_frame_list_) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + + +def para_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + cpu_core_number=72, + with_pickle=True, + direct_load_data=False, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + + data_path_ = data_path + if dtypes == "uid": + uid = md["uid"] # images if not direct_load_data: - detector = get_detector( db[uid ] ) - images_ = load_data( uid, detector, reverse= reverse,rot90=rot90 ) + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) else: - #print('Here for images_per_file: 
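
For orientation, a hedged sketch of how an analysis script might read back an already compressed file with read_compressed_eigerdata above (the .cmp path, uid, frame range and detector shape are placeholders, not values from this patch):

import numpy as np
from pyCHX.chx_compress import read_compressed_eigerdata

filename = "/XF11ID/analysis/Compressed_Data/uid_abc123.cmp"  # hypothetical uid
mask = np.ones((2167, 2070), dtype=bool)  # assumed 4M detector shape

mask, avg_img, imgsum, bad_frames = read_compressed_eigerdata(
    mask, filename, beg=0, end=1000,
    bad_pixel_threshold=1e15, bad_pixel_low_threshold=0,
    with_pickle=True,  # reuse filename + '.pkl' if present, else recompute
)
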
%s'%images_per_file) - #images_ = EigerImages( data_path, images_per_file=images_per_file) - #print('here') - if not copy_rawdata: - images_ = EigerImages(data_path,images_per_file, md) + # print('Here for images_per_file: %s'%images_per_file) + # images_ = EigerImages( data_path, images_per_file=images_per_file) + # print('here') + if not copy_rawdata: + images_ = EigerImages(data_path, images_per_file, md) else: - print('Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.') - print('Copying...') - copy_data( data_path, new_path ) - #print(data_path, new_path) - new_master_file = new_path + os.path.basename(data_path) - data_path_ = new_master_file - images_ = EigerImages( new_master_file, images_per_file, md) - #print(md) + print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.") + print("Copying...") + copy_data(data_path, new_path) + # print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages(new_master_file, images_per_file, md) + # print(md) if reverse: - images_ = reverse_updown( images_ ) # Why not np.flipud? - if rot90: - images_ = rot90_clockwise( images_ ) - - N= len(images_) - + images_ = reverse_updown(images_) # Why not np.flipud? + if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + else: - N = len(images) - N = int( np.ceil( N/ bins ) ) - Nf = int( np.ceil( N/ num_sub ) ) - if Nf > cpu_core_number: - print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number) + N = len(images) + N = int(np.ceil(N / bins)) + Nf = int(np.ceil(N / num_sub)) + if Nf > cpu_core_number: + print("The process number is larger than %s (XF11ID server core number)" % cpu_core_number) num_sub_old = num_sub - num_sub = int( np.ceil(N/cpu_core_number)) - Nf = int( np.ceil( N/ num_sub ) ) - print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub )) - create_compress_header( md, filename +'-header', nobytes, bins, rot90=rot90 ) - #print( 'done for header here') - #print(data_path_, images_per_file) - results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename, - num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, - bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes, - num_max_para_process=num_max_para_process, - reverse = reverse,rot90=rot90, - direct_load_data=direct_load_data, data_path=data_path_, - images_per_file=images_per_file) - - res_ = [ results[k].get() for k in list(sorted(results.keys())) ] - imgsum = np.zeros( N ) - bad_frame_list = np.zeros( N, dtype=bool ) + num_sub = int(np.ceil(N / cpu_core_number)) + Nf = int(np.ceil(N / num_sub)) + print("The sub compressed file number was changed from %s to %s" % (num_sub_old, num_sub)) + create_compress_header(md, filename + "-header", nobytes, bins, rot90=rot90) + # print( 'done for header here') + # print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( + images=images, + mask=mask, + md=md, + filename=filename, + num_sub=num_sub, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + dtypes=dtypes, + num_max_para_process=num_max_para_process, + reverse=reverse, + rot90=rot90, + direct_load_data=direct_load_data, + data_path=data_path_, + 
images_per_file=images_per_file, + ) + + res_ = [results[k].get() for k in list(sorted(results.keys()))] + imgsum = np.zeros(N) + bad_frame_list = np.zeros(N, dtype=bool) good_count = 1 - for i in range( Nf ): + for i in range(Nf): mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i] - imgsum[i*num_sub: (i+1)*num_sub] = imgsum_ - bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_ - if i==0: + imgsum[i * num_sub : (i + 1) * num_sub] = imgsum_ + bad_frame_list[i * num_sub : (i + 1) * num_sub] = bad_frame_list_ + if i == 0: mask = mask_ - avg_img = np.zeros_like( avg_img_ ) + avg_img = np.zeros_like(avg_img_) else: - mask *= mask_ - if not np.sum( np.isnan( avg_img_)): - avg_img += avg_img_ + mask *= mask_ + if not np.sum(np.isnan(avg_img_)): + avg_img += avg_img_ good_count += 1 - - bad_frame_list = np.where( bad_frame_list )[0] - avg_img /= good_count - + + bad_frame_list = np.where(bad_frame_list)[0] + avg_img /= good_count + if len(bad_frame_list): - print ('Bad frame list are: %s' %bad_frame_list) + print("Bad frame list are: %s" % bad_frame_list) else: - print ('No bad frames are involved.') - print( 'Combining the seperated compressed files together...') - combine_compressed( filename, Nf, del_old=True) + print("No bad frames are involved.") + print("Combining the seperated compressed files together...") + combine_compressed(filename, Nf, del_old=True) del results del res_ - if with_pickle: - pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) if copy_rawdata: - delete_data( data_path, new_path ) - return mask, avg_img, imgsum, bad_frame_list + delete_data(data_path, new_path) + return mask, avg_img, imgsum, bad_frame_list -def combine_compressed( filename, Nf, del_old=True): - old_files = [filename +'-header'] + +def combine_compressed(filename, Nf, del_old=True): + old_files = [filename + "-header"] for i in range(Nf): - old_files.append(filename + '_temp-%i.tmp' % i) - combine_binary_files(filename, old_files, del_old) - -def combine_binary_files(filename, old_files, del_old = False): - '''Combine binary files together''' - fn_ = open(filename, 'wb') - for ftemp in old_files: - shutil.copyfileobj( open(ftemp, 'rb'), fn_) + old_files.append(filename + "_temp-%i.tmp" % i) + combine_binary_files(filename, old_files, del_old) + + +def combine_binary_files(filename, old_files, del_old=False): + """Combine binary files together""" + fn_ = open(filename, "wb") + for ftemp in old_files: + shutil.copyfileobj(open(ftemp, "rb"), fn_) if del_old: - os.remove( ftemp ) - fn_.close() - -def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100, - bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images', - reverse =True, rot90=False, - num_max_para_process=50,direct_load_data=False, data_path=None, - images_per_file=100): - ''' + os.remove(ftemp) + fn_.close() + + +def para_segment_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="images", + reverse=True, + rot90=False, + num_max_para_process=50, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ parallelly compressed eiger data without header, this function is for parallel compress - ''' - if dtypes=='uid': - uid= md['uid'] #images + """ + if dtypes == "uid": + uid = 
md["uid"]  # images
        if not direct_load_data:
-            detector = get_detector( db[uid ] )
-            images_ = load_data( uid, detector, reverse= reverse, rot90=rot90 )
+            detector = get_detector(db[uid])
+            images_ = load_data(uid, detector, reverse=reverse, rot90=rot90)
        else:
            images_ = EigerImages(data_path, images_per_file, md)
        if reverse:
-            images_ = reverse_updown( images_ )
-        if rot90:
-            images_ = rot90_clockwise( images_ )
-
-        N= len(images_)
-
+            images_ = reverse_updown(images_)
+        if rot90:
+            images_ = rot90_clockwise(images_)
+
+        N = len(images_)
+
    else:
-        N = len(images)
-
-    #N = int( np.ceil( N/ bins ) )
-    num_sub *= bins
-    if N%num_sub:
-        Nf = N// num_sub +1
-        print('The average image intensity would be slightly not correct, about 1% error.')
-        print( 'Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image')
+        N = len(images)
+
+    # N = int( np.ceil( N/ bins ) )
+    num_sub *= bins
+    if N % num_sub:
+        Nf = N // num_sub + 1
+        print("The averaged image will be slightly inaccurate (roughly 1% error).")
+        print("Choose num_sub so that Num_images % num_sub == 0 to get an exact avg_image.")
    else:
-        Nf = N//num_sub
-        print( 'It will create %i temporary files for parallel compression.'%Nf)
+        Nf = N // num_sub
+        print("It will create %i temporary files for parallel compression." % Nf)

-    if Nf> num_max_para_process:
-        N_runs = np.int( np.ceil( Nf/float(num_max_para_process)))
-        print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process ))
+    if Nf > num_max_para_process:
+        N_runs = int(np.ceil(Nf / float(num_max_para_process)))  # builtin int; np.int is removed in modern NumPy
+        print("The parallel run number: %s is larger than num_max_para_process: %s" % (Nf, num_max_para_process))
    else:
-        N_runs= 1
-    result = {}
-    #print( mask_filename )# + '*'* 10 + 'here' )
-    for nr in range( N_runs ):
-        if (nr+1)*num_max_para_process > Nf:
-            inputs= range( num_max_para_process*nr, Nf )
-        else:
-            inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) )
-        fns = [ filename + '_temp-%i.tmp'%i for i in inputs]
-        #print( nr, inputs, )
-        pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 )
-        #print( inputs )
-        for i in inputs:
-            if i*num_sub <= N:
-                result[i] = pool.apply_async( segment_compress_eigerdata, [
-                    images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,rot90, direct_load_data, data_path,images_per_file ] )
-
+        N_runs = 1
+    result = {}
+    # print( mask_filename )# + '*'* 10 + 'here' )
+    for nr in range(N_runs):
+        if (nr + 1) * num_max_para_process > Nf:
+            inputs = range(num_max_para_process * nr, Nf)
+        else:
+            inputs = range(num_max_para_process * nr, num_max_para_process * (nr + 1))
+        fns = [filename + "_temp-%i.tmp" % i for i in inputs]
+        # print( nr, inputs, )
+        pool = Pool(processes=len(inputs))  # , maxtasksperchild=1000 )
+        # print( inputs )
+        for i in inputs:
+            if i * num_sub <= N:
+                result[i] = pool.apply_async(
+                    segment_compress_eigerdata,
+                    [
+                        images,
+                        mask,
+                        md,
+                        filename + "_temp-%i.tmp" % i,
+                        bad_pixel_threshold,
+                        hot_pixel_threshold,
+                        bad_pixel_low_threshold,
+                        nobytes,
+                        bins,
+                        i * num_sub,
+                        (i + 1) * num_sub,
+                        dtypes,
+                        reverse,
+                        rot90,
+                        direct_load_data,
+                        data_path,
+                        images_per_file,
+                    ],
+                )
+
        pool.close()
        pool.join()
-        pool.terminate()
-    return result
-
-def segment_compress_eigerdata( images, mask, md, filename,
-        bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
- 
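
The segmentation bookkeeping above can be checked in isolation. A small illustrative sketch (hypothetical helper plan_segments, mirroring the arithmetic in para_segment_compress_eigerdata and ignoring frame binning):

import numpy as np

def plan_segments(n_images, num_sub, num_max_para_process=500):
    # one temporary segment file per worker task
    n_files = n_images // num_sub + (1 if n_images % num_sub else 0)
    # tasks are dispatched in batches of at most num_max_para_process
    n_runs = int(np.ceil(n_files / float(num_max_para_process)))
    return n_files, n_runs

print(plan_segments(10000, 100))                            # (100, 1)
print(plan_segments(10050, 100, num_max_para_process=50))   # (101, 3)
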
bad_pixel_low_threshold=0, nobytes=4, bins=1, - N1=None, N2=None, dtypes='images',reverse =True, rot90=False,direct_load_data=False, data_path=None,images_per_file=100 ): - ''' + pool.terminate() + return result + + +def segment_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + N1=None, + N2=None, + dtypes="images", + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ Create a compressed eiger data without header, this function is for parallel compress for parallel compress don't pass any non-scalar parameters - ''' - if dtypes=='uid': - uid= md['uid'] #images + """ + if dtypes == "uid": + uid = md["uid"] # images if not direct_load_data: - detector = get_detector( db[uid ] ) - images = load_data( uid, detector, reverse= reverse, rot90=rot90 )[N1:N2] - else: - images = EigerImages(data_path, images_per_file, md)[N1:N2] + detector = get_detector(db[uid]) + images = load_data(uid, detector, reverse=reverse, rot90=rot90)[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] if reverse: - images = reverse_updown( EigerImages(data_path, images_per_file, md) )[N1:N2] - if rot90: - images = rot90_clockwise( images ) - - Nimg_ = len( images) - M,N = images[0].shape - avg_img = np.zeros( [M,N], dtype= np.float64 ) - Nopix = float( avg_img.size ) - n=0 + images = reverse_updown(EigerImages(data_path, images_per_file, md))[N1:N2] + if rot90: + images = rot90_clockwise(images) + + Nimg_ = len(images) + M, N = images[0].shape + avg_img = np.zeros([M, N], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 good_count = 0 - #frac = 0.0 - if nobytes==2: - dtype= np.int16 - elif nobytes==4: - dtype= np.int32 - elif nobytes==8: - dtype=np.float64 + # frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 else: - print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") - dtype= np.int32 - - - #Nimg = Nimg_//bins - Nimg = int( np.ceil( Nimg_ / bins ) ) - time_edge = np.array(create_time_slice( N= Nimg_, - slice_num= Nimg, slice_width= bins )) - #print( time_edge, Nimg_, Nimg, bins, N1, N2 ) - imgsum = np.zeros( Nimg ) - if bins!=1: - #print('The frames will be binned by %s'%bins) - dtype=np.float64 - - fp = open( filename,'wb' ) - for n in range(Nimg): - t1,t2 = time_edge[n] - if bins!=1: - img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype) + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + # Nimg = Nimg_//bins + Nimg = int(np.ceil(Nimg_ / bins)) + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + # print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros(Nimg) + if bins != 1: + # print('The frames will be binned by %s'%bins) + dtype = np.float64 + + fp = open(filename, "wb") + for n in range(Nimg): + t1, t2 = time_edge[n] + if bins != 1: + img = np.array(np.average(images[t1:t2], axis=0), dtype=dtype) else: - img = np.array( images[t1], dtype=dtype) - mask &= img < hot_pixel_threshold - p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data - v = np.ravel( np.array( img, dtype= dtype )) [p] - dlen = len(p) - imgsum[n] = v.sum() - if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): + img = np.array(images[t1], dtype=dtype) + mask &= img < 
hot_pixel_threshold + p = np.where((np.ravel(img) > 0) * np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen == 0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): dlen = 0 - fp.write( struct.pack( '@I', dlen )) - else: - np.ravel( avg_img )[p] += v - good_count +=1 - fp.write( struct.pack( '@I', dlen )) - fp.write( struct.pack( '@{}i'.format( dlen), *p)) - if bins==1: - fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) + fp.write(struct.pack("@I", dlen)) + else: + np.ravel(avg_img)[p] += v + good_count += 1 + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) else: - fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1 - del p,v, img - fp.flush() - fp.close() - avg_img /= good_count - bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) - sys.stdout.write('#') - sys.stdout.flush() - #del images, mask, avg_img, imgsum, bad_frame_list - #print( 'Should release memory here') - return mask, avg_img, imgsum, bad_frame_list - - - -def create_compress_header( md, filename, nobytes=4, bins=1, rot90=False ): - ''' + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 + del p, v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write("#") + sys.stdout.flush() + # del images, mask, avg_img, imgsum, bad_frame_list + # print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + +def create_compress_header(md, filename, nobytes=4, bins=1, rot90=False): + """ Create the head for a compressed eiger data, this function is for parallel compress - ''' - fp = open( filename,'wb' ) - #Make Header 1024 bytes - #md = images.md - if bins!=1: - nobytes=8 - flag = True - #print( list(md.keys()) ) - #print(md) - if 'pixel_mask' in list(md.keys()): - sx,sy = md['pixel_mask'].shape[0], md['pixel_mask'].shape[1] - elif 'img_shape' in list(md.keys()): - sx,sy = md['img_shape'][0], md['img_shape'][1] + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + flag = True + # print( list(md.keys()) ) + # print(md) + if "pixel_mask" in list(md.keys()): + sx, sy = md["pixel_mask"].shape[0], md["pixel_mask"].shape[1] + elif "img_shape" in list(md.keys()): + sx, sy = md["img_shape"][0], md["img_shape"][1] else: - sx,sy= 2167, 2070 #by default for 4M - #print(flag) - klst = [ 'beam_center_x','beam_center_y', 'count_time','detector_distance', - 'frame_time','incident_wavelength', 'x_pixel_size','y_pixel_size'] - vs = [ 0 ,0, 0, 0, - 0, 0, 75, 75] + sx, sy = 2167, 2070 # by default for 4M + # print(flag) + klst = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + ] + vs = [0, 0, 0, 0, 0, 0, 75, 75] for i, k in enumerate(klst): if k in list(md.keys()): - vs[i] = md[k] - if flag: + vs[i] = md[k] + if flag: if rot90: - Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', - vs[0], vs[1], vs[2], vs[3], - vs[4], vs[5], vs[6], vs[7], - nobytes,sx, sy, - 0, sx, - 0,sy ) - + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + 
vs[4], + vs[5], + vs[6], + vs[7], + nobytes, + sx, + sy, + 0, + sx, + 0, + sy, + ) + else: - Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', - vs[0], vs[1], vs[2], vs[3], - vs[4], vs[5], vs[6], vs[7], -#md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], - nobytes, sy,sx, - 0, sy, - 0, sx - ) - - - - fp.write( Header) - fp.close() - - - -def init_compress_eigerdata( images, mask, md, filename, - bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, - bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True, - reverse =True, rot90=False, - direct_load_data=False, data_path=None,images_per_file=100, - ): - ''' - Compress the eiger data - - Create a new mask by remove hot_pixel - Do image average - Do each image sum - Find badframe_list for where image sum above bad_pixel_threshold - Generate a compressed data with filename - - if bins!=1, will bin the images with bin number as bins - - Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', - 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', - bytes per pixel (either 2 or 4 (Default)), - Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] - - Return - mask - avg_img - imsum - bad_frame_list - - ''' - fp = open( filename,'wb' ) - #Make Header 1024 bytes - #md = images.md - if bins!=1: - nobytes=8 - if 'count_time' not in list( md.keys() ): - md['count_time']=0 - if 'detector_distance' not in list( md.keys() ): - md['detector_distance']=0 - if 'frame_time' not in list( md.keys() ): - md['frame_time']=0 - if 'incident_wavelength' not in list( md.keys() ): - md['incident_wavelength']=0 - if 'y_pixel_size' not in list( md.keys() ): - md['y_pixel_size']=0 - if 'x_pixel_size' not in list( md.keys() ): - md['x_pixel_size']=0 - if 'beam_center_x' not in list( md.keys() ): - md['beam_center_x']=0 - if 'beam_center_y' not in list( md.keys() ): - md['beam_center_y']=0 - + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + # md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, + sy, + sx, + 0, + sy, + 0, + sx, + ) + + fp.write(Header) + fp.close() + + +def init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + with_pickle=True, + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + if "count_time" not in list(md.keys()): + md["count_time"] = 0 + if 
"detector_distance" not in list(md.keys()): + md["detector_distance"] = 0 + if "frame_time" not in list(md.keys()): + md["frame_time"] = 0 + if "incident_wavelength" not in list(md.keys()): + md["incident_wavelength"] = 0 + if "y_pixel_size" not in list(md.keys()): + md["y_pixel_size"] = 0 + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 0 + if "beam_center_x" not in list(md.keys()): + md["beam_center_x"] = 0 + if "beam_center_y" not in list(md.keys()): + md["beam_center_y"] = 0 + if not rot90: - Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', - md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], - md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], - nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0], - 0, md['pixel_mask'].shape[1], - 0, md['pixel_mask'].shape[0] - ) + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[1], + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + ) else: - Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', - md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], - md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], - nobytes, md['pixel_mask'].shape[0], md['pixel_mask'].shape[1], - 0, md['pixel_mask'].shape[0], - 0, md['pixel_mask'].shape[1] - ) - - fp.write( Header) - - Nimg_ = len( images) - avg_img = np.zeros_like( images[0], dtype= np.float64 ) - Nopix = float( avg_img.size ) - n=0 + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[0], + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + ) + + fp.write(Header) + + Nimg_ = len(images) + avg_img = np.zeros_like(images[0], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 good_count = 0 frac = 0.0 - if nobytes==2: - dtype= np.int16 - elif nobytes==4: - dtype= np.int32 - elif nobytes==8: - dtype=np.float64 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 else: - print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") - dtype= np.int32 - - - Nimg = Nimg_//bins - time_edge = np.array(create_time_slice( N= Nimg_, - slice_num= Nimg, slice_width= bins )) - - imgsum = np.zeros( Nimg ) - if bins!=1: - print('The frames will be binned by %s'%bins) - - for n in tqdm( range(Nimg) ): - t1,t2 = time_edge[n] - img = np.average( images[t1:t2], axis=0 ) - mask &= img < hot_pixel_threshold - p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data - v = np.ravel( np.array( img, dtype= dtype )) [p] - dlen = len(p) + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + Nimg = Nimg_ // bins + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + + imgsum = np.zeros(Nimg) + if bins != 1: + print("The frames will be binned by %s" % bins) + + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + img = np.average(images[t1:t2], 
axis=0) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) & np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) imgsum[n] = v.sum() - if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): - #if imgsum[n] >=bad_pixel_threshold : + if (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + # if imgsum[n] >=bad_pixel_threshold : dlen = 0 - fp.write( struct.pack( '@I', dlen )) - else: - np.ravel(avg_img )[p] += v - good_count +=1 - frac += dlen/Nopix - #s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) - fp.write( struct.pack( '@I', dlen )) - fp.write( struct.pack( '@{}i'.format( dlen), *p)) - if bins==1: - if nobytes!=8: - fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) + fp.write(struct.pack("@I", dlen)) + else: + np.ravel(avg_img)[p] += v + good_count += 1 + frac += dlen / Nopix + # s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + if nobytes != 8: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) else: - fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) else: - fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) - #n +=1 - - fp.close() - frac /=good_count - print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) ) + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + # n +=1 + + fp.close() + frac /= good_count + print("The fraction of pixel occupied by photon is %6.3f%% " % (100 * frac)) avg_img /= good_count - - bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0] - #bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] - #bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] - #bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) - - + + bad_frame_list = np.where( + (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + )[0] + # bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + # bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + # bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + if len(bad_frame_list): - print ('Bad frame list are: %s' %bad_frame_list) + print("Bad frame list are: %s" % bad_frame_list) else: - print ('No bad frames are involved.') - if with_pickle: - pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) - return mask, avg_img, imgsum, bad_frame_list - + print("No bad frames are involved.") + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + return mask, avg_img, imgsum, bad_frame_list + - """ Description: This is code that Mark wrote to open the multifile format @@ -627,235 +875,247 @@ def init_compress_eigerdata( images, mask, md, filename, |--------------IMG N+1 begin------------| |----------------etc.....---------------| - - Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', - 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 
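
A concrete sketch of one such per-frame record (illustrative only, assuming nobytes=2 so values are written as int16 with the 'h' format; '@I' is the 4-byte dlen, '@{n}i' the pixel indices, '@{n}h' the counts):

import struct
import numpy as np

p = np.array([3, 17, 42], dtype=np.int32)  # indices of nonzero pixels
v = np.array([1, 5, 2], dtype=np.int16)    # photon counts at those pixels
record = struct.pack("@I", len(p))
record += struct.pack("@{}i".format(len(p)), *p)
record += struct.pack("@{}h".format(len(p)), *v)

# read the record back
dlen = struct.unpack_from("@I", record, 0)[0]
p2 = np.frombuffer(record, dtype=np.int32, count=dlen, offset=4)
v2 = np.frombuffer(record, dtype=np.int16, count=dlen, offset=4 + 4 * dlen)
assert dlen == 3 and (p2 == p).all() and (v2 == v).all()
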
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', bytes per pixel (either 2 or 4 (Default)), - Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, - - + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + """ class Multifile: - '''The class representing the multifile. - The recno is in 1 based numbering scheme (first record is 1) - This is efficient for reading in increasing order. - Note: reading same image twice in a row is like reading an earlier - numbered image and means the program starts for the beginning again. - - ''' - def __init__(self,filename,beg,end, reverse=False ): - '''Multifile initialization. Open the file. - Here I use the read routine which returns byte objects - (everything is an object in python). I use struct.unpack - to convert the byte object to other data type (int object - etc) - NOTE: At each record n, the file cursor points to record n+1 - ''' - self.FID = open(filename,"rb") -# self.FID.seek(0,os.SEEK_SET) + """The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + """ + + def __init__(self, filename, beg, end, reverse=False): + """Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + """ + self.FID = open(filename, "rb") + # self.FID.seek(0,os.SEEK_SET) self.filename = filename - #br: bytes read + # br: bytes read br = self.FID.read(1024) - self.beg=beg - self.end=end - self.reverse=reverse - ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', - 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', - 'bytes', - 'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' - ] - - magic = struct.unpack('@16s', br[:16]) - md_temp = struct.unpack('@8d7I916x', br[16:]) + self.beg = beg + self.end = end + self.reverse = reverse + ms_keys = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + "bytes", + "nrows", + "ncols", + "rows_begin", + "rows_end", + "cols_begin", + "cols_end", + ] + + magic = struct.unpack("@16s", br[:16]) + md_temp = struct.unpack("@8d7I916x", br[16:]) self.md = dict(zip(ms_keys, md_temp)) - - self.imgread=0 + + self.imgread = 0 self.recno = 0 if reverse: - nrows = self.md['nrows'] - ncols = self.md['ncols'] - self.md['nrows'] = ncols - self.md['ncols'] = nrows - rbeg = self.md['rows_begin'] - rend = self.md['rows_end'] - cbeg = self.md['cols_begin'] - cend = self.md['cols_end'] - self.md['rows_begin']=cbeg - self.md['rows_end']=cend - self.md['cols_begin']=rbeg - self.md['cols_end']=rend - - - - # some initialization stuff - self.byts = self.md['bytes'] - if (self.byts==2): + nrows = self.md["nrows"] + ncols = self.md["ncols"] + self.md["nrows"] = ncols + self.md["ncols"] = nrows + rbeg = self.md["rows_begin"] + rend = self.md["rows_end"] + cbeg = self.md["cols_begin"] + cend = self.md["cols_end"] + self.md["rows_begin"] = cbeg + self.md["rows_end"] = cend + self.md["cols_begin"] = rbeg + self.md["cols_end"] = rend + + # some initialization stuff + self.byts = self.md["bytes"] + if 
self.byts == 2: self.valtype = np.uint16 - elif (self.byts == 4): + elif self.byts == 4: self.valtype = np.uint32 - elif (self.byts == 8): + elif self.byts == 8: self.valtype = np.float64 - #now convert pieces of these bytes to our data - self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] - + # now convert pieces of these bytes to our data + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + # now read first image - #print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + # print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) def _readHeader(self): - self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] def _readImageRaw(self): - - p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen) - v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen) - self.imgread=1 - return(p,v) + + p = np.fromfile(self.FID, dtype=np.int32, count=self.dlen) + v = np.fromfile(self.FID, dtype=self.valtype, count=self.dlen) + self.imgread = 1 + return (p, v) def _readImage(self): - (p,v)=self._readImageRaw() - img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) ) - np.put( np.ravel(img), p, v ) - return(img) - - def seekimg(self,n=None): - - '''Position file to read the nth image. - For now only reads first image ignores n - ''' - # the logic involving finding the cursor position - if (n is None): + (p, v) = self._readImageRaw() + img = np.zeros((self.md["ncols"], self.md["nrows"])) + np.put(np.ravel(img), p, v) + return img + + def seekimg(self, n=None): + """Position file to read the nth image. + For now only reads first image ignores n + """ + # the logic involving finding the cursor position + if n is None: n = self.recno - if (n < self.beg or n > self.end): - raise IndexError('Error, record out of range') - #print (n, self.recno, self.FID.tell() ) - if ((n == self.recno) and (self.imgread==0)): - pass # do nothing - + if n < self.beg or n > self.end: + raise IndexError("Error, record out of range") + # print (n, self.recno, self.FID.tell() ) + if (n == self.recno) and (self.imgread == 0): + pass # do nothing + else: - if (n <= self.recno): #ensure cursor less than search pos - self.FID.seek(1024,os.SEEK_SET) - self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + if n <= self.recno: # ensure cursor less than search pos + self.FID.seek(1024, os.SEEK_SET) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] self.recno = 0 - self.imgread=0 + self.imgread = 0 if n == 0: - return - #have to iterate on seeking since dlen varies - #remember for rec recno, cursor is always at recno+1 - if(self.imgread==0 ): #move to next header if need to - self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) - for i in range(self.recno+1,n): - #the less seeks performed the faster - #print (i) - self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] - #print 's',self.dlen - self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) + return + # have to iterate on seeking since dlen varies + # remember for rec recno, cursor is always at recno+1 + if self.imgread == 0: # move to next header if need to + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + for i in range(self.recno + 1, n): + # the less seeks performed the faster + # print (i) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + # print 's',self.dlen + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) # we are now at recno in file, read the header and 
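+        # Sketch of the invariant used here: after record k is read, the file
+        # cursor sits at the start of record k+1 (see the class docstring), so
+        # a backward seek rewinds to the 1024-byte main header and walks
+        # forward, skipping dlen * (4 + byts) payload bytes per record
+        # (4 bytes per int32 pixel index plus byts bytes per value).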
data - #self._clearImage() + # self._clearImage() self._readHeader() - self.imgread=0 + self.imgread = 0 self.recno = n - def rdframe(self,n): - if self.seekimg(n)!=-1: - return(self._readImage()) - def rdrawframe(self,n): - if self.seekimg(n)!=-1: - return(self._readImageRaw()) + def rdframe(self, n): + if self.seekimg(n) != -1: + return self._readImage() + + def rdrawframe(self, n): + if self.seekimg(n) != -1: + return self._readImageRaw() - -class Multifile_Bins( object ): - ''' +class Multifile_Bins(object): + """ Bin a compressed file with bins number See Multifile for details for Multifile_class - ''' + """ + def __init__(self, FD, bins=100): - ''' + """ FD: the handler of a compressed Eiger frames bins: bins number - ''' - - self.FD=FD - if (FD.end - FD.beg)%bins: - print ('Please give a better bins number and make the length of FD/bins= integer') - else: + """ + + self.FD = FD + if (FD.end - FD.beg) % bins: + print("Please give a better bins number and make the length of FD/bins= integer") + else: self.bins = bins self.md = FD.md - #self.beg = FD.beg + # self.beg = FD.beg self.beg = 0 - Nimg = (FD.end - FD.beg) - slice_num = Nimg//bins - self.end = slice_num - self.time_edge = np.array(create_time_slice( N= Nimg, - slice_num= slice_num, slice_width= bins )) + FD.beg + Nimg = FD.end - FD.beg + slice_num = Nimg // bins + self.end = slice_num + self.time_edge = np.array(create_time_slice(N=Nimg, slice_num=slice_num, slice_width=bins)) + FD.beg self.get_bin_frame() - - def get_bin_frame(self): - FD= self.FD - self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] ) - for n in tqdm( range(len(self.time_edge))): - #print (n) - t1,t2 = self.time_edge[n] - #print( t1, t2) - self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, - plot_ = False, show_progress = False ) - def rdframe(self,n): - return self.frames[:,:,n] - - def rdrawframe(self,n): - x_= np.ravel( self.rdframe(n) ) - p= np.where( x_ ) [0] - v = np.array( x_[ p ]) - return ( np.array(p, dtype=np.int32), v) - - + + def get_bin_frame(self): + FD = self.FD + self.frames = np.zeros([FD.md["ncols"], FD.md["nrows"], len(self.time_edge)]) + for n in tqdm(range(len(self.time_edge))): + # print (n) + t1, t2 = self.time_edge[n] + # print( t1, t2) + self.frames[:, :, n] = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + + def rdframe(self, n): + return self.frames[:, :, n] + + def rdrawframe(self, n): + x_ = np.ravel(self.rdframe(n)) + p = np.where(x_)[0] + v = np.array(x_[p]) + return (np.array(p, dtype=np.int32), v) + + class MultifileBNL: - ''' + """ Re-write multifile from scratch. - ''' + """ + HEADER_SIZE = 1024 - def __init__(self, filename, mode='rb'): - ''' - Prepare a file for reading or writing. - mode : either 'rb' or 'wb' - ''' - if mode == 'wb': + + def __init__(self, filename, mode="rb"): + """ + Prepare a file for reading or writing. 
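
A hedged usage sketch for the readers defined above (the .cmp path and frame range are placeholders; Multifile_Bins requires (end - beg) to be divisible by bins):

from pyCHX.chx_compress import Multifile, Multifile_Bins

FD = Multifile("/XF11ID/analysis/Compressed_Data/uid_abc123.cmp", beg=0, end=10000)
print(FD.md["ncols"], FD.md["nrows"])  # geometry parsed from the 1024-byte header
img = FD.rdframe(5)      # dense 2-D frame
p, v = FD.rdrawframe(5)  # sparse form: (pixel indices, values)

FDbin = Multifile_Bins(FD, bins=100)  # averages every 100 frames into one
img_b = FDbin.rdframe(0)
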
+ mode : either 'rb' or 'wb' + """ + if mode == "wb": raise ValueError("Write mode 'wb' not supported yet") - if mode != 'rb' and mode != 'wb': - raise ValueError("Error, mode must be 'rb' or 'wb'" - "got : {}".format(mode)) + if mode != "rb" and mode != "wb": + raise ValueError("Error, mode must be 'rb' or 'wb'" "got : {}".format(mode)) self._filename = filename self._mode = mode # open the file descriptor # create a memmap - if mode == 'rb': - self._fd = np.memmap(filename, dtype='c') - elif mode == 'wb': + if mode == "rb": + self._fd = np.memmap(filename, dtype="c") + elif mode == "wb": self._fd = open(filename, "wb") # these are only necessary for writing self.md = self._read_main_header() - self._cols = int(self.md['nrows']) - self._rows = int(self.md['ncols']) + self._cols = int(self.md["nrows"]) + self._rows = int(self.md["ncols"]) # some initialization stuff - self.nbytes = self.md['bytes'] - if (self.nbytes==2): - self.valtype = " self.Nframes: raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) # dlen is 4 bytes cur = self.frame_indexes[n] - dlen = np.frombuffer(self._fd[cur:cur+4], dtype=" nbytes - vals = self._fd[cur: cur+dlen*self.nbytes] + vals = self._fd[cur : cur + dlen * self.nbytes] vals = np.frombuffer(vals, dtype=self.valtype) return pos, vals + def rdframe(self, n): # read header then image pos, vals = self._read_raw(n) - img = np.zeros((self._rows*self._cols,)) + img = np.zeros((self._rows * self._cols,)) img[pos] = vals return img.reshape((self._rows, self._cols)) + def rdrawframe(self, n): # read header then image return self._read_raw(n) - + + class MultifileBNLCustom(MultifileBNL): def __init__(self, filename, beg=0, end=None, **kwargs): super().__init__(filename, **kwargs) self.beg = beg if end is None: - end = self.Nframes-1 + end = self.Nframes - 1 self.end = end + def rdframe(self, n): if n > self.end or n < self.beg: raise IndexError("Index out of range") - #return super().rdframe(n - self.beg) - return super().rdframe( n ) + # return super().rdframe(n - self.beg) + return super().rdframe(n) + def rdrawframe(self, n): - #return super().rdrawframe(n - self.beg) + # return super().rdrawframe(n - self.beg) if n > self.end or n < self.beg: - raise IndexError("Index out of range") - return super().rdrawframe(n ) - - - -def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None, - show_progress=True, *argv,**kwargs): - '''Get average imagef from a data_series by every sampling number to save time''' - #avg_img = np.average(data_series[:: sampling], axis=0) - + raise IndexError("Index out of range") + return super().rdrawframe(n) + + +def get_avg_imgc( + FD, beg=None, end=None, sampling=100, plot_=False, bad_frame_list=None, show_progress=True, *argv, **kwargs +): + """Get average imagef from a data_series by every sampling number to save time""" + # avg_img = np.average(data_series[:: sampling], axis=0) + if beg is None: beg = FD.beg if end is None: end = FD.end - + avg_img = FD.rdframe(beg) - n=1 - flag=True - if show_progress: - #print( sampling-1 + beg , end, sampling ) + n = 1 + flag = True + if show_progress: + # print( sampling-1 + beg , end, sampling ) if bad_frame_list is None: - bad_frame_list =[] - fra_num = int( (end - beg )/sampling ) - len( bad_frame_list ) - for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num): + bad_frame_list = [] + fra_num = int((end - beg) / sampling) - len(bad_frame_list) + for i in tqdm(range(sampling - 1 + beg, end, sampling), 
desc="Averaging %s images" % fra_num): if bad_frame_list is not None: if i in bad_frame_list: - flag= False + flag = False else: - flag=True - #print(i, flag) + flag = True + # print(i, flag) if flag: - (p,v) = FD.rdrawframe(i) - if len(p)>0: - np.ravel(avg_img )[p] += v - n += 1 + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 else: - for i in range( sampling-1 + beg , end, sampling ): + for i in range(sampling - 1 + beg, end, sampling): if bad_frame_list is not None: if i in bad_frame_list: - flag= False + flag = False else: - flag=True + flag = True if flag: - (p,v) = FD.rdrawframe(i) - if len(p)>0: - np.ravel(avg_img )[p] += v - n += 1 - - avg_img /= n + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + + avg_img /= n if plot_: if RUN_GUI: fig = Figure() ax = fig.add_subplot(111) else: fig, ax = plt.subplots() - uid = 'uid' - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] - im = ax.imshow(avg_img , cmap='viridis',origin='lower', - norm= LogNorm(vmin=0.001, vmax=1e2)) - #ax.set_title("Masked Averaged Image") - ax.set_title('uid= %s--Masked-Averaged-Image-'%uid) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked-Averaged-Image-" % uid) fig.colorbar(im) if save: - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs['path'] - if 'uid' in kwargs: - uid = kwargs['uid'] + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] else: - uid = 'uid' - #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--avg-img-"%uid + '.png' - plt.savefig( fp, dpi=fig.dpi) - #plt.show() + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() return avg_img - -def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False): +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation Parameters @@ -1041,12 +1318,14 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = Fals index : list The labels for each element of the `mean_intensity` list """ - - qind, pixelist = roi.extract_label_indices( labeled_array ) - sx,sy = ( FD.rdframe(FD.beg) ).shape - if labeled_array.shape != ( sx,sy ): + + qind, pixelist = roi.extract_label_indices(labeled_array) + sx, sy = (FD.rdframe(FD.beg)).shape + if labeled_array.shape != (sx, sy): raise ValueError( - " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( sx,sy, labeled_array.shape[0], labeled_array.shape[1]) ) + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (sx, sy, labeled_array.shape[0], labeled_array.shape[1]) + ) # handle various input for `index` if index is None: index = list(np.unique(labeled_array)) @@ -1057,133 +1336,138 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = Fals except TypeError: index = [index] - index = np.array( index ) - #print ('here') - good_ind = np.zeros( max(qind), dtype= np.int32 ) - good_ind[ index -1 ] = np.arange( 
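+    # Remapping sketch: good_ind maps the original ROI labels (1..max(qind))
+    # onto a compact 1..len(index) range; pixels whose label is not in `index`
+    # map to 0 and are dropped, so the per-frame bincount below is dense over
+    # just the selected ROIs.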
len(index) ) +1 - w = np.where( good_ind[qind -1 ] )[0] - qind = good_ind[ qind[w] -1 ] + index = np.array(index) + # print ('here') + good_ind = np.zeros(max(qind), dtype=np.int32) + good_ind[index - 1] = np.arange(len(index)) + 1 + w = np.where(good_ind[qind - 1])[0] + qind = good_ind[qind[w] - 1] pixelist = pixelist[w] - # pre-allocate an array for performance # might be able to use list comprehension to make this faster - - mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] ) - #fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) - timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) - #maxqind = max(qind) - norm = np.bincount( qind )[1:] - n= 0 - #for i in tqdm(range( FD.beg , FD.end )): + + mean_intensity = np.zeros([int((FD.end - FD.beg) / sampling), len(index)]) + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + # maxqind = max(qind) + norm = np.bincount(qind)[1:] + n = 0 + # for i in tqdm(range( FD.beg , FD.end )): if not multi_cor: - for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ): - (p,v) = FD.rdrawframe(i) - w = np.where( timg[p] )[0] - pxlist = timg[ p[w] ] -1 - mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:] - n +=1 + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get ROI intensity of each frame"): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mean_intensity[n] = np.bincount(qind[pxlist], weights=v[w], minlength=len(index) + 1)[1:] + n += 1 else: - ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ] - inputs = range( len(ring_masks) ) + ring_masks = [np.array(labeled_array == i, dtype=np.int64) for i in np.unique(labeled_array)[1:]] + inputs = range(len(ring_masks)) go_through_FD(FD) - pool = Pool(processes= len(inputs) ) - print( 'Starting assign the tasks...') - results = {} - for i in tqdm ( inputs ): - results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) ) - pool.close() - print( 'Starting running the tasks...') - res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ] - #return res - for i in inputs: - mean_intensity[:,i] = res[i] - print( 'ROI mean_intensit calculation is DONE!') + pool = Pool(processes=len(inputs)) + print("Starting assign the tasks...") + results = {} + for i in tqdm(inputs): + results[i] = apply_async(pool, _get_mean_intensity_one_q, (FD, sampling, ring_masks[i])) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + # return res + for i in inputs: + mean_intensity[:, i] = res[i] + print("ROI mean_intensit calculation is DONE!") del results - del res - - mean_intensity /= norm + del res + + mean_intensity /= norm return mean_intensity, index -def _get_mean_intensity_one_q( FD, sampling, labels ): - mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) ) - n=0 - qind, pixelist = roi.extract_label_indices( labels ) - # iterate over the images to compute multi-tau correlation - fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) - timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) - for i in range( FD.beg, FD.end, sampling ): - (p,v) = FD.rdrawframe(i) - w = 
np.where( timg[p] )[0] - pxlist = timg[ p[w] ] -1 - mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:] - n +=1 +def _get_mean_intensity_one_q(FD, sampling, labels): + mi = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + for i in range(FD.beg, FD.end, sampling): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mi[n] = np.bincount(qind[pxlist], weights=v[w], minlength=2)[1:] + n += 1 return mi - - - -def get_each_frame_intensityc( FD, sampling = 1, - bad_pixel_threshold=1e10, bad_pixel_low_threshold=0, - hot_pixel_threshold=2**30, - plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs): - '''Get the total intensity of each frame by sampling every N frames - Also get bad_frame_list by check whether above bad_pixel_threshold - - Usuage: - imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, - bad_pixel_threshold=1e10, plot_ = True) - ''' - - #print ( argv, kwargs ) - #mask &= img < hot_pixel_threshold - imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) ) - n=0 - for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ): - (p,v) = FD.rdrawframe(i) - if len(p)>0: - imgsum[n] = np.sum( v ) + + +def get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=1e10, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + plot_=False, + bad_frame_list=None, + save=False, + *argv, + **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + # mask &= img < hot_pixel_threshold + imgsum = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get each frame intensity"): + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + imgsum[n] = np.sum(v) n += 1 - + if plot_: - uid = 'uid' - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] - fig, ax = plt.subplots() - ax.plot( imgsum,'bo') - ax.set_title('uid= %s--imgsum'%uid) - ax.set_xlabel( 'Frame_bin_%s'%sampling ) - ax.set_ylabel( 'Total_Intensity' ) - + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + if save: - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs['path'] - if 'uid' in kwargs: - uid = kwargs['uid'] + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] else: - uid = 'uid' - #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--imgsum-"%uid + '.png' - fig.savefig( fp, dpi=fig.dpi) - - plt.show() - - bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg - - if bad_frame_list is not None: - bad_frame_list = np.unique( np.concatenate([bad_frame_list, 
bad_frame_list_]) ) - else: - bad_frame_list = bad_frame_list_ - - if len(bad_frame_list): - print ('Bad frame list length is: %s' %len(bad_frame_list)) - else: - print ('No bad frames are involved.') - return imgsum,bad_frame_list + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + plt.show() + bad_frame_list_ = ( + np.where((np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold))[0] + + FD.beg + ) + if bad_frame_list is not None: + bad_frame_list = np.unique(np.concatenate([bad_frame_list, bad_frame_list_])) + else: + bad_frame_list = bad_frame_list_ + if len(bad_frame_list): + print("Bad frame list length is: %s" % len(bad_frame_list)) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list diff --git a/pyCHX/chx_correlationc.py b/pyCHX/chx_correlationc.py index af0dbd4..02bc754 100644 --- a/pyCHX/chx_correlationc.py +++ b/pyCHX/chx_correlationc.py @@ -4,23 +4,34 @@ This module is for computation of time correlation by using compressing algorithm """ - from __future__ import absolute_import, division, print_function -from skbeam.core.utils import multi_tau_lags -from skbeam.core.roi import extract_label_indices +import logging from collections import namedtuple + import numpy as np import skbeam.core.roi as roi +from skbeam.core.roi import extract_label_indices +from skbeam.core.utils import multi_tau_lags -import logging logger = logging.getLogger(__name__) from tqdm import tqdm -def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm, - label_array, num_bufs, num_pixels, img_per_level, - level, buf_no, norm, lev_len): +def _one_time_process( + buf, + G, + past_intensity_norm, + future_intensity_norm, + label_array, + num_bufs, + num_pixels, + img_per_level, + level, + buf_no, + norm, + lev_len, +): """Reference implementation of the inner loop of multi-tau one time correlation This helper function calculates G, past_intensity_norm and @@ -66,10 +77,10 @@ def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm, # in multi-tau correlation, the subsequent levels have half as many # buffers as the first i_min = num_bufs // 2 if level else 0 - #maxqind=G.shape[1] + # maxqind=G.shape[1] for i in range(i_min, min(img_per_level[level], num_bufs)): # compute the index into the autocorrelation matrix - t_index = int( level * num_bufs / 2 + i ) + t_index = int(level * num_bufs / 2 + i) delay_no = (buf_no - i) % num_bufs # get the images for correlating past_img = buf[level, delay_no] @@ -77,29 +88,41 @@ def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm, # find the normalization that can work both for bad_images # and good_images ind = int(t_index - lev_len[:level].sum()) - normalize = img_per_level[level] - i - norm[level+1][ind] + normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images # (bad images are converted to np.nan array) if np.isnan(past_img).any() or np.isnan(future_img).any(): norm[level + 1][ind] += 1 else: - for w, arr in zip([past_img*future_img, past_img, future_img], - [G, past_intensity_norm, future_intensity_norm]): + for w, arr in zip( + [past_img * future_img, past_img, future_img], [G, past_intensity_norm, future_intensity_norm] + ): binned = np.bincount(label_array, weights=w)[1:] - #nonz = np.where(w)[0] - #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] - 
arr[t_index] += ((binned / num_pixels - - arr[t_index]) / normalize) + # nonz = np.where(w)[0] + # binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + arr[t_index] += (binned / num_pixels - arr[t_index]) / normalize return None # modifies arguments in place! - -def _one_time_process_error(buf, G, past_intensity_norm, future_intensity_norm, - label_array, num_bufs, num_pixels, img_per_level, - level, buf_no, norm, lev_len, - G_err, past_intensity_norm_err, future_intensity_norm_err ): +def _one_time_process_error( + buf, + G, + past_intensity_norm, + future_intensity_norm, + label_array, + num_bufs, + num_pixels, + img_per_level, + level, + buf_no, + norm, + lev_len, + G_err, + past_intensity_norm_err, + future_intensity_norm_err, +): """Reference implementation of the inner loop of multi-tau one time - correlation with the calculation of errorbar (statistical error due to multipixel measurements ) + correlation with the calculation of errorbar (statistical error due to multipixel measurements ) The statistical error: var( g2(Q) ) = sum( [g2(Qi)- g2(Q)]^2 )/N(N-1), Lumma, RSI, 2000 This helper function calculates G, past_intensity_norm and future_intensity_norm at each level, symmetric normalization is used. @@ -144,10 +167,10 @@ def _one_time_process_error(buf, G, past_intensity_norm, future_intensity_norm, # in multi-tau correlation, the subsequent levels have half as many # buffers as the first i_min = num_bufs // 2 if level else 0 - #maxqind=G.shape[1] + # maxqind=G.shape[1] for i in range(i_min, min(img_per_level[level], num_bufs)): # compute the index into the autocorrelation matrix - t_index = int( level * num_bufs / 2 + i ) + t_index = int(level * num_bufs / 2 + i) delay_no = (buf_no - i) % num_bufs # get the images for correlating past_img = buf[level, delay_no] @@ -155,89 +178,95 @@ def _one_time_process_error(buf, G, past_intensity_norm, future_intensity_norm, # find the normalization that can work both for bad_images # and good_images ind = int(t_index - lev_len[:level].sum()) - normalize = img_per_level[level] - i - norm[level+1][ind] + normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images # (bad images are converted to np.nan array) if np.isnan(past_img).any() or np.isnan(future_img).any(): norm[level + 1][ind] += 1 else: - - #for w, arr in zip([past_img*future_img, past_img, future_img], - # [G, past_intensity_norm, future_intensity_norm, + + # for w, arr in zip([past_img*future_img, past_img, future_img], + # [G, past_intensity_norm, future_intensity_norm, # ]): # binned = np.bincount(label_array, weights=w)[1:] # #nonz = np.where(w)[0] - # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] # arr[t_index] += ((binned / num_pixels - # arr[t_index]) / normalize) - for w, arr in zip([past_img*future_img, past_img, future_img], - [ - G_err, past_intensity_norm_err, future_intensity_norm_err, - ]): - arr[t_index] += ( w - arr[t_index]) / normalize + for w, arr in zip( + [past_img * future_img, past_img, future_img], + [ + G_err, + past_intensity_norm_err, + future_intensity_norm_err, + ], + ): + arr[t_index] += (w - arr[t_index]) / normalize return None # modifies arguments in place! 
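Editor's note on the two inner loops above: both accumulate G, past_intensity_norm and
future_intensity_norm with the same incremental update, arr[t_index] += (w - arr[t_index]) / normalize,
where normalize counts the valid (non-bad) frame pairs seen so far at that lag. A minimal
sketch, not part of the patch and using hypothetical sample values, showing that this
update is simply a running arithmetic mean:

    # illustration only; `samples` stands in for the per-pair weights w fed to the update
    import numpy as np

    samples = np.array([2.0, 4.0, 6.0, 8.0])
    acc = 0.0
    for k, w in enumerate(samples, start=1):
        acc += (w - acc) / k  # same form as arr[t_index] += (w - arr[t_index]) / normalize
    assert np.isclose(acc, samples.mean())  # the incremental form equals the arithmetic mean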
-results = namedtuple( - 'correlation_results', - ['g2', 'lag_steps', 'internal_state'] -) +results = namedtuple("correlation_results", ["g2", "lag_steps", "internal_state"]) _internal_state = namedtuple( - 'correlation_state', - ['buf', - 'G', - 'past_intensity', - 'future_intensity', - 'img_per_level', - 'label_array', - 'track_level', - 'cur', - 'pixel_list', - 'num_pixels', - 'lag_steps', - 'norm', - 'lev_len'] + "correlation_state", + [ + "buf", + "G", + "past_intensity", + "future_intensity", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "norm", + "lev_len", + ], ) _internal_state_err = namedtuple( - 'correlation_state', - ['buf', - 'G', - 'past_intensity', - 'future_intensity', - 'img_per_level', - 'label_array', - 'track_level', - 'cur', - 'pixel_list', - 'num_pixels', - 'lag_steps', - 'norm', - 'lev_len', - 'G_all', - 'past_intensity_all', - 'future_intensity_all' - ] + "correlation_state", + [ + "buf", + "G", + "past_intensity", + "future_intensity", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "norm", + "lev_len", + "G_all", + "past_intensity_all", + "future_intensity_all", + ], ) _two_time_internal_state = namedtuple( - 'two_time_correlation_state', - ['buf', - 'img_per_level', - 'label_array', - 'track_level', - 'cur', - 'pixel_list', - 'num_pixels', - 'lag_steps', - 'g2', - 'count_level', - 'current_img_time', - 'time_ind', - 'norm', - 'lev_len'] + "two_time_correlation_state", + [ + "buf", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "g2", + "count_level", + "current_img_time", + "time_ind", + "norm", + "lev_len", + ], ) @@ -280,13 +309,11 @@ def _validate_and_transform_inputs(num_bufs, num_levels, labels): length of each levels """ if num_bufs % 2 != 0: - raise ValueError("There must be an even number of `num_bufs`. You " - "provided %s" % num_bufs) + raise ValueError("There must be an even number of `num_bufs`. You " "provided %s" % num_bufs) label_array, pixel_list = extract_label_indices(labels) # map the indices onto a sequential list of integers starting at 1 - label_mapping = {label: n+1 - for n, label in enumerate(np.unique(label_array))} + label_mapping = {label: n + 1 for n, label in enumerate(np.unique(label_array))} # remap the label array to go from 1 -> max(_labels) for label, n in label_mapping.items(): label_array[label_array == label] = n @@ -307,8 +334,7 @@ def _validate_and_transform_inputs(num_bufs, num_levels, labels): # Ring buffer, a buffer with periodic boundary conditions. # Images must be keep for up to maximum delay in buf. 
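# Editor's sketch (not part of the patch): `buf` allocated below is that ring buffer;
# the periodic pointer update used later, s.cur[0] = (1 + s.cur[0]) % num_bufs, cycles
# through every slot, and a pointer value of 0 maps to slot index -1, i.e. the last
# slot, via Python's negative indexing:
#     cur = 1                  # matches cur = np.ones(num_levels, dtype=np.int64)
#     for _ in range(6):
#         cur = (1 + cur) % 4  # assuming num_bufs = 4
#         print(cur - 1)       # prints 1, 2, -1, 0, 1, 2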
- buf = np.zeros((num_levels, num_bufs, len(pixel_list)), - dtype=np.float64) + buf = np.zeros((num_levels, num_bufs, len(pixel_list)), dtype=np.float64) # to track how many images processed in each level img_per_level = np.zeros(num_levels, dtype=np.int64) # to track which levels have already been processed @@ -316,11 +342,22 @@ def _validate_and_transform_inputs(num_bufs, num_levels, labels): # to increment buffer cur = np.ones(num_levels, dtype=np.int64) - return (label_array, pixel_list, num_rois, num_pixels, - lag_steps, buf, img_per_level, track_level, cur, - norm, lev_len) + return ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) + -def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): +def _init_state_one_time(num_levels, num_bufs, labels, cal_error=False): """Initialize a stateful namedtuple for the generator-based multi-tau for one time correlation Parameters @@ -336,28 +373,36 @@ def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): `lazy_one_time` requires so that it can be used to pick up processing after it was interrupted """ - (label_array, pixel_list, num_rois, num_pixels, lag_steps, buf, - img_per_level, track_level, cur, norm, - lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) # G holds the un normalized auto- correlation result. We # accumulate computations into G as the algorithm proceeds. - G = np.zeros(( int( (num_levels + 1) * num_bufs / 2), num_rois), - dtype=np.float64) - + G = np.zeros((int((num_levels + 1) * num_bufs / 2), num_rois), dtype=np.float64) + # matrix for normalizing G into g2 past_intensity = np.zeros_like(G) # matrix for normalizing G into g2 future_intensity = np.zeros_like(G) if cal_error: - G_all = np.zeros(( int( (num_levels + 1) * num_bufs / 2), len(pixel_list)), - dtype=np.float64) - + G_all = np.zeros((int((num_levels + 1) * num_bufs / 2), len(pixel_list)), dtype=np.float64) + # matrix for normalizing G into g2 past_intensity_all = np.zeros_like(G_all) # matrix for normalizing G into g2 - future_intensity_all = np.zeros_like(G_all) + future_intensity_all = np.zeros_like(G_all) return _internal_state_err( buf, G, @@ -374,8 +419,8 @@ def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): lev_len, G_all, past_intensity_all, - future_intensity_all - ) + future_intensity_all, + ) else: return _internal_state( buf, @@ -394,87 +439,92 @@ def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): ) -def fill_pixel( p, v, pixelist): - fra_pix = np.zeros_like( pixelist ) - fra_pix[ np.in1d( pixelist,p ) ] = v[np.in1d( p, pixelist )] - return fra_pix - - - - - -def lazy_one_time(FD, num_levels, num_bufs, labels, - internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): - - """Generator implementation of 1-time multi-tau correlation - If you do not want multi-tau correlation, set num_levels to 1 and - num_bufs to the number of images you wish to correlate -The number of bins (of size 1) is one larger than the largest value in -`x`. If `minlength` is specified, there will be at least this number -of bins in the output array (though it will be longer if necessary, -depending on the contents of `x`). 
-Each bin gives the number of occurrences of its index value in `x`. -If `weights` is specified the input array is weighted by it, i.e. if a -value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead -of ``out[n] += 1``. +def fill_pixel(p, v, pixelist): + fra_pix = np.zeros_like(pixelist) + fra_pix[np.in1d(pixelist, p)] = v[np.in1d(p, pixelist)] + return fra_pix - Jan 2, 2018 YG. Add error bar calculation - Parameters - ---------- - image_iterable : FD, a compressed eiger file by Multifile class - num_levels : int - how many generations of downsampling to perform, i.e., the depth of - the binomial tree of averaged frames - num_bufs : int, must be even - maximum lag step to compute in each generation of downsampling - labels : array - Labeled array of the same shape as the image stack. - Each ROI is represented by sequential integers starting at one. For - example, if you have four ROIs, they must be labeled 1, 2, 3, - 4. Background is labeled as 0 - internal_state : namedtuple, optional - internal_state is a bucket for all of the internal state of the - generator. It is part of the `results` object that is yielded from - this generator - - For the sake of normalization: - - imgsum: a list with the same length as FD, sum of each frame - qp, iq: the circular average radius (in pixel) and intensity - center: beam center - - Yields - ------ +def lazy_one_time( + FD, + num_levels, + num_bufs, + labels, + internal_state=None, + bad_frame_list=None, + imgsum=None, + norm=None, + cal_error=False, +): + """Generator implementation of 1-time multi-tau correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + The number of bins (of size 1) is one larger than the largest value in + `x`. If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Jan 2, 2018 YG. Add error bar calculation + + Parameters + ---------- + image_iterable : FD, a compressed eiger file by Multifile class + num_levels : int + how many generations of downsampling to perform, i.e., the depth of + the binomial tree of averaged frames + num_bufs : int, must be even + maximum lag step to compute in each generation of downsampling + labels : array + Labeled array of the same shape as the image stack. + Each ROI is represented by sequential integers starting at one. For + example, if you have four ROIs, they must be labeled 1, 2, 3, + 4. Background is labeled as 0 + internal_state : namedtuple, optional + internal_state is a bucket for all of the internal state of the + generator. It is part of the `results` object that is yielded from + this generator + + For the sake of normalization: + + imgsum: a list with the same length as FD, sum of each frame + qp, iq: the circular average radius (in pixel) and intensity + center: beam center + + Yields + ------ -Returns -------- + Returns + ------- - A `results` object is yielded after every image has been processed. - This `reults` object contains, in this order: - - `g2`: the normalized correlation - shape is (len(lag_steps), num_rois) - - `lag_steps`: the times at which the correlation was computed - - `_internal_state`: all of the internal state. 
Can be passed back in
-      to `lazy_one_time` as the `internal_state` parameter
-    Notes
-    -----
-    The normalized intensity-intensity time-autocorrelation function
-    is defined as
-    .. math::
-        g_2(q, t') = \\frac{<I(q, t)I(q, t + t')> }{<I(q, t)>^2}
-        t' > 0
-    Here, ``I(q, t)`` refers to the scattering strength at the momentum
-    transfer vector ``q`` in reciprocal space at time ``t``, and the brackets
-    ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes
-    the delay time
-    This implementation is based on published work. [1]_
-    References
-    ----------
-    .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
-        "Area detector based photon correlation in the regime of
-        short data batches: Data reduction for dynamic x-ray
-        scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000.
+    A `results` object is yielded after every image has been processed.
+    This `results` object contains, in this order:
+      - `g2`: the normalized correlation
+        shape is (len(lag_steps), num_rois)
+      - `lag_steps`: the times at which the correlation was computed
+      - `_internal_state`: all of the internal state. Can be passed back in
+        to `lazy_one_time` as the `internal_state` parameter
+    Notes
+    -----
+    The normalized intensity-intensity time-autocorrelation function
+    is defined as
+    .. math::
+        g_2(q, t') = \\frac{<I(q, t)I(q, t + t')> }{<I(q, t)>^2}
+        t' > 0
+    Here, ``I(q, t)`` refers to the scattering strength at the momentum
+    transfer vector ``q`` in reciprocal space at time ``t``, and the brackets
+    ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes
+    the delay time
+    This implementation is based on published work. [1]_
+    References
+    ----------
+    .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
+        "Area detector based photon correlation in the regime of
+        short data batches: Data reduction for dynamic x-ray
+        scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000. 
""" if internal_state is None: @@ -482,66 +532,90 @@ def lazy_one_time(FD, num_levels, num_bufs, labels, # create a shorthand reference to the results and state named tuple s = internal_state - qind, pixelist = roi.extract_label_indices( labels ) - # iterate over the images to compute multi-tau correlation - - fra_pix = np.zeros_like( pixelist, dtype=np.float64) - - timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) - timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) - + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: - bad_frame_list=[] - for i in tqdm(range( FD.beg , FD.end )): + bad_frame_list = [] + for i in tqdm(range(FD.beg, FD.end)): if i in bad_frame_list: - fra_pix[:]= np.nan + fra_pix[:] = np.nan else: - (p,v) = FD.rdrawframe(i) - w = np.where( timg[p] )[0] - pxlist = timg[ p[w] ] -1 - + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: if norm is None: - fra_pix[ pxlist] = v[w] - else: + fra_pix[pxlist] = v[w] + else: S = norm.shape - if len(S)>1: - fra_pix[ pxlist] = v[w]/ norm[i,pxlist] #-1.0 - else: - fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + if len(S) > 1: + fra_pix[pxlist] = v[w] / norm[i, pxlist] # -1.0 + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 else: if norm is None: - fra_pix[ pxlist] = v[w] / imgsum[i] + fra_pix[pxlist] = v[w] / imgsum[i] else: S = norm.shape - if len(S)>1: - fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[i,pxlist] - else: - fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] - level = 0 + if len(S) > 1: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[i, pxlist] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + level = 0 # increment buffer - s.cur[0] = (1 + s.cur[0]) % num_bufs - # Put the ROI pixels into the ring buffer. - s.buf[0, s.cur[0] - 1] = fra_pix - fra_pix[:]=0 - - #print( i, len(p), len(w), len( pixelist)) - - #print ('i= %s init fra_pix'%i ) + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + + # print( i, len(p), len(w), len( pixelist)) + + # print ('i= %s init fra_pix'%i ) buf_no = s.cur[0] - 1 # Compute the correlations between the first level # (undownsampled) frames. This modifies G, # past_intensity, future_intensity, # and img_per_level in place! 
if cal_error: - _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len, - s.G_all, s.past_intensity_all, s.future_intensity_all) - else: - _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len) + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) # check whether the number of levels is one, otherwise # continue processing the next level @@ -553,13 +627,12 @@ def lazy_one_time(FD, num_levels, num_bufs, labels, s.track_level[level] = True processing = False else: - prev = (1 + (s.cur[level - 1] - 2) % num_bufs) - s.cur[level] = ( - 1 + s.cur[level] % num_bufs) + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs - s.buf[level, s.cur[level] - 1] = (( - s.buf[level - 1, prev - 1] + - s.buf[level - 1, s.cur[level - 1] - 1]) / 2) + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 # make the track_level zero once that level is processed s.track_level[level] = False @@ -569,14 +642,38 @@ def lazy_one_time(FD, num_levels, num_bufs, labels, # on previous call above. buf_no = s.cur[level] - 1 if cal_error: - _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len, - s.G_all, s.past_intensity_all, s.future_intensity_all) - else: - _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len) + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) level += 1 @@ -586,77 +683,108 @@ def lazy_one_time(FD, num_levels, num_bufs, labels, # If any past intensities are zero, then g2 cannot be normalized at # those levels. This if/else code block is basically preventing # divide-by-zero errors. 
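# Editor's sketch (not part of the patch): once all frames are processed, the code
# below forms g2 per q-bin as G / (past_intensity * future_intensity), truncated at
# g_max, the first lag at which either normalization is still zero. With hypothetical
# values:
#     import numpy as np
#     G    = np.array([4.2, 4.1, 4.05])  # accumulated <I(t) I(t+tau)> per lag
#     past = np.array([2.0, 2.0, 0.0])   # accumulated <I(t)>
#     futr = np.array([2.0, 2.0, 2.0])   # accumulated <I(t+tau)>
#     g_max = np.where(past == 0)[0][0]  # -> 2; unnormalizable lags are dropped
#     g2 = G[:g_max] / (past[:g_max] * futr[:g_max])  # -> [1.05, 1.025]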
- if not cal_error: + if not cal_error: if len(np.where(s.past_intensity == 0)[0]) != 0: g_max1 = np.where(s.past_intensity == 0)[0][0] else: - g_max1 = s.past_intensity.shape[0] + g_max1 = s.past_intensity.shape[0] if len(np.where(s.future_intensity == 0)[0]) != 0: g_max2 = np.where(s.future_intensity == 0)[0][0] else: - g_max2 = s.future_intensity.shape[0] - g_max = min( g_max1, g_max2) - g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * - s.future_intensity[:g_max])) - yield results(g2, s.lag_steps[:g_max], s) + g_max2 = s.future_intensity.shape[0] + g_max = min(g_max1, g_max2) + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + yield results(g2, s.lag_steps[:g_max], s) else: - yield results(None,s.lag_steps, s) - - - -def lazy_one_time_debug(FD, num_levels, num_bufs, labels, - internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): + yield results(None, s.lag_steps, s) + + +def lazy_one_time_debug( + FD, + num_levels, + num_bufs, + labels, + internal_state=None, + bad_frame_list=None, + imgsum=None, + norm=None, + cal_error=False, +): if internal_state is None: internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) # create a shorthand reference to the results and state named tuple s = internal_state - qind, pixelist = roi.extract_label_indices( labels ) - # iterate over the images to compute multi-tau correlation - fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) - timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) if bad_frame_list is None: - bad_frame_list=[] - for i in range( FD.beg , FD.end ): + bad_frame_list = [] + for i in range(FD.beg, FD.end): print(i) if i in bad_frame_list: - fra_pix[:]= np.nan + fra_pix[:] = np.nan else: - (p,v) = FD.rdrawframe(i) - w = np.where( timg[p] )[0] - pxlist = timg[ p[w] ] -1 + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 if imgsum is None: if norm is None: - fra_pix[ pxlist] = v[w] - else: - fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 else: if norm is None: - fra_pix[ pxlist] = v[w] / imgsum[i] + fra_pix[pxlist] = v[w] / imgsum[i] else: - fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] - level = 0 + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + level = 0 # increment buffer - s.cur[0] = (1 + s.cur[0]) % num_bufs - # Put the ROI pixels into the ring buffer. - s.buf[0, s.cur[0] - 1] = fra_pix - fra_pix[:]=0 - #print( i, len(p), len(w), len( pixelist)) - #print ('i= %s init fra_pix'%i ) + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + # print( i, len(p), len(w), len( pixelist)) + # print ('i= %s init fra_pix'%i ) buf_no = s.cur[0] - 1 # Compute the correlations between the first level # (undownsampled) frames. This modifies G, # past_intensity, future_intensity, # and img_per_level in place! 
if cal_error: - _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len, - s.G_all, s.past_intensity_all, s.future_intensity_all) - else: - _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len) + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) # check whether the number of levels is one, otherwise # continue processing the next level @@ -667,13 +795,12 @@ def lazy_one_time_debug(FD, num_levels, num_bufs, labels, s.track_level[level] = True processing = False else: - prev = (1 + (s.cur[level - 1] - 2) % num_bufs) - s.cur[level] = ( - 1 + s.cur[level] % num_bufs) + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs - s.buf[level, s.cur[level] - 1] = (( - s.buf[level - 1, prev - 1] + - s.buf[level - 1, s.cur[level - 1] - 1]) / 2) + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 # make the track_level zero once that level is processed s.track_level[level] = False # call processing_func for each multi-tau level greater @@ -681,14 +808,38 @@ def lazy_one_time_debug(FD, num_levels, num_bufs, labels, # on previous call above. buf_no = s.cur[level] - 1 if cal_error: - _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len, - s.G_all, s.past_intensity_all, s.future_intensity_all) - else: - _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, - s.label_array, num_bufs, s.num_pixels, - s.img_per_level, level, buf_no, s.norm, s.lev_len) + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) level += 1 # Checking whether there is next level for processing @@ -696,25 +847,23 @@ def lazy_one_time_debug(FD, num_levels, num_bufs, labels, # If any past intensities are zero, then g2 cannot be normalized at # those levels. This if/else code block is basically preventing # divide-by-zero errors. 
- if not cal_error: + if not cal_error: if len(np.where(s.past_intensity == 0)[0]) != 0: g_max1 = np.where(s.past_intensity == 0)[0][0] else: - g_max1 = s.past_intensity.shape[0] + g_max1 = s.past_intensity.shape[0] if len(np.where(s.future_intensity == 0)[0]) != 0: g_max2 = np.where(s.future_intensity == 0)[0][0] else: - g_max2 = s.future_intensity.shape[0] - g_max = min( g_max1, g_max2) - g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * - s.future_intensity[:g_max])) - yield results(g2, s.lag_steps[:g_max], s) - #yield( i ) - + g_max2 = s.future_intensity.shape[0] + g_max = min(g_max1, g_max2) + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + yield results(g2, s.lag_steps[:g_max], s) + # yield( i ) + else: - yield results(None,s.lag_steps, s) - - + yield results(None, s.lag_steps, s) + def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): """ @@ -756,10 +905,11 @@ def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): J. Synchrotron Rad. vol 21, p 1288-1295, 2014 """ return beta * np.exp(-2 * relaxation_rate * lags) + baseline - -def multi_tau_auto_corr(num_levels, num_bufs, labels, images, bad_frame_list=None, - imgsum=None, norm=None,cal_error=False ): + +def multi_tau_auto_corr( + num_levels, num_bufs, labels, images, bad_frame_list=None, imgsum=None, norm=None, cal_error=False +): """Wraps generator implementation of multi-tau Original code(in Yorick) for multi tau auto correlation author: Mark Sutton @@ -770,17 +920,25 @@ def multi_tau_auto_corr(num_levels, num_bufs, labels, images, bad_frame_list=Non the `lazy_one_time()` function. The semantics of the variables remain unchanged. """ - gen = lazy_one_time(images, num_levels, num_bufs, labels,bad_frame_list=bad_frame_list, imgsum=imgsum, - norm=norm,cal_error=cal_error ) + gen = lazy_one_time( + images, + num_levels, + num_bufs, + labels, + bad_frame_list=bad_frame_list, + imgsum=imgsum, + norm=norm, + cal_error=cal_error, + ) for result in gen: pass if cal_error: return result.g2, result.lag_steps, result.internal_state - else: + else: return result.g2, result.lag_steps -def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list =None, - imgsum= None, norm = None ): + +def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list=None, imgsum=None, norm=None): """Wraps generator implementation of multi-tau two time correlation This function computes two-time correlation Original code : author: Yugang Zhang @@ -789,21 +947,29 @@ def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_lis results : namedtuple For parameter definition, see the docstring for the `lazy_two_time()` function in this module - """ - gen = lazy_two_time(FD, num_lev, num_buf, ring_mask, - two_time_internal_state= None, - bad_frame_list=bad_frame_list, imgsum=imgsum, norm = norm ) + """ + gen = lazy_two_time( + FD, + num_lev, + num_buf, + ring_mask, + two_time_internal_state=None, + bad_frame_list=bad_frame_list, + imgsum=imgsum, + norm=norm, + ) for result in gen: pass return two_time_state_to_results(result) - - -def lazy_two_time(FD, num_levels, num_bufs, labels, - two_time_internal_state=None, bad_frame_list=None, imgsum= None, norm = None ): - -#def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, -# two_time_internal_state=None): - """ Generator implementation of two-time correlation + + +def lazy_two_time( + FD, num_levels, num_bufs, labels, two_time_internal_state=None, bad_frame_list=None, imgsum=None, norm=None +): + + 
# def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, + # two_time_internal_state=None): + """Generator implementation of two-time correlation If you do not want multi-tau correlation, set num_levels to 1 and num_bufs to the number of images you wish to correlate Multi-tau correlation uses a scheme to achieve long-time correlations @@ -813,14 +979,14 @@ def lazy_two_time(FD, num_levels, num_bufs, labels, ** see comments on multi_tau_auto_corr Parameters ---------- - FD: the handler of compressed data + FD: the handler of compressed data num_levels : int, optional how many generations of downsampling to perform, i.e., the depth of the binomial tree of averaged frames default is one num_bufs : int, must be even maximum lag step to compute in each generation of - downsampling + downsampling labels : array labeled array of the same shape as the image stack; each ROI is represented by a distinct label (i.e., integer) @@ -857,51 +1023,59 @@ def lazy_two_time(FD, num_levels, num_bufs, labels, and aging in collodial gels studied by x-ray photon correlation spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. """ - - num_frames = FD.end - FD.beg + + num_frames = FD.end - FD.beg if two_time_internal_state is None: - two_time_internal_state = _init_state_two_time(num_levels, num_bufs,labels, num_frames) + two_time_internal_state = _init_state_two_time(num_levels, num_bufs, labels, num_frames) # create a shorthand reference to the results and state named tuple - s = two_time_internal_state - qind, pixelist = roi.extract_label_indices( labels ) + s = two_time_internal_state + qind, pixelist = roi.extract_label_indices(labels) # iterate over the images to compute multi-tau correlation - fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) - timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) if bad_frame_list is None: - bad_frame_list=[] - - for i in tqdm(range( FD.beg , FD.end )): + bad_frame_list = [] + + for i in tqdm(range(FD.beg, FD.end)): if i in bad_frame_list: - fra_pix[:]= np.nan + fra_pix[:] = np.nan else: - (p,v) = FD.rdrawframe(i) - w = np.where( timg[p] )[0] - pxlist = timg[ p[w] ] -1 + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 if imgsum is None: if norm is None: - fra_pix[ pxlist] = v[w] - else: - fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 else: if norm is None: - fra_pix[ pxlist] = v[w] / imgsum[i] + fra_pix[pxlist] = v[w] / imgsum[i] else: - fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] - - level = 0 + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + + level = 0 # increment buffer - s.cur[0] = (1 + s.cur[0]) % num_bufs + s.cur[0] = (1 + s.cur[0]) % num_bufs s.count_level[0] = 1 + s.count_level[0] # get the current image time - s = s._replace(current_img_time=(s.current_img_time + 1)) - # Put the ROI pixels into the ring buffer. - s.buf[0, s.cur[0] - 1] = fra_pix - fra_pix[:]=0 - _two_time_process(s.buf, s.g2, s.label_array, num_bufs, - s.num_pixels, s.img_per_level, s.lag_steps, - s.current_img_time, - level=0, buf_no=s.cur[0] - 1) + s = s._replace(current_img_time=(s.current_img_time + 1)) + # Put the ROI pixels into the ring buffer. 
+ s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + _two_time_process( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + s.current_img_time, + level=0, + buf_no=s.cur[0] - 1, + ) # time frame for each level s.time_ind[0].append(s.current_img_time) # check whether the number of levels is one, otherwise @@ -916,14 +1090,14 @@ def lazy_two_time(FD, num_levels, num_bufs, labels, else: prev = 1 + (s.cur[level - 1] - 2) % num_bufs s.cur[level] = 1 + s.cur[level] % num_bufs - s.count_level[level] = 1 + s.count_level[level] - s.buf[level, s.cur[level] - 1] = ( s.buf[level - 1, prev - 1] + - s.buf[level - 1, s.cur[level - 1] - 1] )/2 + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 t1_idx = (s.count_level[level] - 1) * 2 - current_img_time = ((s.time_ind[level - 1])[t1_idx] + - (s.time_ind[level - 1])[t1_idx + 1])/2. + current_img_time = ((s.time_ind[level - 1])[t1_idx] + (s.time_ind[level - 1])[t1_idx + 1]) / 2.0 # time frame for each level s.time_ind[level].append(current_img_time) # make the track_level zero once that level is processed @@ -932,15 +1106,23 @@ def lazy_two_time(FD, num_levels, num_bufs, labels, # for multi-tau levels greater than one # Again, this is modifying things in place. See comment # on previous call above. - _two_time_process(s.buf, s.g2, s.label_array, num_bufs, - s.num_pixels, s.img_per_level, s.lag_steps, - current_img_time, - level=level, buf_no=s.cur[level]-1) + _two_time_process( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + current_img_time, + level=level, + buf_no=s.cur[level] - 1, + ) level += 1 # Checking whether there is next level for processing processing = level < num_levels - #print (s.g2[1,:,1] ) + # print (s.g2[1,:,1] ) yield s @@ -958,15 +1140,13 @@ def two_time_state_to_results(state): """ for q in range(np.max(state.label_array)): x0 = (state.g2)[q, :, :] - (state.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T - - np.diag(np.diag(x0))) + (state.g2)[q, :, :] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0)) return results(state.g2, state.lag_steps, state) - - -def _two_time_process(buf, g2, label_array, num_bufs, num_pixels, - img_per_level, lag_steps, current_img_time, - level, buf_no): + +def _two_time_process( + buf, g2, label_array, num_bufs, num_pixels, img_per_level, lag_steps, current_img_time, level, buf_no +): """ Parameters ---------- @@ -1003,41 +1183,36 @@ def _two_time_process(buf, g2, label_array, num_bufs, num_pixels, if level == 0: i_min = 0 else: - i_min = num_bufs//2 + i_min = num_bufs // 2 for i in range(i_min, min(img_per_level[level], num_bufs)): - t_index = level*num_bufs/2 + i + t_index = level * num_bufs / 2 + i delay_no = (buf_no - i) % num_bufs past_img = buf[level, delay_no] - future_img = buf[level, buf_no] - - #print( np.sum( past_img ), np.sum( future_img )) - + future_img = buf[level, buf_no] + + # print( np.sum( past_img ), np.sum( future_img )) + # get the matrix of correlation function without normalizations - tmp_binned = (np.bincount(label_array, - weights=past_img*future_img)[1:]) + tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations - pi_binned = (np.bincount(label_array, - weights=past_img)[1:]) + pi_binned = np.bincount(label_array, weights=past_img)[1:] # get the matrix of future intensity normalizations - fi_binned = 
(np.bincount(label_array, - weights=future_img)[1:]) + fi_binned = np.bincount(label_array, weights=future_img)[1:] + + tind1 = current_img_time - 1 + tind2 = current_img_time - lag_steps[int(t_index)] - 1 + # print( current_img_time ) - tind1 = (current_img_time - 1) - tind2 = (current_img_time - lag_steps[int(t_index)] - 1) - #print( current_img_time ) - if not isinstance(current_img_time, int): - nshift = 2**(level-1) - for i in range(-nshift+1, nshift+1): - g2[:, int(tind1+i), - int(tind2+i)] = (tmp_binned/(pi_binned * - fi_binned))*num_pixels + nshift = 2 ** (level - 1) + for i in range(-nshift + 1, nshift + 1): + g2[:, int(tind1 + i), int(tind2 + i)] = (tmp_binned / (pi_binned * fi_binned)) * num_pixels else: - g2[:, int(tind1), int(tind2)] = tmp_binned/(pi_binned * fi_binned)*num_pixels - - #print( num_pixels ) + g2[:, int(tind1), int(tind2)] = tmp_binned / (pi_binned * fi_binned) * num_pixels + + # print( num_pixels ) def _init_state_two_time(num_levels, num_bufs, labels, num_frames): @@ -1058,9 +1233,19 @@ def _init_state_two_time(num_levels, num_bufs, labels, num_frames): `lazy_two_time` requires so that it can be used to pick up processing after it was interrupted """ - (label_array, pixel_list, num_rois, num_pixels, lag_steps, - buf, img_per_level, track_level, cur, norm, - lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) # to count images in each level count_level = np.zeros(num_levels, dtype=np.int64) @@ -1091,6 +1276,7 @@ def _init_state_two_time(num_levels, num_bufs, labels, num_frames): lev_len, ) + def one_time_from_two_time(two_time_corr): """ This will provide the one-time correlation data from two-time @@ -1110,143 +1296,149 @@ def one_time_from_two_time(two_time_corr): one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2])) for g in two_time_corr: for j in range(two_time_corr.shape[2]): - one_time_corr[:, j] = np.trace(g, offset=j)/two_time_corr.shape[2] + one_time_corr[:, j] = np.trace(g, offset=j) / two_time_corr.shape[2] return one_time_corr - - -def cal_c12c( FD, ring_mask, - bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None ): - '''calculation two_time correlation by using a multi-tau algorithm''' - - #noframes = FD.end - good_start # number of frames, not "no frames" - + + +def cal_c12c(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None, imgsum=None, norm=None): + """calculation two_time correlation by using a multi-tau algorithm""" + + # noframes = FD.end - good_start # number of frames, not "no frames" + FD.beg = max(FD.beg, good_start) - noframes = FD.end - FD.beg # number of frames, not "no frames" - #num_buf = 8 # number of buffers + noframes = FD.end - FD.beg # number of frames, not "no frames" + # num_buf = 8 # number of buffers if num_lev is None: - num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 - print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) - if bad_frame_list is not None: - if len(bad_frame_list)!=0: - print ('Bad frame involved and will be precessed!') - noframes -= len(np.where(np.in1d( bad_frame_list, - range(good_start, FD.end)))[0]) - print ('%s frames will be processed...'%(noframes)) - - c12, lag_steps, state = multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, - 
imgsum=imgsum, norm = norm )
-
-    print( 'Two Time Calculation is DONE!')
+    num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
+    print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev))
+    if bad_frame_list is not None:
+        if len(bad_frame_list) != 0:
+            print("Bad frame involved and will be processed!")
+            noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0])
+        print("%s frames will be processed..." % (noframes))
+
+    c12, lag_steps, state = multi_tau_two_time_auto_corr(
+        num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm
+    )
+
+    print("Two Time Calculation is DONE!")
     m, n, n = c12.shape
-    #print( m,n,n)
-    c12_ = np.zeros( [n,n,m] )
-    for i in range( m):
-        c12_[:,:,i ] = c12[i]
+    # print( m,n,n)
+    c12_ = np.zeros([n, n, m])
+    for i in range(m):
+        c12_[:, :, i] = c12[i]
     return c12_, lag_steps


+def cal_g2c(
+    FD,
+    ring_mask,
+    bad_frame_list=None,
+    good_start=0,
+    num_buf=8,
+    num_lev=None,
+    imgsum=None,
+    norm=None,
+    cal_error=False,
+):
+    """Calculate g2 using a multi-tau algorithm"""
+
+    # noframes = FD.end - good_start # number of frames, not "no frames"

-def cal_g2c( FD, ring_mask,
-    bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None,cal_error=False ):
-    '''calculation g2 by using a multi-tau algorithm'''
-
-    #noframes = FD.end - good_start # number of frames, not "no frames"
-
     FD.beg = max(FD.beg, good_start)
-    noframes = FD.end - FD.beg # number of frames, not "no frames"
-    #num_buf = 8 # number of buffers
+    noframes = FD.end - FD.beg  # number of frames, not "no frames"
+    # num_buf = 8 # number of buffers
     if num_lev is None:
-        num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
-    print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev))
-    if bad_frame_list is not None:
-        if len(bad_frame_list)!=0:
-            print ('Bad frame involved and will be precessed!')
-            noframes -= len(np.where(np.in1d( bad_frame_list,
-                                              range(good_start, FD.end)))[0])
-
-    print ('%s frames will be processed...'%(noframes))
+        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
+    print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev))
+    if bad_frame_list is not None:
+        if len(bad_frame_list) != 0:
+            print("Bad frame involved and will be processed!")
+            noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0])
+
+    print("%s frames will be processed..." 
% (noframes)) if cal_error: - g2, lag_steps, s = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, - imgsum=imgsum, norm = norm,cal_error=cal_error ) - - g2 = np.zeros_like( s.G ) - g2_err = np.zeros_like(g2) - qind, pixelist = extract_label_indices(ring_mask) + g2, lag_steps, s = multi_tau_auto_corr( + num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm, cal_error=cal_error + ) + + g2 = np.zeros_like(s.G) + g2_err = np.zeros_like(g2) + qind, pixelist = extract_label_indices(ring_mask) noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs+1))[1:] - Ntau, Nq = s.G.shape + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + Ntau, Nq = s.G.shape g_max = 1e30 - for qi in range(1,1+Nq): - pixelist_qi = np.where( qind == qi)[0] - s_Gall_qi = s.G_all[:,pixelist_qi] - s_Pall_qi = s.past_intensity_all[:,pixelist_qi] - s_Fall_qi = s.future_intensity_all[:,pixelist_qi] - avgGi = (np.average( s_Gall_qi, axis=1)) - devGi = (np.std( s_Gall_qi, axis=1)) - avgPi = (np.average( s_Pall_qi, axis=1)) - devPi = (np.std( s_Pall_qi, axis=1)) - avgFi = (np.average( s_Fall_qi, axis=1)) - devFi = (np.std( s_Fall_qi, axis=1)) - + for qi in range(1, 1 + Nq): + pixelist_qi = np.where(qind == qi)[0] + s_Gall_qi = s.G_all[:, pixelist_qi] + s_Pall_qi = s.past_intensity_all[:, pixelist_qi] + s_Fall_qi = s.future_intensity_all[:, pixelist_qi] + avgGi = np.average(s_Gall_qi, axis=1) + devGi = np.std(s_Gall_qi, axis=1) + avgPi = np.average(s_Pall_qi, axis=1) + devPi = np.std(s_Pall_qi, axis=1) + avgFi = np.average(s_Fall_qi, axis=1) + devFi = np.std(s_Fall_qi, axis=1) + if len(np.where(avgPi == 0)[0]) != 0: g_max1 = np.where(avgPi == 0)[0][0] else: - g_max1 = avgPi.shape[0] + g_max1 = avgPi.shape[0] if len(np.where(avgFi == 0)[0]) != 0: g_max2 = np.where(avgFi == 0)[0][0] else: - g_max2 = avgFi.shape[0] - g_max = min( g_max1, g_max2) - #print(g_max) - #g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * + g_max2 = avgFi.shape[0] + g_max = min(g_max1, g_max2) + # print(g_max) + # g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * # s.future_intensity[:g_max])) - g2[:g_max,qi-1] = avgGi[:g_max]/( avgPi[:g_max] * avgFi[:g_max] ) - g2_err[:g_max,qi-1] = np.sqrt( - ( 1/ ( avgFi[:g_max] * avgPi[:g_max] ))**2 * devGi[:g_max] ** 2 + - ( avgGi[:g_max]/ ( avgFi[:g_max]**2 * avgPi[:g_max] ))**2 * devFi[:g_max] ** 2 + - ( avgGi[:g_max]/ ( avgFi[:g_max] * avgPi[:g_max]**2 ))**2 * devPi[:g_max] ** 2 - ) - - print( 'G2 with error bar calculation DONE!') - return g2[:g_max,:], lag_steps[:g_max], g2_err[:g_max,:]/np.sqrt(nopr), s - else: - g2, lag_steps = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, - imgsum=imgsum, norm = norm,cal_error=cal_error ) - - print( 'G2 calculation DONE!') + g2[:g_max, qi - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) + g2_err[:g_max, qi - 1] = np.sqrt( + (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] ** 2 * avgPi[:g_max])) ** 2 * devFi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] * avgPi[:g_max] ** 2)) ** 2 * devPi[:g_max] ** 2 + ) + + print("G2 with error bar calculation DONE!") + return g2[:g_max, :], lag_steps[:g_max], g2_err[:g_max, :] / np.sqrt(nopr), s + else: + g2, lag_steps = multi_tau_auto_corr( + num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm, cal_error=cal_error + ) + + print("G2 calculation DONE!") return g2, lag_steps +def get_pixelist_interp_iq(qp, iq, ring_mask, center): + + qind, pixelist = roi.extract_label_indices(ring_mask) + # 
pixely = pixelist%FD.md['nrows'] -center[1] + # pixelx = pixelist//FD.md['nrows'] - center[0] + + pixely = pixelist % ring_mask.shape[1] - center[1] + pixelx = pixelist // ring_mask.shape[1] - center[0] + + r = np.hypot(pixelx, pixely) # leave as float. + # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return np.interp(r, qp, iq) + -def get_pixelist_interp_iq( qp, iq, ring_mask, center): - - qind, pixelist = roi.extract_label_indices( ring_mask ) - #pixely = pixelist%FD.md['nrows'] -center[1] - #pixelx = pixelist//FD.md['nrows'] - center[0] - - pixely = pixelist%ring_mask.shape[1] -center[1] - pixelx = pixelist//ring_mask.shape[1] - center[0] - - r= np.hypot(pixelx, pixely) #leave as float. - #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 - return np.interp( r, qp, iq ) - - class Get_Pixel_Arrayc_todo(object): - ''' - a class to get intested pixels from a images sequence, - load ROI of all images into memory + """ + a class to get intested pixels from a images sequence, + load ROI of all images into memory get_data: to get a 2-D array, shape as (len(images), len(pixellist)) - - One example: + + One example: data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() - ''' - - def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, - norm_inten = None, qind=None): - ''' + """ + + def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, norm_inten=None, qind=None): + """ indexable: a images sequences pixelist: 1-D array, interest pixel list norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity @@ -1254,423 +1446,428 @@ def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, norm_inten: if True, each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame qind: the index of each ROI in one frame, i.e., q if norm_inten is True: qind has to be given - - ''' + + """ if beg is None: self.beg = FD.beg if end is None: self.end = FD.end - #if self.beg ==0: + # if self.beg ==0: # self.length = self.end - self.beg - #else: + # else: # self.length = self.end - self.beg + 1 - - self.length = self.end - self.beg - + + self.length = self.end - self.beg + self.FD = FD - self.pixelist = pixelist - self.norm = norm + self.pixelist = pixelist + self.norm = norm self.imgsum = imgsum - self.norm_inten= norm_inten + self.norm_inten = norm_inten self.qind = qind if self.norm_inten is not None: if self.qind is None: - print('Please give qind.') - - def get_data(self ): - ''' + print("Please give qind.") + + def get_data(self): + """ To get intested pixels array Return: 2-D array, shape as (len(images), len(pixellist)) - ''' - - data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 - #fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) - timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) - + """ + + data_array = np.zeros( + [self.length, len(self.pixelist)], dtype=np.float64 + ) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(self.FD.md["ncols"] * self.FD.md["nrows"], dtype=np.int32) + timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) + if self.norm_inten is not None: - #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) - Mean_Int_Qind = np.ones( 
len( self.qind), dtype = np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 - noqs = len(np.unique( self.qind )) - nopr = np.bincount(self.qind-1) - noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) - qind_ = np.zeros_like( self.qind ) + # Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( + len(self.qind), dtype=np.float64 + ) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + noqs = len(np.unique(self.qind)) + nopr = np.bincount(self.qind - 1) + noprs = np.concatenate([np.array([0]), np.cumsum(nopr)]) + qind_ = np.zeros_like(self.qind) for j in range(noqs): - qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] - - n=0 - for i in tqdm(range( self.beg , self.end )): - (p,v) = self.FD.rdrawframe(i) - w = np.where( timg[p] )[0] - pxlist = timg[ p[w] ] -1 - #np.bincount( qind[pxlist], weight= - - - if self.mean_int_sets is not None:#for each frame will normalize each ROI by it's averaged value + qind_[noprs[j] : noprs[j + 1]] = np.where(self.qind == j + 1)[0] + + n = 0 + for i in tqdm(range(self.beg, self.end)): + (p, v) = self.FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + # np.bincount( qind[pxlist], weight= + + if self.mean_int_sets is not None: # for each frame will normalize each ROI by it's averaged value for j in range(noqs): - #if i ==100: + # if i ==100: # if j==0: - # print( self.mean_int_sets[i][j] ) + # print( self.mean_int_sets[i][j] ) # print( qind_[ noprs[j]: noprs[j+1] ] ) - Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] - norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] - - #if i==100: + Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] + + # if i==100: # print( i, Mean_Int_Qind[ self.qind== 11 ]) - #print('Do norm_mean_int here') - #if i ==10: + # print('Do norm_mean_int here') + # if i ==10: # print( norm_Mean_Int_Qind ) else: - norm_Mean_Int_Qind = 1.0 - if self.imgsum is not None: - norm_imgsum = self.imgsum[i] + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] else: - norm_imgsum = 1.0 + norm_imgsum = 1.0 if self.norm is not None: - norm_avgimg_roi = self.norm[pxlist] + norm_avgimg_roi = self.norm[pxlist] else: - norm_avgimg_roi = 1.0 - - norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi - #if i==100: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + # if i==100: # print(norm_Mean_Int_Qind[:100]) - data_array[n][ pxlist] = v[w]/ norms - n +=1 - - return data_array - - - - - + data_array[n][pxlist] = v[w] / norms + n += 1 + + return data_array + + class Get_Pixel_Arrayc(object): - ''' - a class to get intested pixels from a images sequence, - load ROI of all images into memory + """ + a class to get intested pixels from a images sequence, + load ROI of all images into memory get_data: to get a 2-D array, shape as (len(images), len(pixellist)) - - One example: + + One example: data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() - ''' - - def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, - mean_int_sets = None, qind=None ): - ''' + """ + + def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, mean_int_sets=None, qind=None): + """ indexable: a images sequences pixelist: 1-D array, interest pixel list norm: each q-ROI of 
 class Get_Pixel_Arrayc(object):
-    '''
-    a class to get intested pixels from a images sequence,
-    load ROI of all images into memory
+    """
+    a class to get interested pixels from an image sequence,
+    load ROI of all images into memory
     get_data: to get a 2-D array, shape as (len(images), len(pixelist))

     One example:
        data_pixel = Get_Pixel_Arrayc( imgsr, pixelist).get_data()
-    '''
-
-    def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None,
-                 mean_int_sets = None, qind=None ):
-        '''
+    """
+
+    def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, mean_int_sets=None, qind=None):
+        """
         indexable: an image sequence
         pixelist:  1-D array, interested pixel list
         norm: each q-ROI of each frame is normalized by the corresponding q-ROI of the time-averaged intensity
         imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequence as FD, e.g., imgsum[10] corresponds to FD[10]
         mean_int_sets: each q-ROI of each frame is normalized by the total intensity of the corresponding q-ROI of the corresponding frame
         qind: the index of each ROI in one frame, i.e., q
         if mean_int_sets is not None: qind has to be not None
-        '''
+        """
         if beg is None:
             self.beg = FD.beg
+        else:
+            self.beg = beg
         if end is None:
             self.end = FD.end
+        else:
+            self.end = end
-        #if self.beg ==0:
+        # if self.beg ==0:
         #    self.length = self.end - self.beg
-        #else:
+        # else:
         #    self.length = self.end - self.beg + 1
-
-        self.length = self.end - self.beg
-
+
+        self.length = self.end - self.beg
+
         self.FD = FD
-        self.pixelist = pixelist
-        self.norm = norm
+        self.pixelist = pixelist
+        self.norm = norm
         self.imgsum = imgsum
-        self.mean_int_sets= mean_int_sets
+        self.mean_int_sets = mean_int_sets
         self.qind = qind
         if self.mean_int_sets is not None:
             if self.qind is None:
-                print('Please give qind.')
-
-    def get_data(self ):
-        '''
+                print("Please give qind.")
+
+    def get_data(self):
+        """
         To get the interested pixels array
         Return: 2-D array, shape as (len(images), len(pixelist))
-        '''
-
-        data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64)
-        #fra_pix = np.zeros_like( pixelist, dtype=np.float64)
-        timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 )
-        timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 )
-
+        """
+
+        data_array = np.zeros([self.length, len(self.pixelist)], dtype=np.float64)
+        # fra_pix = np.zeros_like( pixelist, dtype=np.float64)
+        timg = np.zeros(self.FD.md["ncols"] * self.FD.md["nrows"], dtype=np.int32)
+        timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1)
+
         if self.mean_int_sets is not None:
-            #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float)
-            Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64)
-            noqs = len(np.unique( self.qind ))
-            nopr = np.bincount(self.qind-1)
-            noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] )
-            qind_ = np.zeros_like( self.qind )
+            # Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float)
+            Mean_Int_Qind = np.ones(len(self.qind), dtype=np.float64)
+            noqs = len(np.unique(self.qind))
+            nopr = np.bincount(self.qind - 1)
+            noprs = np.concatenate([np.array([0]), np.cumsum(nopr)])
+            qind_ = np.zeros_like(self.qind)
             for j in range(noqs):
-                qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0]
-
-        n=0
-        for i in tqdm(range( self.beg , self.end )):
-            (p,v) = self.FD.rdrawframe(i)
-            w = np.where( timg[p] )[0]
-            pxlist = timg[ p[w] ] -1
-
-            if self.mean_int_sets is not None:#for normalization of each averaged ROI of each frame
+                qind_[noprs[j] : noprs[j + 1]] = np.where(self.qind == j + 1)[0]
+
+        n = 0
+        for i in tqdm(range(self.beg, self.end)):
+            (p, v) = self.FD.rdrawframe(i)
+            w = np.where(timg[p])[0]
+            pxlist = timg[p[w]] - 1
+
+            if self.mean_int_sets is not None:  # for normalization of each averaged ROI of each frame
                 for j in range(noqs):
-                    #if i ==100:
+                    # if i ==100:
                     #    if j==0:
-                    #        print( self.mean_int_sets[i][j] )
+                    #        print( self.mean_int_sets[i][j] )
                     #        print( qind_[ noprs[j]: noprs[j+1] ] )
-                    Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j]
-                norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist]
-
-                #if i==100:
+                    Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j]
+                norm_Mean_Int_Qind = Mean_Int_Qind[pxlist]  # self.mean_int_set or Mean_Int_Qind[pxlist]
+
+                # if i==100:
                 #    print( i, Mean_Int_Qind[ self.qind== 11 ])
-                #print('Do norm_mean_int here')
-                #if i ==10:
+                # print('Do norm_mean_int here')
+                # if i ==10:
                 #    print( norm_Mean_Int_Qind )
             else:
-                norm_Mean_Int_Qind = 1.0
-            if self.imgsum is not None:
-                norm_imgsum = self.imgsum[i]
+                norm_Mean_Int_Qind = 1.0
+            if self.imgsum is not None:
+                norm_imgsum = self.imgsum[i]
             else:
-                norm_imgsum = 1.0
+                norm_imgsum = 1.0
             if self.norm is not None:
-                if len( (self.norm).shape )>1:
-                    norm_avgimg_roi = self.norm[i][pxlist]
-                    #print('here')
-
-                else:
-                    norm_avgimg_roi = self.norm[pxlist]
+                if len((self.norm).shape) > 1:
+                    norm_avgimg_roi = self.norm[i][pxlist]
+                    # print('here')
+
+                else:
+                    norm_avgimg_roi = self.norm[pxlist]
             else:
-                norm_avgimg_roi = 1.0
-
-            norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi
-            #if i==100:
+                norm_avgimg_roi = 1.0
+
+            norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi
+            # if i==100:
             #    print(norm_Mean_Int_Qind[:100])
-            data_array[n][ pxlist] = v[w]/ norms
-            n +=1
-
-        return data_array
+            data_array[n][pxlist] = v[w] / norms
+            n += 1
+
+        return data_array
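+# Editor's sketch (illustrative, not part of the original module): a minimal
+# chain from a compressed-file handler `FD` and a labeled `roi_mask` (both
+# assumed given by the calling pipeline) to the in-memory pixel array consumed
+# by the two-time code below.
+def _sketch_get_data_pixel(FD, roi_mask):
+    """Illustrative only: load all ROI pixels of an image series into memory."""
+    qind, pixelist = roi.extract_label_indices(roi_mask)
+    # pass norm=, imgsum= or mean_int_sets= (with qind) to normalize on the fly
+    data_pixel = Get_Pixel_Arrayc(FD, pixelist, qind=qind).get_data()
+    return data_pixel  # shape: (FD.end - FD.beg, len(pixelist))
+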
-def auto_two_Arrayc( data_pixel, rois, index=None):
-
-    '''
+def auto_two_Arrayc(data_pixel, rois, index=None):
+    """
     Dec 16, 2015, Y.G.@CHX
-    a numpy operation method to get two-time correlation function
-
+    a numpy operation method to get the two-time correlation function
+
     Parameters:
         data:  images sequence, shape as [img[0], img[1], imgs_length]
         rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs

     Options:
-
-        data_pixel: if not None,
+
+        data_pixel: if not None,
                     2-D array, shape as (len(images), len(qind)),
-                    use function Get_Pixel_Array( ).get_data(  ) to get
-
-
+                    use function Get_Pixel_Arrayc( ).get_data( ) to get
+
+
     Return:
         g12: a 3-D array, shape as ( imgs_length, imgs_length, q)

-    One example:
-        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel )
-    '''
-
-    qind, pixelist = roi.extract_label_indices( rois )
-    noqs = len( np.unique(qind) )
-    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
-    noframes = data_pixel.shape[0]
+
+    One example:
+        g12 = auto_two_Arrayc( data_pixel, ring_mask )
+    """
+
+    qind, pixelist = roi.extract_label_indices(rois)
+    noqs = len(np.unique(qind))
+    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
+    noframes = data_pixel.shape[0]
     if index is None:
-        index = np.arange( 1, noqs + 1 )
+        index = np.arange(1, noqs + 1)
     else:
         try:
             len(index)
-            index = np.array( index )
+            index = np.array(index)
         except TypeError:
-            index = np.array( [index] )
-    #print( index )
-    qlist = np.arange( 1, noqs + 1 )[ index -1 ]
-    #print( qlist )
+            index = np.array([index])
+    # print( index )
+    qlist = np.arange(1, noqs + 1)[index - 1]
+    # print( qlist )
     try:
-        g12b = np.zeros( [noframes, noframes, len(qlist) ] )
+        g12b = np.zeros([noframes, noframes, len(qlist)])
         DO = True
     except:
-        print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely")
-        '''TO be done here '''
-        DO = False
-
+        print(
+            "The array is too large. The server can't handle such a big array. Will calculate different Q sequentially."
+        )
+        """To be done here"""
+        DO = False
+
     if DO:
         i = 0
-        for qi in tqdm(qlist ):
-            #print (qi-1)
-            pixelist_qi = np.where( qind == qi)[0]
-            #print (pixelist_qi.shape, data_pixel[qi].shape)
-            data_pixel_qi = data_pixel[:,pixelist_qi]
-            sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes )
-            sum2 = sum1.T
-            #print( qi, qlist, )
-            #print( g12b[:,:,qi -1 ] )
-            g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1]
-            i +=1
+        for qi in tqdm(qlist):
+            # print (qi-1)
+            pixelist_qi = np.where(qind == qi)[0]
+            # print (pixelist_qi.shape, data_pixel[qi].shape)
+            data_pixel_qi = data_pixel[:, pixelist_qi]
+            sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes)
+            sum2 = sum1.T
+            # print( qi, qlist, )
+            # print( g12b[:,:,qi -1 ] )
+            g12b[:, :, i] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1]
+            i += 1
     return g12b
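+# Editor's note (assumes g12b as returned above and a valid q index iq): the
+# one-time g2 at lag k is the mean of the k-th off-diagonal of the two-time
+# matrix, a handy consistency check against the multi-tau result:
+#
+#     g2_q = np.array([np.diagonal(g12b[:, :, iq], offset=k).mean()
+#                      for k in range(1, g12b.shape[0])])
+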
-def auto_two_Arrayc_ExplicitNorm( data_pixel, rois, norm=None, index=None):
-
-    '''
+
+def auto_two_Arrayc_ExplicitNorm(data_pixel, rois, norm=None, index=None):
+    """
     Dec 16, 2015, Y.G.@CHX
     a numpy operation method to get the two-time correlation function by giving an explicit normalization
-
+
     Parameters:
         data:  images sequence, shape as [img[0], img[1], imgs_length]
         rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs
         norm: if not None, should have the same shape as data_pixel, will normalize the two-time function by this norm
               if None, will return the two-time function without normalization
-
+
     Options:
-
-        data_pixel: if not None,
+
+        data_pixel: if not None,
                     2-D array, shape as (len(images), len(qind)),
-                    use function Get_Pixel_Array( ).get_data(  ) to get
-
-
+                    use function Get_Pixel_Arrayc( ).get_data( ) to get
+
+
     Return:
         g12: a 3-D array, shape as ( imgs_length, imgs_length, q)

-    One example:
-        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel )
-    '''
-
-    qind, pixelist = roi.extract_label_indices( rois )
-    noqs = len( np.unique(qind) )
-    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
-    noframes = data_pixel.shape[0]
+
+    One example:
+        g12 = auto_two_Arrayc_ExplicitNorm( data_pixel, ring_mask, norm=norm )
+    """
+
+    qind, pixelist = roi.extract_label_indices(rois)
+    noqs = len(np.unique(qind))
+    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
+    noframes = data_pixel.shape[0]
     if index is None:
-        index = np.arange( 1, noqs + 1 )
+        index = np.arange(1, noqs + 1)
     else:
         try:
             len(index)
-            index = np.array( index )
+            index = np.array(index)
         except TypeError:
-            index = np.array( [index] )
-    #print( index )
-    qlist = np.arange( 1, noqs + 1 )[ index -1 ]
-    #print( qlist )
+            index = np.array([index])
+    # print( index )
+    qlist = np.arange(1, noqs + 1)[index - 1]
+    # print( qlist )
     try:
-        g12b = np.zeros( [noframes, noframes, len(qlist) ] )
+        g12b = np.zeros([noframes, noframes, len(qlist)])
         DO = True
     except:
-        print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely")
-        '''TO be done here '''
-        DO = False
+        print(
+            "The array is too large. The server can't handle such a big array. Will calculate different Q sequentially."
+        )
+        """To be done here"""
+        DO = False
     if DO:
         i = 0
-        for qi in tqdm(qlist ):
-            pixelist_qi = np.where( qind == qi)[0]
-            data_pixel_qi = data_pixel[:,pixelist_qi]
+        for qi in tqdm(qlist):
+            pixelist_qi = np.where(qind == qi)[0]
+            data_pixel_qi = data_pixel[:, pixelist_qi]
             if norm is not None:
-                norm1 = norm[:,pixelist_qi]
-                sum1 = (np.average( norm1, axis=1)).reshape( 1, noframes )
-                sum2 = sum1.T
+                norm1 = norm[:, pixelist_qi]
+                sum1 = (np.average(norm1, axis=1)).reshape(1, noframes)
+                sum2 = sum1.T
             else:
-                sum1=1
-                sum2=1
-            g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2/ nopr[qi -1]
-            i +=1
+                sum1 = 1
+                sum2 = 1
+            g12b[:, :, i] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1]
+            i += 1
     return g12b
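+# Editor's sketch (assumes get_SG_norm from chx_generic_functions and the same
+# pixelist used to build data_pixel): a smoothed per-pixel normalization can be
+# passed explicitly; norm must have the same shape as data_pixel:
+#
+#     sg_norm = get_SG_norm(FD, pixelist, bins=1, mask=mask, window_size=11, order=5)
+#     g12b = auto_two_Arrayc_ExplicitNorm(data_pixel, roi_mask, norm=sg_norm[FD.beg:FD.end])
+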
-
-
-def two_time_norm( data_pixel, rois, index=None):
-
-    '''
+
+
+def two_time_norm(data_pixel, rois, index=None):
+    """
     Dec 16, 2015, Y.G.@CHX
-    a numpy operation method to get two-time correlation function
-
+    a numpy operation method to get the normalization (the time- and pixel-averaged intensity) of each q-ROI
+
     Parameters:
         data:  images sequence, shape as [img[0], img[1], imgs_length]
         rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs

     Options:
-
-        data_pixel: if not None,
+
+        data_pixel: if not None,
                     2-D array, shape as (len(images), len(qind)),
-                    use function Get_Pixel_Array( ).get_data(  ) to get
-
-
+                    use function Get_Pixel_Arrayc( ).get_data( ) to get
+
+
     Return:
         norm: 1-D array, the average intensity of each selected q-ROI

-    One example:
-        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel )
-    '''
-
-    qind, pixelist = roi.extract_label_indices( rois )
-    noqs = len( np.unique(qind) )
-    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
-    noframes = data_pixel.shape[0]
+
+    One example:
+        norm = two_time_norm( data_pixel, ring_mask )
+    """
+
+    qind, pixelist = roi.extract_label_indices(rois)
+    noqs = len(np.unique(qind))
+    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
+    noframes = data_pixel.shape[0]
     if index is None:
-        index = np.arange( 1, noqs + 1 )
+        index = np.arange(1, noqs + 1)
     else:
         try:
             len(index)
-            index = np.array( index )
+            index = np.array(index)
         except TypeError:
-            index = np.array( [index] )
-    #print( index )
-    qlist = np.arange( 1, noqs + 1 )[ index -1 ]
-    #print( qlist )
+            index = np.array([index])
+    # print( index )
+    qlist = np.arange(1, noqs + 1)[index - 1]
+    # print( qlist )
     try:
-        norm = np.zeros( len(qlist) )
+        norm = np.zeros(len(qlist))
         DO = True
     except:
-        print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely")
-        '''TO be done here '''
-        DO = False
-
+        print(
+            "The array is too large. The server can't handle such a big array. Will calculate different Q sequentially."
+        )
+        """To be done here"""
+        DO = False
+
     if DO:
         i = 0
-        for qi in tqdm(qlist ):
-            #print (qi-1)
-            pixelist_qi = np.where( qind == qi)[0]
-            #print (pixelist_qi.shape, data_pixel[qi].shape)
-            data_pixel_qi = data_pixel[:,pixelist_qi]
-            sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes )
-            norm[i] = np.average(sum1 )
-            #sum2 = sum1.T
-            #print( qi, qlist, )
-            #print( g12b[:,:,qi -1 ] )
-            #g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1]
-            i +=1
+        for qi in tqdm(qlist):
+            # print (qi-1)
+            pixelist_qi = np.where(qind == qi)[0]
+            # print (pixelist_qi.shape, data_pixel[qi].shape)
+            data_pixel_qi = data_pixel[:, pixelist_qi]
+            sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes)
+            norm[i] = np.average(sum1)
+            # sum2 = sum1.T
+            # print( qi, qlist, )
+            # print( g12b[:,:,qi -1 ] )
+            # g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1]
+            i += 1
     return norm
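+# Editor's note: unlike the two functions above, two_time_norm returns only the
+# time- and pixel-averaged intensity per selected q-ROI, e.g. (illustrative):
+#
+#     norm_q = two_time_norm(data_pixel, ring_mask)  # shape: (len(qlist),)
+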
-def check_normalization( frame_num, q_list, imgsa, data_pixel ):
-    '''check the ROI intensity before and after normalization
+def check_normalization(frame_num, q_list, imgsa, data_pixel):
+    """check the ROI intensity before and after normalization
     Input:
         frame_num: integer, the number of the frame to be checked
         q_list: list of integer, the list of q to be checked
         imgsa: the raw data
         data_pixel: the normalized data, calculated by function Get_Pixel_Arrayc
-    Plot the intensities
-    '''
-    fig,ax=plt.subplots(2)
-    n=0
+    Plot the intensities
+    """
+    fig, ax = plt.subplots(2)
+    n = 0
     for q in q_list:
-        norm_data = data_pixel[frame_num][qind==q]
-        raw_data = np.ravel( np.array(imgsa[frame_num]) )[pixelist[qind==q]]
-        #print(raw_data.mean())
-        plot1D( raw_data,ax=ax[0], legend='q=%s'%(q), m=markers[n],
-               title='fra=%s_raw_data'%(frame_num))
-
-        #plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n],
-        #       xlabel='pixel',title='fra=%s_norm_data'%(frame_num))
-        #print( mean_int_sets_[frame_num][q-1] )
-        plot1D( norm_data, ax=ax[1], legend='q=%s'%(q), m=markers[n],
-               xlabel='pixel',title='fra=%s_norm_data'%(frame_num))
-        n +=1
+        norm_data = data_pixel[frame_num][qind == q]
+        raw_data = np.ravel(np.array(imgsa[frame_num]))[pixelist[qind == q]]
+        # print(raw_data.mean())
+        plot1D(raw_data, ax=ax[0], legend="q=%s" % (q), m=markers[n], title="fra=%s_raw_data" % (frame_num))
+        # plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n],
+        #       xlabel='pixel',title='fra=%s_norm_data'%(frame_num))
+        # print( mean_int_sets_[frame_num][q-1] )
+        plot1D(
+            norm_data,
+            ax=ax[1],
+            legend="q=%s" % (q),
+            m=markers[n],
+            xlabel="pixel",
+            title="fra=%s_norm_data" % (frame_num),
+        )
+        n += 1
diff --git a/pyCHX/chx_correlationp.py b/pyCHX/chx_correlationp.py
index 6c34059..496ec67 100644
--- a/pyCHX/chx_correlationp.py
+++ b/pyCHX/chx_correlationp.py
@@ -3,6 +3,7 @@
 yuzhang@bnl.gov
 This module is for parallel computation of time correlation
 """
+
 from __future__ import absolute_import, division, print_function

 import logging
diff --git a/pyCHX/chx_correlationp2.py b/pyCHX/chx_correlationp2.py
index 6de555a..8ddbc19 100644
--- a/pyCHX/chx_correlationp2.py
+++ b/pyCHX/chx_correlationp2.py
@@ -5,6 +5,7 @@
 Feb 20, 2018
 The chx_correlationp2 is for debugging g2
 """
+
 from __future__ import absolute_import, division, print_function

 import logging
diff --git a/pyCHX/chx_generic_functions.py b/pyCHX/chx_generic_functions.py
index 5a04f5f..fef6168 100644
--- a/pyCHX/chx_generic_functions.py
+++ 
b/pyCHX/chx_generic_functions.py @@ -1,29 +1,50 @@ -from pyCHX.chx_libs import * -#from tqdm import * -from pyCHX.chx_libs import ( colors, markers ) -from scipy.special import erf - -from skimage.filters import prewitt -from skimage.draw import line_aa, line, polygon, ellipse, disk +import copy +import datetime +from os import listdir +from shutil import copyfile -from modest_image import imshow import matplotlib.cm as mcm -from matplotlib import cm -import copy, scipy -import PIL -from shutil import copyfile -import datetime, pytz -from skbeam.core.utils import radial_grid, angle_grid, radius_to_twotheta, twotheta_to_q -from os import listdir import numpy as np +import PIL +import pytz +import scipy +from matplotlib import cm +from modest_image import imshow +from scipy.special import erf +from skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta, twotheta_to_q +from skimage.draw import disk, ellipse, line, line_aa, polygon +from skimage.filters import prewitt - -markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H', - 'h', '*', 'd', - '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] -markers = np.array( markers *100 ) - - +# from tqdm import * +from pyCHX.chx_libs import * +from pyCHX.chx_libs import colors, markers + +markers = [ + "o", + "D", + "v", + "^", + "<", + ">", + "p", + "s", + "H", + "h", + "*", + "d", + "8", + "1", + "3", + "2", + "4", + "+", + "x", + "_", + "|", + ",", + "1", +] +markers = np.array(markers * 100) flatten_nestlist = lambda l: [item for sublist in l for item in sublist] @@ -33,46 +54,45 @@ """ -def get_frames_from_dscan( uid, detector = 'eiger4m_single_image' ): - '''Get frames from a dscan by giving uid and detector ''' +def get_frames_from_dscan(uid, detector="eiger4m_single_image"): + """Get frames from a dscan by giving uid and detector""" hdr = db[uid] - return db.get_images(hdr, detector ) + return db.get_images(hdr, detector) -def get_roi_intensity( img, roi_mask): +def get_roi_intensity(img, roi_mask): qind, pixelist = roi.extract_label_indices(roi_mask) noqs = len(np.unique(qind)) avgs = np.zeros(noqs) - for i in tqdm( range(1,1+noqs)): - avgs[i-1] = ( np.average( img[roi_mask==i] ) ) + for i in tqdm(range(1, 1 + noqs)): + avgs[i - 1] = np.average(img[roi_mask == i]) return avgs def generate_h5_list(inDir, filename): - '''YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + """YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir Input: inDir: the input direction filename: the filename for output (have to lst as extension) Output: Save the all h5 filenames in a lst file - ''' - fp_list = listdir( inDir ) - if filename[-4:] !='.lst': - filename += '.lst' + """ + fp_list = listdir(inDir) + if filename[-4:] != ".lst": + filename += ".lst" for FP in fp_list: - FP_ = inDir+FP + FP_ = inDir + FP if os.path.isdir(FP_): - fp = listdir( FP_ ) + fp = listdir(FP_) for fp_ in fp: - if '.h5' in fp_: - append_txtfile( filename = filename, - data = np.array( [ FP_+'/'+fp_ ])) - print('The full path of all the .h5 in %s has been saved in %s.'%(inDir, filename)) - print( 'You can use ./analysis/run_gui to visualize all the h5 file.') + if ".h5" in fp_: + append_txtfile(filename=filename, data=np.array([FP_ + "/" + fp_])) + print("The full path of all the .h5 in %s has been saved in %s." 
% (inDir, filename)) + print("You can use ./analysis/run_gui to visualize all the h5 file.") -def fit_one_peak_curve( x,y, fit_range=None ): - '''YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape +def fit_one_peak_curve(x, y, fit_range=None): + """YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape Parameters: x: one-d array, x-axis data y: one-d array, y-axis data @@ -85,163 +105,185 @@ def fit_one_peak_curve( x,y, fit_range=None ): xf: the x in the fit out: the fitting class resutled from lmfit - ''' + """ from lmfit.models import LinearModel, LorentzianModel + peak = LorentzianModel() background = LinearModel() model = peak + background if fit_range != None: - x1,x2=fit_range - xf= x[x1:x2] + x1, x2 = fit_range + xf = x[x1:x2] yf = y[x1:x2] else: - xf = x - yf = y - model.set_param_hint('slope', value=5 ) - model.set_param_hint('intercept', value=0 ) - model.set_param_hint('center', value=0.005 ) - model.set_param_hint('amplitude', value= 0.1 ) - model.set_param_hint('sigma', value=0.003 ) - #out=model.fit(yf, x=xf)#, method='nelder') - out=model.fit(yf, x=xf, method= 'leastsq' ) - cen = out.params['center'].value - cen_std = out.params['center'].stderr - wid = out.params['sigma'].value *2 - wid_std = out.params['sigma'].stderr *2 - return cen, cen_std, wid, wid_std , xf, out - - -def plot_xy_with_fit( x, y, xf, out, - cen, cen_std,wid, wid_std, - xlim=[1e-3,0.01],xlabel= 'q ('r'$\AA^{-1}$)', - ylabel='I(q)', filename=None): - '''YG Dev@Aug 10, 2019 to plot x,y with fit, - currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid ''' - - yf2=out.model.eval(params=out.params, x=xf) - fig, ax = plt.subplots( ) - plot1D(x=x,y=y,ax=ax,m='o', ls='',c='k', legend='data') - plot1D(x=xf,y=yf2,ax=ax,m='', ls='-',c='r', legend='fit',logy=True) - ax.set_xlim( xlim ) - #ax.set_ylim( 0.1, 4) - #ax.set_title(uid+'--t=%.2f'%tt) - ax.set_xlabel( xlabel ) - ax.set_ylabel(ylabel ) - txts = r'peak' + r' = %.5f +/- %.5f '%( cen, cen_std ) - ax.text(x =0.02, y=.2, s=txts, fontsize=14, transform=ax.transAxes) - txts = r'wid' + r' = %.4f +/- %.4f'%( wid, wid_std) - #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' - ax.text(x =0.02, y=.1, s=txts, fontsize=14, transform=ax.transAxes) + xf = x + yf = y + model.set_param_hint("slope", value=5) + model.set_param_hint("intercept", value=0) + model.set_param_hint("center", value=0.005) + model.set_param_hint("amplitude", value=0.1) + model.set_param_hint("sigma", value=0.003) + # out=model.fit(yf, x=xf)#, method='nelder') + out = model.fit(yf, x=xf, method="leastsq") + cen = out.params["center"].value + cen_std = out.params["center"].stderr + wid = out.params["sigma"].value * 2 + wid_std = out.params["sigma"].stderr * 2 + return cen, cen_std, wid, wid_std, xf, out + + +def plot_xy_with_fit( + x, + y, + xf, + out, + cen, + cen_std, + wid, + wid_std, + xlim=[1e-3, 0.01], + xlabel="q (" r"$\AA^{-1}$)", + ylabel="I(q)", + filename=None, +): + """YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid""" + + yf2 = out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, m="o", ls="", c="k", legend="data") + plot1D(x=xf, y=yf2, ax=ax, m="", ls="-", c="r", legend="fit", logy=True) + ax.set_xlim(xlim) + # ax.set_ylim( 0.1, 4) + # ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + txts = r"peak" + r" = %.5f +/- %.5f " % (cen, cen_std) + 
ax.text(x=0.02, y=0.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"wid" + r" = %.4f +/- %.4f" % (wid, wid_std) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) plt.tight_layout() if filename != None: - plt.savefig( filename ) + plt.savefig(filename) return ax - - - -def get_touched_qwidth( qcenters ): - '''YG Dev@CHX April 2019, get touched qwidth by giving qcenters - ''' +def get_touched_qwidth(qcenters): + """YG Dev@CHX April 2019, get touched qwidth by giving qcenters""" qwX = np.zeros_like(qcenters) - qW= qcenters[1:] - qcenters[:-1] + qW = qcenters[1:] - qcenters[:-1] qwX[0] = qW[0] - for i in range(1,len(qcenters)-1): - #print(i) - qwX[i] = min( qW[i-1], qW[i] ) + for i in range(1, len(qcenters) - 1): + # print(i) + qwX[i] = min(qW[i - 1], qW[i]) qwX[-1] = qW[-1] - qwX *=0.9999 + qwX *= 0.9999 return qwX - -def append_txtfile( filename, data, fmt='%s', *argv,**kwargs ): - '''YG. Dev May 10, 2109 append data to a file +def append_txtfile(filename, data, fmt="%s", *argv, **kwargs): + """YG. Dev May 10, 2109 append data to a file Create an empty file if the file dose not exist, otherwise, will append the data to it Input: fp: filename data: the data to be append fmt: the parameter defined in np.savetxt - ''' + """ from numpy import savetxt - exists = os.path.isfile( filename) - if not exists: - np.savetxt( filename, [ ] , fmt='%s', ) - print('create new file') - f=open( filename, 'a') - savetxt( f, data, fmt = fmt , *argv,**kwargs ) + exists = os.path.isfile(filename) + if not exists: + np.savetxt( + filename, + [], + fmt="%s", + ) + print("create new file") + + f = open(filename, "a") + savetxt(f, data, fmt=fmt, *argv, **kwargs) f.close() -def get_roi_mask_qval_qwid_by_shift( new_cen, new_mask, old_cen,old_roi_mask, - setup_pargs, geometry, - limit_qnum= None): - '''YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask''' - center=setup_pargs['center'] - roi_mask1 = shift_mask( new_cen=center, new_mask=new_mask, old_cen=old_cen, - old_roi_mask=old_roi_mask, limit_qnum= limit_qnum) + +def get_roi_mask_qval_qwid_by_shift( + new_cen, new_mask, old_cen, old_roi_mask, setup_pargs, geometry, limit_qnum=None +): + """YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask""" + center = setup_pargs["center"] + roi_mask1 = shift_mask( + new_cen=center, new_mask=new_mask, old_cen=old_cen, old_roi_mask=old_roi_mask, limit_qnum=limit_qnum + ) qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( - new_mask=new_mask, setup_pargs=setup_pargs, - old_roi_mask=old_roi_mask, old_cen=old_cen, geometry = geometry ) - w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1,new_mask) - #print(w,w1) - qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k in w1 } - qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k in w1 } - qval_dict={} - qwid_dict={} - for i, k in enumerate( list(qval_dictx.keys())): + new_mask=new_mask, setup_pargs=setup_pargs, old_roi_mask=old_roi_mask, old_cen=old_cen, geometry=geometry + ) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1, new_mask) + # print(w,w1) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k in w1} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k in w1} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): qval_dict[i] = qval_dictx[k] qwid_dict[i] = qwid_dictx[k] return roi_mask1, qval_dict, qwid_dict 
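+# Editor's sketch (illustrative; `mask`, `big_roi_mask`, `old_cen` and
+# `setup_pargs` are assumed from the calling notebook): regenerate an ROI for a
+# new beam center by shifting a pre-defined, oversized roi_mask. Note that the
+# function reads the center from setup_pargs['center'] rather than from its
+# new_cen argument:
+#
+#     roi_mask, qval_dict, qwid_dict = get_roi_mask_qval_qwid_by_shift(
+#         new_cen=setup_pargs['center'], new_mask=mask, old_cen=old_cen,
+#         old_roi_mask=big_roi_mask, setup_pargs=setup_pargs, geometry='saxs')
+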
-def get_zero_nozero_qind_from_roi_mask(roi_mask,mask): - '''YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number''' - qind, pixelist = roi.extract_label_indices(roi_mask*mask) +def get_zero_nozero_qind_from_roi_mask(roi_mask, mask): + """YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number""" + qind, pixelist = roi.extract_label_indices(roi_mask * mask) noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs+1))[1:] - w=np.where(nopr==0)[0] - w1=np.where(nopr!=0)[0] + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + w = np.where(nopr == 0)[0] + w1 = np.where(nopr != 0)[0] return w, w1 - -def get_masked_qval_qwid_dict_using_Rmax( new_mask, setup_pargs, old_roi_mask, old_cen, geometry ): - '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method ''' - cy,cx= setup_pargs['center'] - my,mx=new_mask.shape - Rmax = int(np.ceil(max( np.hypot(cx,cy),np.hypot(cx-mx,cy-my),np.hypot(cx,cy-my),np.hypot(cx-mx,cy) ))) - Fmask = np.zeros([Rmax*2,Rmax*2],dtype=int) - Fmask[ Rmax-cy : Rmax-cy+my, Rmax-cx: Rmax-cx + mx]=new_mask - roi_mask1 = shift_mask( new_cen=[Rmax,Rmax], new_mask=np.ones_like(Fmask), old_cen=old_cen, - old_roi_mask=old_roi_mask, limit_qnum= None) - setup_pargs_={ 'center':[Rmax,Rmax], 'dpix': setup_pargs['dpix'], 'Ldet': setup_pargs['Ldet'], - 'lambda_': setup_pargs['lambda_'], } - qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict( roi_mask1, Fmask, setup_pargs_, geometry ) - #w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) - return qval_dict1, qwid_dict1#,w - +def get_masked_qval_qwid_dict_using_Rmax(new_mask, setup_pargs, old_roi_mask, old_cen, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method""" + cy, cx = setup_pargs["center"] + my, mx = new_mask.shape + Rmax = int( + np.ceil(max(np.hypot(cx, cy), np.hypot(cx - mx, cy - my), np.hypot(cx, cy - my), np.hypot(cx - mx, cy))) + ) + Fmask = np.zeros([Rmax * 2, Rmax * 2], dtype=int) + Fmask[Rmax - cy : Rmax - cy + my, Rmax - cx : Rmax - cx + mx] = new_mask + roi_mask1 = shift_mask( + new_cen=[Rmax, Rmax], + new_mask=np.ones_like(Fmask), + old_cen=old_cen, + old_roi_mask=old_roi_mask, + limit_qnum=None, + ) + setup_pargs_ = { + "center": [Rmax, Rmax], + "dpix": setup_pargs["dpix"], + "Ldet": setup_pargs["Ldet"], + "lambda_": setup_pargs["lambda_"], + } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict(roi_mask1, Fmask, setup_pargs_, geometry) + # w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1 # ,w -def get_masked_qval_qwid_dict( roi_mask, mask, setup_pargs, geometry ): - '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask ''' +def get_masked_qval_qwid_dict(roi_mask, mask, setup_pargs, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask""" - qval_dict_, qwid_dict_ = get_qval_qwid_dict( roi_mask, setup_pargs, geometry= geometry) - w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask,mask) - qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k not in w } - qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k not in w } - qval_dict={} - qwid_dict={} - for i, k in enumerate( list(qval_dictx.keys())): + qval_dict_, qwid_dict_ = get_qval_qwid_dict(roi_mask, setup_pargs, geometry=geometry) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask, mask) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k not in w} + qwid_dictx = {k: 
v for (k, v) in list(qwid_dict_.items()) if k not in w} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): qval_dict[i] = qval_dictx[k] qwid_dict[i] = qwid_dictx[k] return qval_dict, qwid_dict -def get_qval_qwid_dict( roi_mask, setup_pargs, geometry='saxs'): - '''YG Dev April 6, 2019 +def get_qval_qwid_dict(roi_mask, setup_pargs, geometry="saxs"): + """YG Dev April 6, 2019 Get qval_dict and qwid_dict by giving roi_mask, setup_pargs Input: roi_mask: integer type 2D array @@ -266,68 +308,67 @@ def get_qval_qwid_dict( roi_mask, setup_pargs, geometry='saxs'): TODOLIST: to make GiSAXS work - ''' + """ - origin = setup_pargs['center']#[::-1] + origin = setup_pargs["center"] # [::-1] shape = roi_mask.shape qp_map = radial_grid(origin, shape) - phi_map = np.degrees( angle_grid(origin, shape) ) - two_theta = radius_to_twotheta( setup_pargs['Ldet'], setup_pargs['dpix'] * qp_map ) - q_map = utils.twotheta_to_q(two_theta, setup_pargs['lambda_']) + phi_map = np.degrees(angle_grid(origin, shape)) + two_theta = radius_to_twotheta(setup_pargs["Ldet"], setup_pargs["dpix"] * qp_map) + q_map = utils.twotheta_to_q(two_theta, setup_pargs["lambda_"]) qind, pixelist = roi.extract_label_indices(roi_mask) Qval = np.unique(qind) qval_dict_ = {} qwid_dict_ = {} - for j, i in enumerate( Qval): - qval = q_map[ roi_mask == i ] - #print( qval ) - if geometry=='saxs': - qval_dict_[j] = [( qval.max() + qval.min() )/2] # np.mean(qval) - qwid_dict_[j] = [( qval.max() - qval.min() ) ] - - elif geometry=='ang_saxs': - aval = phi_map[ roi_mask == i ] - #print(j,i,qval, aval) + for j, i in enumerate(Qval): + qval = q_map[roi_mask == i] + # print( qval ) + if geometry == "saxs": + qval_dict_[j] = [(qval.max() + qval.min()) / 2] # np.mean(qval) + qwid_dict_[j] = [(qval.max() - qval.min())] + + elif geometry == "ang_saxs": + aval = phi_map[roi_mask == i] + # print(j,i,qval, aval) qval_dict_[j] = np.zeros(2) qwid_dict_[j] = np.zeros(2) - qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) - qwid_dict_[j][0] = ( qval.max() - qval.min() ) + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() - if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): - qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) - qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) - #print('here -- %s'%j) + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) else: - qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) - qwid_dict_[j][1] = abs( aval.max() - aval.min() ) + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) - elif geometry=='flow_saxs': - sx,sy = roi_mask.shape - cx,cy = origin - aval = (phi_map[cx:])[ roi_mask[cx:] == i ] - if len(aval)==0: - aval = (phi_map[:cx])[ roi_mask[:cx] == i ] + 180 + elif geometry == "flow_saxs": + sx, sy = roi_mask.shape + cx, cy = origin + aval = (phi_map[cx:])[roi_mask[cx:] == i] + if len(aval) == 0: + aval = (phi_map[:cx])[roi_mask[:cx] == i] + 180 qval_dict_[j] = np.zeros(2) qwid_dict_[j] = np.zeros(2) - qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) - qwid_dict_[j][0] = ( qval.max() - qval.min() ) - #print(aval) - if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): - qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # 
np.mean(qval) - qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) - #print('here -- %s'%j) + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + # print(aval) + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) else: - qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) - qwid_dict_[j][1] = abs( aval.max() - aval.min() ) + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) return qval_dict_, qwid_dict_ - -def get_SG_norm( FD, pixelist, bins=1, mask=None, window_size= 11, order= 5 ): - '''Get normalization of a time series by SavitzkyGolay filter +def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): + """Get normalization of a time series by SavitzkyGolay filter Input: FD: file handler for a compressed data pixelist: pixel list for a roi_mask @@ -337,64 +378,65 @@ def get_SG_norm( FD, pixelist, bins=1, mask=None, window_size= 11, order= 5 ): window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details Return: norm: shape as ( length of FD, length of pixelist ) - ''' + """ if mask == None: mask = 1 beg = FD.beg end = FD.end - N = end-beg + N = end - beg BEG = beg - if bins==1: + if bins == 1: END = end NB = N - MOD=0 + MOD = 0 else: - END = N//bins - MOD = N%bins + END = N // bins + MOD = N % bins NB = END - norm = np.zeros( [ end, len(pixelist) ] ) - for i in tqdm( range( NB ) ): + norm = np.zeros([end, len(pixelist)]) + for i in tqdm(range(NB)): if bins == 1: img = FD.rdframe(i + BEG) else: - for j in range( bins): - ct = i * bins + j + BEG - #print(ct) - if j==0: - img = FD.rdframe( ct ) + for j in range(bins): + ct = i * bins + j + BEG + # print(ct) + if j == 0: + img = FD.rdframe(ct) n = 1.0 else: - (p,v) = FD.rdrawframe(ct) - np.ravel( img )[p] += v - #img += FD.rdframe( ct ) + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + # img += FD.rdframe( ct ) n += 1 - img /= n - avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask + img /= n + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask normi = np.ravel(avg_imgf)[pixelist] - if bins==1: - norm[i+beg] = normi + if bins == 1: + norm[i + beg] = normi else: - norm[ i*bins+beg: (i+1)*bins+beg ] = normi + norm[i * bins + beg : (i + 1) * bins + beg] = normi if MOD: for j in range(MOD): - ct = (1+i) * bins + j + BEG - if j==0: - img = FD.rdframe( ct ) + ct = (1 + i) * bins + j + BEG + if j == 0: + img = FD.rdframe(ct) n = 1.0 else: - (p,v) = FD.rdrawframe(ct) - np.ravel( img )[p] += v + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v n += 1 - img /= n - #print(ct,n) - img = FD.rdframe( ct ) - avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask + img /= n + # print(ct,n) + img = FD.rdframe(ct) + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask normi = np.ravel(avg_imgf)[pixelist] - norm[ (i+1)*bins + beg: (i+2)*bins + beg ] = normi + norm[(i + 1) * bins + beg : (i + 2) * bins + beg] = normi return norm -def shift_mask( new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None ): - '''Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask + +def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): + """Y.G. 
Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask Input: new_cen: [x,y] in uint of pixel new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask @@ -404,29 +446,42 @@ def shift_mask( new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None ): Output: the shifted/croped roi_mask - ''' - nsx,nsy = new_mask.shape - down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] - x1,x2,y1,y2 = [ old_cen[0] - down, old_cen[0] + up , old_cen[1] - left, old_cen[1] + right ] - nroi_mask_ = old_roi_mask[ x1:x2, y1:y2 ] * new_mask - nroi_mask = np.zeros_like( nroi_mask_ ) + """ + nsx, nsy = new_mask.shape + down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] + x1, x2, y1, y2 = [old_cen[0] - down, old_cen[0] + up, old_cen[1] - left, old_cen[1] + right] + nroi_mask_ = old_roi_mask[x1:x2, y1:y2] * new_mask + nroi_mask = np.zeros_like(nroi_mask_) qind, pixelist = roi.extract_label_indices(nroi_mask_) qu = np.unique(qind) - #noqs = len( qu ) - #nopr = np.bincount(qind, minlength=(noqs+1))[1:] - #qm = nopr>0 + # noqs = len( qu ) + # nopr = np.bincount(qind, minlength=(noqs+1))[1:] + # qm = nopr>0 for j, qv in enumerate(qu): - nroi_mask[nroi_mask_ == qv] = j +1 + nroi_mask[nroi_mask_ == qv] = j + 1 if limit_qnum != None: - nroi_mask[ nroi_mask > limit_qnum ]=0 + nroi_mask[nroi_mask > limit_qnum] = 0 return nroi_mask -def plot_q_g2fitpara_general( g2_dict, g2_fitpara, geometry ='saxs', ylim = None, - plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, - show_fit=True, ylabel='g2', qth_interest = None, max_plotnum_fig=1600,qphi_analysis=False, - *argv,**kwargs): - ''' +def plot_q_g2fitpara_general( + g2_dict, + g2_fitpara, + geometry="saxs", + ylim=None, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + ylabel="g2", + qth_interest=None, + max_plotnum_fig=1600, + qphi_analysis=False, + *argv, + **kwargs, +): + """ Mar 29,2019, Y.G.@CHX plot q~fit parameters @@ -444,141 +499,165 @@ def plot_q_g2fitpara_general( g2_dict, g2_fitpara, geometry ='saxs', ylim = Non Otherwise, power is variable. 
show_fit:, bool, if False, not show the fit - ''' + """ - if 'uid' in kwargs.keys(): - uid_ = kwargs['uid'] + if "uid" in kwargs.keys(): + uid_ = kwargs["uid"] else: - uid_ = 'uid' - if 'path' in kwargs.keys(): - path = kwargs['path'] + uid_ = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] else: - path = '' + path = "" data_dir = path - if ylabel=='g2': - ylabel='g_2' - if ylabel=='g4': - ylabel='g_4' + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" - if geometry =='saxs': + if geometry == "saxs": if qphi_analysis: - geometry = 'ang_saxs' - + geometry = "ang_saxs" + + qval_dict_, fit_res_ = g2_dict, g2_fitpara + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] - qval_dict_, fit_res_ = g2_dict, g2_fitpara + # print(qr_label, qz_label, short_ulabel, long_ulabel) + # $print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( + g2_fitpara["beta"], + g2_fitpara["relaxation_rate"], + g2_fitpara["baseline"], + g2_fitpara["alpha"], + ) - (qr_label, qz_label, num_qz, num_qr, num_short, - num_long, short_label, long_label,short_ulabel, - long_ulabel,ind_long, master_plot, - mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) fps = [] - - #print(qr_label, qz_label, short_ulabel, long_ulabel) - #$print( num_short, num_long ) - beta, relaxation_rate, baseline, alpha = ( g2_fitpara['beta'], - g2_fitpara['relaxation_rate'], - g2_fitpara['baseline'], - g2_fitpara['alpha'] ) - - fps=[] - for s_ind in range( num_short ): - ind_long_i = ind_long[ s_ind ] - num_long_i = len( ind_long_i ) - betai, relaxation_ratei, baselinei, alphai = (beta[ind_long_i], relaxation_rate[ind_long_i], - baseline[ind_long_i], alpha[ind_long_i] ) + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + betai, relaxation_ratei, baselinei, alphai = ( + beta[ind_long_i], + relaxation_rate[ind_long_i], + baseline[ind_long_i], + alpha[ind_long_i], + ) qi = long_ulabel - #print(s_ind, qi, np.array( betai) ) + # print(s_ind, qi, np.array( betai) ) if RUN_GUI: fig = Figure(figsize=(10, 12)) else: - #fig = plt.figure( ) - if num_long_i <=4: - if master_plot != 'qz': + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": fig = plt.figure(figsize=(8, 6)) else: - if num_short>1: + if num_short > 1: fig = plt.figure(figsize=(8, 4)) else: fig = plt.figure(figsize=(10, 6)) - #print('Here') + # print('Here') elif num_long_i > max_plotnum_fig: - num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 - fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] - #print( figsize ) + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) else: - #print('Here') - if master_plot != 'qz': + # print('Here') + if master_plot != "qz": fig = plt.figure(figsize=figsize) else: fig = plt.figure(figsize=(10, 10)) - if master_plot == 'qz': - if geometry=='ang_saxs': - title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' - elif geometry=='gi_saxs': - title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + if master_plot == "qz": + if geometry == "ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + 
"%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" else: - title_short = '' - else: #qr - if geometry=='ang_saxs' or geometry=='gi_saxs': - title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" else: - title_short='' - #print(geometry) - #filename ='' - til = '%s:--->%s'%(uid_, title_short ) - if num_long_i <=4: - plt.title( til,fontsize= 14, y =1.15) + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (uid_, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) else: - plt.title( til,fontsize=20, y =1.06) - #print( num_long ) - if num_long!=1: - #print( 'here') - plt.axis('off') - #sy = min(num_long_i,4) - sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) else: - sy =1 - sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) + sy = 1 + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) temp = sy sy = sx sx = temp - if sx==1: - if sy==1: - plt.axis('on') - ax1 = fig.add_subplot( 4,1,1 ) - ax2 = fig.add_subplot( 4,1,2 ) - ax3 = fig.add_subplot( 4,1,3 ) - ax4 = fig.add_subplot( 4,1,4 ) - plot1D(x=qi, y=betai, m='o', ls='--', c='k', ax=ax1, legend=r'$\beta$', title='') - plot1D(x=qi, y=alphai, m='o', ls='--',c='r', ax=ax2, legend=r'$\alpha$', title='') - plot1D(x=qi, y=baselinei, m='o', ls='--', c='g', ax=ax3, legend=r'$baseline$', title='') - plot1D(x=qi, y=relaxation_ratei, m='o', c='b', ls='--', ax=ax4, legend= r'$\gamma$ $(s^{-1})$' , title='') - - ax4.set_ylabel( r'$\gamma$ $(s^{-1})$' ) + if sx == 1: + if sy == 1: + plt.axis("on") + ax1 = fig.add_subplot(4, 1, 1) + ax2 = fig.add_subplot(4, 1, 2) + ax3 = fig.add_subplot(4, 1, 3) + ax4 = fig.add_subplot(4, 1, 4) + plot1D(x=qi, y=betai, m="o", ls="--", c="k", ax=ax1, legend=r"$\beta$", title="") + plot1D(x=qi, y=alphai, m="o", ls="--", c="r", ax=ax2, legend=r"$\alpha$", title="") + plot1D(x=qi, y=baselinei, m="o", ls="--", c="g", ax=ax3, legend=r"$baseline$", title="") + plot1D(x=qi, y=relaxation_ratei, m="o", c="b", ls="--", ax=ax4, legend=r"$\gamma$ $(s^{-1})$", title="") + + ax4.set_ylabel(r"$\gamma$ $(s^{-1})$") ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) - ax3.set_ylabel( r'$baseline' ) - ax2.set_ylabel( r'$\alpha$' ) - ax1.set_ylabel( r'$\beta$' ) + ax3.set_ylabel(r"$baseline") + ax2.set_ylabel(r"$\alpha$") + ax1.set_ylabel(r"$\beta$") fig.tight_layout() - fp = data_dir + uid_ + 'g2_q_fit_para_%s.png'%short_ulabel[s_ind] - fig.savefig( fp , dpi=fig.dpi) + fp = data_dir + uid_ + "g2_q_fit_para_%s.png" % short_ulabel[s_ind] + fig.savefig(fp, dpi=fig.dpi) fps.append(fp) - outputfile = data_dir + '%s_g2_q_fitpara_plot'%uid_ + '.png' - #print(uid) - combine_images( fps, outputfile, outsize= [ 2000,2400 ] ) - - - - - -def plot_q_rate_general( qval_dict, rate, geometry ='saxs', ylim = None, logq=True, lograte=True, - plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, - show_fit=True, - *argv,**kwargs): - ''' + outputfile = data_dir + "%s_g2_q_fitpara_plot" % uid_ + ".png" + # print(uid) + combine_images(fps, outputfile, outsize=[2000, 2400]) + + +def plot_q_rate_general( + qval_dict, + rate, + geometry="saxs", + 
ylim=None, + logq=True, + lograte=True, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + *argv, + **kwargs, +): + """ Mar 29,2019, Y.G.@CHX plot q~rate in log-log scale @@ -596,64 +675,88 @@ def plot_q_rate_general( qval_dict, rate, geometry ='saxs', ylim = None, logq=T Otherwise, power is variable. show_fit:, bool, if False, not show the fit - ''' + """ - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] + if "uid" in kwargs.keys(): + uid = kwargs["uid"] else: - uid = 'uid' - if 'path' in kwargs.keys(): - path = kwargs['path'] + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] else: - path = '' - (qr_label, qz_label, num_qz, num_qr, num_short, - num_long, short_label, long_label,short_ulabel, - long_ulabel,ind_long, master_plot, - mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) - - fig,ax = plt.subplots() - plt.title(r'$Q$''-Rate-%s'%(uid),fontsize=20, y =1.06) + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig, ax = plt.subplots() + plt.title(r"$Q$" "-Rate-%s" % (uid), fontsize=20, y=1.06) Nqz = num_short - if Nqz!=1: - ls = '--' + if Nqz != 1: + ls = "--" else: - ls='' - #print(Nqz) - for i in range(Nqz): - ind_long_i = ind_long[ i ] - y = np.array( rate )[ind_long_i] - x = long_label[ind_long_i] - #print(i, x, y, D0 ) - if Nqz!=1: - label=r'$q_z=%.5f$'%short_ulabel[i] + ls = "" + # print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] else: - label='' - ax.loglog(x, y, marker = 'o', ls =ls, label=label) - if Nqz!=1:legend = ax.legend(loc='best') + label = "" + ax.loglog(x, y, marker="o", ls=ls, label=label) + if Nqz != 1: + legend = ax.legend(loc="best") if plot_index_range != None: - d1,d2 = plot_index_range - d2 = min( len(x)-1, d2 ) - ax.set_xlim( (x**power)[d1], (x**power)[d2] ) - ax.set_ylim( y[d1],y[d2]) + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) if ylim != None: - ax.set_ylim( ylim ) + ax.set_ylim(ylim) - ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$) (log)") - ax.set_xlabel("$q$"r'($\AA$) (log)') - fp = path + '%s_Q_Rate_loglog'%(uid) + '.png' - fig.savefig( fp, dpi=fig.dpi) + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$) (log)") + ax.set_xlabel("$q$" r"($\AA$) (log)") + fp = path + "%s_Q_Rate_loglog" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) fig.tight_layout() if return_fig: - return fig,ax - - - -def plot_xy_x2( x, y, x2=None, pargs=None, loglog=False, logy=True, fig_ax=None, - xlabel= 'q ('r'$\AA^{-1}$)', xlabel2='q (pixel)', title= '_q_Iq', - ylabel = 'I(q)',save=True, *argv,**kwargs): - '''YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + return fig, ax + + +def plot_xy_x2( + x, + y, + x2=None, + pargs=None, + loglog=False, + logy=True, + fig_ax=None, + xlabel="q (" r"$\AA^{-1}$)", + xlabel2="q (pixel)", + title="_q_Iq", + ylabel="I(q)", + save=True, + *argv, + **kwargs, +): + """YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) This funciton is primary for plot q-Iq Input: @@ -665,111 +768,106 @@ def plot_xy_x2( x, y, x2=None, pargs=None, 
loglog=False, logy=True, fig_ax=Non save: if True, save the plot in the path defined in pargs kwargs: could include xlim (in unit of index), ylim (in unit of real value) - ''' + """ if fig_ax == None: fig, ax1 = plt.subplots() else: - fig,ax1=fig_ax + fig, ax1 = fig_ax if pargs != None: - uid = pargs['uid'] - path = pargs['path'] + uid = pargs["uid"] + path = pargs["path"] else: - uid='XXX' - path='' + uid = "XXX" + path = "" if loglog: - ax1.loglog( x,y, '-o') + ax1.loglog(x, y, "-o") elif logy: - ax1.semilogy( x,y, '-o') + ax1.semilogy(x, y, "-o") else: - ax1.plot( x,y, '-o') - ax1.set_xlabel( xlabel ) - ax1.set_ylabel( ylabel ) - title = ax1.set_title( '%s--'%uid + title) - Nx= len(x) - if 'xlim' in kwargs.keys(): - xlim = kwargs['xlim'] - if xlim[1]>Nx: - xlim[1]=Nx-1 + ax1.plot(x, y, "-o") + ax1.set_xlabel(xlabel) + ax1.set_ylabel(ylabel) + title = ax1.set_title("%s--" % uid + title) + Nx = len(x) + if "xlim" in kwargs.keys(): + xlim = kwargs["xlim"] + if xlim[1] > Nx: + xlim[1] = Nx - 1 else: - xlim=[ 0, Nx] - if 'ylim' in kwargs.keys(): - ylim = kwargs['ylim'] + xlim = [0, Nx] + if "ylim" in kwargs.keys(): + ylim = kwargs["ylim"] else: - ylim=[y.min(), y.max()] - lx1,lx2=xlim - ax1.set_xlim( [ x[lx1], x[lx2] ] ) - ax1.set_ylim( ylim ) + ylim = [y.min(), y.max()] + lx1, lx2 = xlim + ax1.set_xlim([x[lx1], x[lx2]]) + ax1.set_ylim(ylim) if x2 != None: ax2 = ax1.twiny() - ax2.set_xlabel( xlabel2 ) - ax2.set_ylabel( ylabel ) - ax2.set_xlim( [ x2[lx1], x2[lx2] ] ) + ax2.set_xlabel(xlabel2) + ax2.set_ylabel(ylabel) + ax2.set_xlim([x2[lx1], x2[lx2]]) title.set_y(1.1) fig.subplots_adjust(top=0.85) if save: - path = pargs['path'] - fp = path + '%s_q_Iq'%uid + '.png' - fig.savefig( fp, dpi=fig.dpi) + path = pargs["path"] + fp = path + "%s_q_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) - - -def save_oavs_tifs( uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1,threshold = 0 ): - '''save oavs as png''' - tifs = list( db[uid].data( 'OAV_image') )[0] +def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0): + """save oavs as png""" + tifs = list(db[uid].data("OAV_image"))[0] try: - pixel_scalebar=np.ceil(scalebar_size/md['OAV resolution um_pixel']) + pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) except: - pixel_scalebar=None - print('No OAVS resolution is available.') + pixel_scalebar = None + print("No OAVS resolution is available.") - text_string='%s $\mu$m'%scalebar_size + text_string = "%s $\mu$m" % scalebar_size h = db[uid] - oavs=tifs - + oavs = tifs + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list detectors = sorted(get_detectors(h)) for d in range(len(detectors)): try: - oav_period=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_period'] - oav_expt=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_time'] + oav_period = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_period"] + oav_expt = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_time"] except: pass - oav_times=[] + oav_times = [] for i in range(len(oavs)): - oav_times.append(oav_expt+i*oav_period) - fig=plt.subplots(int(np.ceil(len(oavs)/3)),3,figsize=(3*5.08,int(np.ceil(len(oavs)/3))*4)) + oav_times.append(oav_expt + i * oav_period) + fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) for m in range(len(oavs)): - plt.subplot(int(np.ceil(len(oavs)/3)),3,m+1) - 
#plt.subplots(figsize=(5.2,4)) + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) + # plt.subplots(figsize=(5.2,4)) img = oavs[m] try: - ind = np.flipud(img*scale)[:,:,2] < threshold + ind = np.flipud(img * scale)[:, :, 2] < threshold except: - ind = np.flipud(img*scale) < threshold - rgb_cont_img=np.copy(np.flipud(img)) - #rgb_cont_img[ind,0]=1000 - if brightness_scale !=1: - rgb_cont_img=scale_rgb(rgb_cont_img,scale=brightness_scale) - - plt.imshow(rgb_cont_img,interpolation='none',resample=True, cmap = 'gray') - plt.axis('equal') - cross=[685,440,50] # definintion of direct beam: x, y, size - plt.plot([cross[0]-cross[2]/2,cross[0]+cross[2]/2],[cross[1],cross[1]],'r-') - plt.plot([cross[0],cross[0]],[cross[1]-cross[2]/2,cross[1]+cross[2]/2],'r-') + ind = np.flipud(img * scale) < threshold + rgb_cont_img = np.copy(np.flipud(img)) + # rgb_cont_img[ind,0]=1000 + if brightness_scale != 1: + rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) + + plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") + plt.axis("equal") + cross = [685, 440, 50] # definintion of direct beam: x, y, size + plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") + plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") if pixel_scalebar != None: - plt.plot([1100,1100+pixel_scalebar],[150,150],'r-',Linewidth=5) # scale bar. - plt.text(1000,50,text_string,fontsize=14,color='r') - plt.text(600,50,str(oav_times[m])[:5]+' [s]',fontsize=14,color='r') - plt.axis('off') - plt.savefig( data_dir + 'uid=%s_OVA_images.png'%uid) - + plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. + plt.text(1000, 50, text_string, fontsize=14, color="r") + plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") + plt.axis("off") + plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) - - -def shift_mask_old( mask, shiftx, shifty): - '''YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel +def shift_mask_old(mask, shiftx, shifty): + """YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel Input: mask: int-type array, shiftx: int scalar, shift value in x direction with unit in pixel @@ -777,102 +875,110 @@ def shift_mask_old( mask, shiftx, shifty): Output: maskn: int-type array, shifted mask - ''' - qind, pixelist = roi.extract_label_indices( mask ) + """ + qind, pixelist = roi.extract_label_indices(mask) dims = mask.shape - imgwidthy = dims[1] #dimension in y, but in plot being x - imgwidthx = dims[0] #dimension in x, but in plot being y - pixely = pixelist%imgwidthy - pixelx = pixelist//imgwidthy - pixelyn = pixely + shiftx - pixelxn = pixelx + shifty - w = (pixelyn < imgwidthy ) & (pixelyn >= 0 ) & (pixelxn < imgwidthx ) & (pixelxn >= 0 ) + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + pixely = pixelist % imgwidthy + pixelx = pixelist // imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy) & (pixelyn >= 0) & (pixelxn < imgwidthx) & (pixelxn >= 0) pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] - maskn = np.zeros_like( mask ) + maskn = np.zeros_like(mask) maskn.ravel()[pixelist_new] = qind[w] return maskn def get_current_time(): - '''get current time in a fomart of year/month/date/hour(24)/min/sec/, - e.g. 
2009-01-05 22:14:39 - ''' - loc_dt = datetime.datetime.now(pytz.timezone('US/Eastern')) + """get current time in a fomart of year/month/date/hour(24)/min/sec/, + e.g. 2009-01-05 22:14:39 + """ + loc_dt = datetime.datetime.now(pytz.timezone("US/Eastern")) fmt = "%Y-%m-%d %H:%M:%S" - return loc_dt.strftime(fmt) + return loc_dt.strftime(fmt) - -def evalue_array( array, verbose = True ): - '''Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array ''' - _min, _max, avg, std = np.min( array), np.max( array), np.average( array ), np.std( array ) +def evalue_array(array, verbose=True): + """Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array""" + _min, _max, avg, std = np.min(array), np.max(array), np.average(array), np.std(array) if verbose: - print( 'The min, max, avg, std of this array are: %s %s %s %s, respectively.'%(_min, _max, avg, std ) ) - return _min, _max, avg, std - + print( + "The min, max, avg, std of this array are: %s %s %s %s, respectively." % (_min, _max, avg, std) + ) + return _min, _max, avg, std -def find_good_xpcs_uids( fuids, Nlim=100, det = [ '4m', '1m', '500'] ): - '''Y.G., Dev Nov 1, 2018 Find the good xpcs series - Input: - fuids: list, a list of full uids - Nlim: integer, the smallest number of images to be considered as XCPS sereis - det: list, a list of detector (can be short string of the full name of the detector) - Return: - the xpcs uids list +def find_good_xpcs_uids(fuids, Nlim=100, det=["4m", "1m", "500"]): + """Y.G., Dev Nov 1, 2018 Find the good xpcs series + Input: + fuids: list, a list of full uids + Nlim: integer, the smallest number of images to be considered as XCPS sereis + det: list, a list of detector (can be short string of the full name of the detector) + Return: + the xpcs uids list - ''' + """ guids = [] for i, uid in enumerate(fuids): - if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': - head = db[uid]['start'] - for dec in head['detectors']: + if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": + head = db[uid]["start"] + for dec in head["detectors"]: for dt in det: if dt in dec: - if 'number of images' in head: - if float(head['number of images'] ) >= Nlim: - #print(i, uid) + if "number of images" in head: + if float(head["number of images"]) >= Nlim: + # print(i, uid) guids.append(uid) - G = np.unique( guids ) - print('Found %s uids for XPCS series.'%len(G) ) + G = np.unique(guids) + print("Found %s uids for XPCS series." % len(G)) return G -def create_fullImg_with_box( shape, box_nx = 9 , box_ny = 8, ): - '''Y.G. 2018/10/26 Divide image with multi touched boxes +def create_fullImg_with_box( + shape, + box_nx=9, + box_ny=8, +): + """Y.G. 
2018/10/26 Divide image with multi touched boxes Input shape: the shape of image box_nx: the number of box in x box_ny: the number width of box in y Return: roi_mask, (* mask ) - ''' + """ - #shape = mask.shape - Wrow, Wcol = int( np.ceil( shape[0]/box_nx )), int(np.ceil(shape[1]/box_ny) ) - #print(Wrow, Wcol) - roi_mask = np.zeros( shape, dtype=np.int32 ) - for i in range( box_nx ): + # shape = mask.shape + Wrow, Wcol = int(np.ceil(shape[0] / box_nx)), int(np.ceil(shape[1] / box_ny)) + # print(Wrow, Wcol) + roi_mask = np.zeros(shape, dtype=np.int32) + for i in range(box_nx): for j in range(box_ny): - roi_mask[ i*Wrow: (i+1)*Wrow , j*Wcol: (j+1)*Wcol ] = i * box_ny + j + 1 - #roi_mask *= mask + roi_mask[i * Wrow : (i + 1) * Wrow, j * Wcol : (j + 1) * Wcol] = i * box_ny + j + 1 + # roi_mask *= mask return roi_mask - -def get_refl_y0( inc_ang, inc_y0, Ldet, pixel_size, ): - ''' Get reflection beam center y +def get_refl_y0( + inc_ang, + inc_y0, + Ldet, + pixel_size, +): + """Get reflection beam center y Input: inc_ang: incident angle in degree inc_y0: incident beam y center in pixel Ldet: sample to detector distance in meter pixel_size: pixel size in meter Return: reflection beam center y in pixel - ''' - return Ldet * np.tan( np.radians(inc_ang)) * 2 / pixel_size + inc_y0 + """ + return Ldet * np.tan(np.radians(inc_ang)) * 2 / pixel_size + inc_y0 -def lin2log_g2(lin_tau,lin_g2,num_points=False): +def lin2log_g2(lin_tau, lin_g2, num_points=False): """ Lutz developed at Aug,2018 function to resample g2 with linear time steps into logarithmics @@ -881,85 +987,95 @@ def lin2log_g2(lin_tau,lin_g2,num_points=False): num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade) num_points=18 -> use 18 logarithmically spaced time points """ - #prep taus and g2s: remove nan and first data point at tau=0 - rem = lin_tau==0 - #print('lin_tau: '+str(lin_tau.size)) - #print('lin_g2: '+str(lin_g2.size)) - lin_tau[rem]=np.nan - #lin_tau[0]=np.nan;#lin_g2[0]=np.nan + # prep taus and g2s: remove nan and first data point at tau=0 + rem = lin_tau == 0 + # print('lin_tau: '+str(lin_tau.size)) + # print('lin_g2: '+str(lin_g2.size)) + lin_tau[rem] = np.nan + # lin_tau[0]=np.nan;#lin_g2[0]=np.nan lin_g2 = lin_g2[np.isfinite(lin_tau)] lin_tau = lin_tau[np.isfinite(lin_tau)] - #print('from lin-to-log-g2_sampling: ',lin_tau) + # print('from lin-to-log-g2_sampling: ',lin_tau) if num_points == False: # automatically decide how many log-points (8/decade) - dec=int(np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8)) + dec = int(np.ceil((np.log10(lin_tau.max()) - np.log10(lin_tau.min())) * 8)) else: - dec=int(num_points) - log_tau=np.logspace(np.log10(lin_tau[0]),np.log10(lin_tau.max()),dec) + dec = int(num_points) + log_tau = np.logspace(np.log10(lin_tau[0]), np.log10(lin_tau.max()), dec) # re-sample correlation function: - log_g2=[] - for i in range(log_tau.size-1): - y=[i,log_tau[i]-(log_tau[i+1]-log_tau[i])/2,log_tau[i]+(log_tau[i+1]-log_tau[i])/2] - #x=lin_tau[lin_tau>y[1]] - x1=lin_tau>y[1]; x2=lin_tauy[1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + # print(np.average(lin_g2[x])) if np.isfinite(np.average(lin_g2[x])): log_g2.append(np.average(lin_g2[x])) else: - log_g2.append(np.interp(log_tau[i],lin_tau,lin_g2)) - if i == log_tau.size-2: - #print(log_tau[i+1]) - y=[i+1,log_tau[i+1]-(log_tau[i+1]-log_tau[i])/2,log_tau[i+1]] - x1=lin_tau>y[1]; x2=lin_tau y[1] + x2 = lin_tau < y[2] + x = x1 * x2 log_g2.append(np.average(lin_g2[x])) - return 
[log_tau,log_g2] + return [log_tau, log_g2] - -def get_eigerImage_per_file( data_fullpath ): - f= h5py.File(data_fullpath) - dset_keys = list(f['/entry/data'].keys()) +def get_eigerImage_per_file(data_fullpath): + f = h5py.File(data_fullpath) + dset_keys = list(f["/entry/data"].keys()) dset_keys.sort() - dset_root="/entry/data" + dset_root = "/entry/data" dset_keys = [dset_root + "/" + dset_key for dset_key in dset_keys] dset = f[dset_keys[0]] - return len(dset) + return len(dset) + -def copy_data( old_path, new_path = '/tmp_data/data/' ): - '''YG Dev July@CHX +def copy_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX Copy Eiger file containing master and data files to a new path old_path: the full path of the Eiger master file new_path: the new path - ''' - import shutil,glob - #old_path = sud[2][0] - #new_path = '/tmp_data/data/' - fps = glob.glob( old_path[:-10] + '*' ) + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") for fp in tqdm(fps): - if not os.path.exists( new_path + os.path.basename(fp)): - shutil.copy( fp, new_path ) - print('The files %s are copied: %s.'%( old_path[:-10] + '*' , new_path + os.path.basename(fp) ) ) + if not os.path.exists(new_path + os.path.basename(fp)): + shutil.copy(fp, new_path) + print("The files %s are copied: %s." % (old_path[:-10] + "*", new_path + os.path.basename(fp))) + -def delete_data( old_path, new_path = '/tmp_data/data/' ): - '''YG Dev July@CHX +def delete_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX Delete copied Eiger file containing master and data in a new path old_path: the full path of the Eiger master file new_path: the new path - ''' - import shutil,glob - #old_path = sud[2][0] - #new_path = '/tmp_data/data/' - fps = glob.glob( old_path[:-10] + '*' ) + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") for fp in tqdm(fps): nfp = new_path + os.path.basename(fp) - if os.path.exists( nfp ): - os.remove( nfp ) + if os.path.exists(nfp): + os.remove(nfp) -def show_tif_series( tif_series, Nx=None, center=None, w= 50, vmin=None, vmax= None, cmap = cmap_vge_hdr, - logs=False, figsize=[10,16] ): - ''' +def show_tif_series( + tif_series, Nx=None, center=None, w=50, vmin=None, vmax=None, cmap=cmap_vge_hdr, logs=False, figsize=[10, 16] +): + """ tif_series: list of 2D tiff images Nx: the number in the row for dispalying center: the center of iamge (or direct beam pixel) @@ -968,39 +1084,48 @@ def show_tif_series( tif_series, Nx=None, center=None, w= 50, vmin=None, vmax= vmax: if None, will be max intensity value of the ROI figsize: size of the plot (in inch) - ''' + """ if center != None: - cy,cx = center - #infs = sorted(sample_list) - N = len( tif_series ) - if Nx == None: - sy = int( np.sqrt(N)) + cy, cx = center + # infs = sorted(sample_list) + N = len(tif_series) + if Nx == None: + sy = int(np.sqrt(N)) else: sy = Nx - sx = int( np.ceil( N/sy ) ) - fig = plt.figure( figsize =figsize ) - for i in range( N ): - #print(i) - ax = fig.add_subplot( sx, sy, i+1) - #d = (np.array( PIL.Image.open( infs[i] ).convert('I') ))[ cy-w:cy+w, cx-w:cx+w ] + sx = int(np.ceil(N / sy)) + fig = plt.figure(figsize=figsize) + for i in range(N): + # print(i) + ax = fig.add_subplot(sx, sy, i + 1) + # d = (np.array( PIL.Image.open( infs[i] ).convert('I') ))[ cy-w:cy+w, cx-w:cx+w ] d = tif_series[i][::-1] - #vmax= np.max(d) - #pritn(vmax) - #vmin= 10#np.min(d) - show_img( 
d, logs = logs, show_colorbar= False,show_ticks =False, - ax= [fig, ax], image_name= '%02d'%(i+1), cmap = cmap, - vmin= vmin, vmax= vmax, - aspect=1, save=False, path=None) + # vmax= np.max(d) + # pritn(vmax) + # vmin= 10#np.min(d) + show_img( + d, + logs=logs, + show_colorbar=False, + show_ticks=False, + ax=[fig, ax], + image_name="%02d" % (i + 1), + cmap=cmap, + vmin=vmin, + vmax=vmax, + aspect=1, + save=False, + path=None, + ) return fig, ax +from scipy.special import erf - -from scipy.special import erf -def ps( y,shift=.5, replot=True, logplot='off', x= None): - ''' +def ps(y, shift=0.5, replot=True, logplot="off", x=None): + """ Dev 16, 2018 Modified ps() function in 95-utilities.py function to determine statistic on line profile (assumes either peak or erf-profile) @@ -1012,18 +1137,20 @@ def ps( y,shift=.5, replot=True, logplot='off', x= None): x: if not None, give x-data - ''' + """ if x == None: - x = np.arange( len(y) ) - x=np.array(x) - y=np.array(y) + x = np.arange(len(y)) + x = np.array(x) + y = np.array(y) + + PEAK = x[np.argmax(y)] + PEAK_y = np.max(y) + COM = np.sum(x * y) / np.sum(y) - PEAK=x[np.argmax(y)] - PEAK_y=np.max(y) - COM=np.sum(x * y) / np.sum(y) ### from Maksim: assume this is a peak profile: def is_positive(num): return True if num > 0 else False + # Normalize values first: ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 positive = is_positive(ym[0]) @@ -1034,83 +1161,77 @@ def is_positive(num): list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) positive = not positive if len(list_of_roots) >= 2: - FWHM=abs(list_of_roots[-1] - list_of_roots[0]) - CEN=list_of_roots[0]+0.5*(list_of_roots[1]-list_of_roots[0]) - ps.fwhm=FWHM - ps.cen=CEN - yf=ym - #return { + FWHM = abs(list_of_roots[-1] - list_of_roots[0]) + CEN = list_of_roots[0] + 0.5 * (list_of_roots[1] - list_of_roots[0]) + ps.fwhm = FWHM + ps.cen = CEN + yf = ym + # return { # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), # 'x_range': list_of_roots, - #} - else: # ok, maybe it's a step function.. - #print('no peak...trying step function...') + # } + else: # ok, maybe it's a step function.. + # print('no peak...trying step function...') ym = ym + shift - def err_func(x, x0, k=2, A=1, base=0 ): #### erf fit from Yugang - return base - A * erf(k*(x-x0)) - mod = Model( err_func ) - ### estimate starting values: - x0=np.mean(x) - #k=0.1*(np.max(x)-np.min(x)) - pars = mod.make_params( x0=x0, k=2, A = 1., base = 0. 
) - result = mod.fit(ym, pars, x = x ) - CEN=result.best_values['x0'] - FWHM = result.best_values['k'] - A = result.best_values['A'] - b = result.best_values['base'] - yf_ = err_func(x, CEN, k=FWHM, A=A, base=b ) #result.best_fit - yf = (yf_ ) * (np.max(y) - np.min(y)) + np.min(y) - #(y - np.min(y)) / (np.max(y) - np.min(y)) - shift + def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + return base - A * erf(k * (x - x0)) + mod = Model(err_func) + ### estimate starting values: + x0 = np.mean(x) + # k=0.1*(np.max(x)-np.min(x)) + pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) + result = mod.fit(ym, pars, x=x) + CEN = result.best_values["x0"] + FWHM = result.best_values["k"] + A = result.best_values["A"] + b = result.best_values["base"] + yf_ = err_func(x, CEN, k=FWHM, A=A, base=b) # result.best_fit + yf = (yf_) * (np.max(y) - np.min(y)) + np.min(y) + + # (y - np.min(y)) / (np.max(y) - np.min(y)) - shift ps.cen = CEN ps.fwhm = FWHM if replot: ### re-plot results: - if logplot=='on': - fig, ax = plt.subplots() #plt.figure() - ax.semilogy([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK') + if logplot == "on": + fig, ax = plt.subplots() # plt.figure() + ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") ax.hold(True) - ax.semilogy([CEN,CEN],[np.min(y),np.max(y)],'r-.',label='CEN') - ax.semilogy([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM') - ax.semilogy(x,y,'bo-') - #plt.xlabel(field);plt.ylabel(intensity_field) + ax.semilogy([CEN, CEN], [np.min(y), np.max(y)], "r-.", label="CEN") + ax.semilogy([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.semilogy(x, y, "bo-") + # plt.xlabel(field);plt.ylabel(intensity_field) ax.legend() - #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) - #plt.show() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() else: - #plt.close(999) - fig, ax = plt.subplots() #plt.figure() - ax.plot([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK') + # plt.close(999) + fig, ax = plt.subplots() # plt.figure() + ax.plot([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") - #ax.hold(True) - ax.plot([CEN,CEN],[np.min(y),np.max(y)],'m-.',label='CEN') - ax.plot([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM') - ax.plot(x,y,'bo--') - ax.plot(x,yf,'r-', label='Fit') + # ax.hold(True) + ax.plot([CEN, CEN], [np.min(y), np.max(y)], "m-.", label="CEN") + ax.plot([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.plot(x, y, "bo--") + ax.plot(x, yf, "r-", label="Fit") - #plt.xlabel(field);plt.ylabel(intensity_field) + # plt.xlabel(field);plt.ylabel(intensity_field) ax.legend() - #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) - #plt.show() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() ### assign values of interest as function attributes: - ps.peak=PEAK - ps.com=COM + ps.peak = PEAK + ps.com = COM return ps.cen - - - - - - - -def create_seg_ring( ring_edges, ang_edges, mask, setup_pargs ): - '''YG Dev April 6, 2018 +def create_seg_ring(ring_edges, ang_edges, mask, setup_pargs): + """YG Dev April 6, 
2018
    Create segment ring mask
    Input:
        ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ]
        ang_edges: edges of angles, e.g., [ [20,40], [50, 60], ]
        mask: bool type 2D array
        setup_pargs: dict, should at least contain center
           an example:
           setup_pargs = {'uid': 'uid=run17_pos1_fra_5_20000--uid=d9e11e',
             'dpix': 0.075000003562308848,
             'Ldet': 4917.50,
             'lambda_': 1.2845441,
             'exposuretime': 0.99998999,
             'timeperframe': 1.0,
             'center': [-4469, 363],
             'path': '/XF11ID/analysis/2018_1/jianheng/Results/d9e11e/'}
    Return:
       roi_mask: segmented ring mask, a two-D array
       qval_dict: dict, key as q-number, val: q val
-    '''
-
-    roi_mask_qr, qr, qr_edge = get_ring_mask(mask, inner_radius= None, outer_radius = None,
-                        width = None, num_rings = None, edges= np.array( ring_edges), unit='pixel',
-                        pargs= setup_pargs)
-
-    roi_mask_ang, ang_center, ang_edge = get_angular_mask( mask, inner_angle= None,
-                        outer_angle = None, width = None, edges = np.array( ang_edges ),
-                        num_angles = None, center = center, flow_geometry= False )
-
-    roi_mask, good_ind = combine_two_roi_mask( roi_mask_qr, roi_mask_ang,pixel_num_thres=100)
-    qval_dict_ = get_qval_dict( qr_center = qr, qz_center = ang_center,one_qz_multi_qr=False)
-    qval_dict = { i:qval_dict_[k] for (i,k) in enumerate( good_ind) }
+    """
+
+    roi_mask_qr, qr, qr_edge = get_ring_mask(
+        mask,
+        inner_radius=None,
+        outer_radius=None,
+        width=None,
+        num_rings=None,
+        edges=np.array(ring_edges),
+        unit="pixel",
+        pargs=setup_pargs,
+    )
+
+    roi_mask_ang, ang_center, ang_edge = get_angular_mask(
+        mask,
+        inner_angle=None,
+        outer_angle=None,
+        width=None,
+        edges=np.array(ang_edges),
+        num_angles=None,
+        center=setup_pargs["center"],  # beam center from setup_pargs; a bare name here is not defined
+        flow_geometry=False,
+    )
+
+    roi_mask, good_ind = combine_two_roi_mask(roi_mask_qr, roi_mask_ang, pixel_num_thres=100)
+    qval_dict_ = get_qval_dict(qr_center=qr, qz_center=ang_center, one_qz_multi_qr=False)
+    qval_dict = {i: qval_dict_[k] for (i, k) in enumerate(good_ind)}
     return roi_mask, qval_dict


-def find_bad_pixels_FD( bad_frame_list, FD, img_shape = [514, 1030],
-                       threshold= 15, show_progress=True):
-    '''Designed to find bad pixel list in 500K
-       threshold: the max intensity in 5K
-    '''
-    bad = np.zeros( img_shape, dtype=bool )
-    if show_progress:
-        for i in tqdm(bad_frame_list[ bad_frame_list>=FD.beg]):
-            p,v = FD.rdrawframe(i)
-            w = np.where( v > threshold)[0]
-            bad.ravel()[ p[w] ] = 1
+def find_bad_pixels_FD(bad_frame_list, FD, img_shape=[514, 1030], threshold=15, show_progress=True):
+    """Designed to find the bad-pixel list in Eiger 500K data
+    threshold: the max allowed intensity
+    """
+    bad = np.zeros(img_shape, dtype=bool)
+    if show_progress:
+        for i in tqdm(bad_frame_list[bad_frame_list >= FD.beg]):
+            p, v = FD.rdrawframe(i)
+            w = np.where(v > threshold)[0]
+            bad.ravel()[p[w]] = 1
            #     x,y = np.where( imgsa[i] > threshold)
            #     bad[x[0],y[0]] = 1
    else:
-        for i in bad_frame_list[ bad_frame_list>=FD.beg]:
-            p,v = FD.rdrawframe(i)
-            w = np.where( v > threshold)[0]
-            bad.ravel()[ p[w] ] = 1
+        for i in bad_frame_list[bad_frame_list >= FD.beg]:
+            p, v = FD.rdrawframe(i)
+            w = np.where(v > threshold)[0]
+            bad.ravel()[p[w]] = 1
    return ~bad


-def get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold=15 ):
-    '''DEV by Yugang@CHX, June 6, 2019
+def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=15):
+    """DEV by Yugang@CHX, June 6, 2019
    Get circular average of a time series using a dynamic mask, whose pixel values are defined as zeros if above a threshold.
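    e.g., a minimal usage sketch (illustrative only; FD is a compressed-data handler
    and setup_pargs the usual setup dict, as used elsewhere in this module):
        qp_saxs, iq_saxs, q_saxs = get_q_iq_using_dynamic_mask( FD, mask, setup_pargs,
                                                                bin_number=10, threshold=15 )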
Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number @@ -1189,58 +1320,55 @@ def get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold qp_saxs: q in pixel iq_saxs: intenstity q_saxs: q in A-1 - ''' + """ beg = FD.beg end = FD.end shape = FD.rdframe(beg).shape - Nimg_ = FD.end-FD.beg - #Nimg_ = 100 - Nimg = Nimg_//bin_number - time_edge = np.array(create_time_slice( N= Nimg_, - slice_num= Nimg, slice_width= bin_number )) + beg - for n in tqdm( range(Nimg) ): - t1,t2 = time_edge[n] - #print(t1,t2) - if bin_number==1: + Nimg_ = FD.end - FD.beg + # Nimg_ = 100 + Nimg = Nimg_ // bin_number + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bin_number)) + beg + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + # print(t1,t2) + if bin_number == 1: avg_imgi = FD.rdframe(t1) else: - avg_imgi = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, - plot_ = False,show_progress= False) - badpi = find_bad_pixels_FD( np.arange(t1,t2) , FD, - img_shape = avg_imgi.shape, threshold= threshold, show_progress=False ) - img = avg_imgi* mask * badpi - qp_saxsi, iq_saxsi, q_saxsi = get_circular_average( img, - mask * badpi, save= False, - pargs=setup_pargs ) - #print( img.max()) - if t1==FD.beg: - qp_saxs, iq_saxs, q_saxs = np.zeros_like( qp_saxsi ), np.zeros_like( iq_saxsi ), np.zeros_like( q_saxsi ) + avg_imgi = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + badpi = find_bad_pixels_FD( + np.arange(t1, t2), FD, img_shape=avg_imgi.shape, threshold=threshold, show_progress=False + ) + img = avg_imgi * mask * badpi + qp_saxsi, iq_saxsi, q_saxsi = get_circular_average(img, mask * badpi, save=False, pargs=setup_pargs) + # print( img.max()) + if t1 == FD.beg: + qp_saxs, iq_saxs, q_saxs = np.zeros_like(qp_saxsi), np.zeros_like(iq_saxsi), np.zeros_like(q_saxsi) qp_saxs += qp_saxsi iq_saxs += iq_saxsi - q_saxs += q_saxsi + q_saxs += q_saxsi qp_saxs /= Nimg iq_saxs /= Nimg q_saxs /= Nimg return qp_saxs, iq_saxs, q_saxs -def get_waxs_beam_center( gamma, origin = [432, 363], Ldet = 1495, pixel_size = 75 * 1e-3 ): - '''YG Feb 10, 2018 - Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma - Input: - gamma: angle in degree - Ldet: sample to detector distance, 1495 mm for CHX WAXS - origin: beam center for gamma = 0, (python x,y coordinate in pixel) - pxiel size: 75 * 1e-3 mm for Eiger 1M - output: - beam center: for the target gamma, in pixel - ''' - return [ int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ] +def get_waxs_beam_center(gamma, origin=[432, 363], Ldet=1495, pixel_size=75 * 1e-3): + """YG Feb 10, 2018 + Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma + Input: + gamma: angle in degree + Ldet: sample to detector distance, 1495 mm for CHX WAXS + origin: beam center for gamma = 0, (python x,y coordinate in pixel) + pxiel size: 75 * 1e-3 mm for Eiger 1M + output: + beam center: for the target gamma, in pixel + """ + return [int(origin[0] + np.tan(np.radians(gamma)) * Ldet / pixel_size), origin[1]] -def get_img_from_iq( qp, iq, img_shape, center): - '''YG Jan 24, 2018 +def get_img_from_iq(qp, iq, img_shape, center): + """YG Jan 24, 2018 Get image from circular average Input: qp: q in pixel unit @@ -1249,121 +1377,120 @@ def get_img_from_iq( qp, iq, img_shape, center): center: [center_y, center_x] e.g., [120, 200] Output: img: recovered image - ''' - pixelist = np.arange( img_shape[0] * 
img_shape[1] )
-    pixely = pixelist%img_shape[1] -center[1]
-    pixelx = pixelist//img_shape[1] - center[0]
-    r= np.hypot(pixelx, pixely)    #leave as float.
-    #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5
-    return (np.interp( r, qp, iq )).reshape( img_shape )
+    """
+    pixelist = np.arange(img_shape[0] * img_shape[1])
+    pixely = pixelist % img_shape[1] - center[1]
+    pixelx = pixelist // img_shape[1] - center[0]
+    r = np.hypot(pixelx, pixely)  # leave as float.
+    # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5
+    return (np.interp(r, qp, iq)).reshape(img_shape)


-def average_array_withNan( array, axis=0, mask=None):
-    '''YG. Jan 23, 2018
-    Average array invovling np.nan along axis
-
-    Input:
-        array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND
-        axis: the average axis
-        mask: bool, same shape as array, if None, will mask all the nan values
-    Output:
-        avg: averaged array along axis
-    '''
+def average_array_withNan(array, axis=0, mask=None):
+    """YG. Jan 23, 2018
+    Average an array involving np.nan along axis
+
+    Input:
+        array: ND array; currently handles 1D or 2D (general ND is a TODO)
+        axis: the average axis
+        mask: bool, same shape as array; if None, will mask all the nan values
+    Output:
+        avg: averaged array along axis
+    """
    shape = array.shape
    if mask == None:
        mask = np.isnan(array)
-        #mask = np.ma.masked_invalid(array).mask
+        # mask = np.ma.masked_invalid(array).mask
    array_ = np.ma.masked_array(array, mask=mask)
    try:
-        sums = np.array( np.ma.sum( array_[:,:], axis= axis ) )
+        sums = np.array(np.ma.sum(array_[:, :], axis=axis))
    except:
-        sums = np.array( np.ma.sum( array_[:], axis= axis ) )
+        sums = np.array(np.ma.sum(array_[:], axis=axis))

-    cts = np.sum(~mask,axis=axis)
-    #print(cts)
-    return sums/cts
+    cts = np.sum(~mask, axis=axis)
+    # print(cts)
+    return sums / cts


-def deviation_array_withNan( array, axis=0, mask=None):
-    '''YG. Jan 23, 2018
-    Get the deviation of array invovling np.nan along axis
-    Input:
-        array: ND array
-        axis: the average axis
-        mask: bool, same shape as array, if None, will mask all the nan values
-    Output:
-        dev: the deviation of array along axis
-    '''
-    avg2 = average_array_withNan( array**2, axis = axis, mask = mask )
-    avg = average_array_withNan( array, axis = axis, mask = mask )
-    return np.sqrt( avg2 - avg**2 )
+def deviation_array_withNan(array, axis=0, mask=None):
+    """YG. Jan 23, 2018
+    Get the standard deviation of an array involving np.nan along axis
+    Input:
+        array: ND array
+        axis: the average axis
+        mask: bool, same shape as array; if None, will mask all the nan values
+    Output:
+        dev: the deviation of array along axis
+    """
+    avg2 = average_array_withNan(array**2, axis=axis, mask=mask)
+    avg = average_array_withNan(array, axis=axis, mask=mask)
+    return np.sqrt(avg2 - avg**2)


-def refine_roi_mask( roi_mask, pixel_num_thres=10):
-    '''YG Dev Jan20,2018
+def refine_roi_mask(roi_mask, pixel_num_thres=10):
+    """YG Dev Jan20,2018
    remove bad rois whose pixel number is lower than pixel_num_thres
    roi_mask: array,
    pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask,
               i.e., if the pixel number in one roi of the combined mask is smaller than pixel_num_thres,
               that roi will be considered as a bad one and be removed.
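    e.g., a minimal sketch (the variable names are illustrative):
        new_mask, good_ind = refine_roi_mask( roi_mask, pixel_num_thres=10 )
        # new_mask: same shape as roi_mask, relabeled 1..N with the small rois dropped
        # good_ind: zero-based indices of the original rois that were kept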
- ''' - new_mask = np.zeros_like( roi_mask ) + """ + new_mask = np.zeros_like(roi_mask) qind, pixelist = roi.extract_label_indices(roi_mask) noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs+1))[1:] - good_ind = np.where( nopr >= pixel_num_thres)[0] +1 + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + good_ind = np.where(nopr >= pixel_num_thres)[0] + 1 l = len(good_ind) - new_ind = np.arange( 1, l+1 ) - for i, gi in enumerate( good_ind ): - new_mask.ravel()[ - np.where( roi_mask.ravel() == gi)[0] ] = new_ind[i] - return new_mask, good_ind -1 - -def shrink_image_stack( imgs, bins): - '''shrink imgs by bins - imgs: shape as [Nimg, imx, imy] ''' + new_ind = np.arange(1, l + 1) + for i, gi in enumerate(good_ind): + new_mask.ravel()[np.where(roi_mask.ravel() == gi)[0]] = new_ind[i] + return new_mask, good_ind - 1 + + +def shrink_image_stack(imgs, bins): + """shrink imgs by bins + imgs: shape as [Nimg, imx, imy]""" Nimg, imx, imy = imgs.shape bx, by = bins - imgsk = np.zeros( [Nimg, imx//bx, imy//by] ) + imgsk = np.zeros([Nimg, imx // bx, imy // by]) N = len(imgs) for i in range(N): - imgsk[i] = shrink_image(imgs[i], bins ) + imgsk[i] = shrink_image(imgs[i], bins) return imgsk -def shrink_image(img, bins ): - '''YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y + +def shrink_image(img, bins): + """YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y input: img: 2d array, bins: integer list, eg. [2,2] output: imgb: binned img - ''' - m,n = img.shape + """ + m, n = img.shape bx, by = bins - Nx, Ny = m//bx, n//by - #print(Nx*bx, Ny*by) - return img[:Nx*bx, :Ny*by].reshape( Nx,bx, Ny, by).mean(axis=(1,3) ) + Nx, Ny = m // bx, n // by + # print(Nx*bx, Ny*by) + return img[: Nx * bx, : Ny * by].reshape(Nx, bx, Ny, by).mean(axis=(1, 3)) -def get_diff_fv( g2_fit_paras, qval_dict, ang_init=137.2): - '''YG@CHX Nov 9,2017 - Get flow velocity and diff from g2_fit_paras ''' +def get_diff_fv(g2_fit_paras, qval_dict, ang_init=137.2): + """YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras""" g2_fit_para_ = g2_fit_paras.copy() - qr = np.array( [qval_dict[k][0] for k in sorted( qval_dict.keys())] ) - qang = np.array( [qval_dict[k][1] for k in sorted( qval_dict.keys())] ) - #x=g2_fit_para_.pop( 'relaxation_rate' ) - #x=g2_fit_para_.pop( 'flow_velocity' ) - g2_fit_para_['diff'] = g2_fit_paras[ 'relaxation_rate' ]/qr**2 - cos_part = np.abs( np.cos( np.radians( qang - ang_init)) ) - g2_fit_para_['fv'] = g2_fit_paras[ 'flow_velocity' ]/cos_part/qr + qr = np.array([qval_dict[k][0] for k in sorted(qval_dict.keys())]) + qang = np.array([qval_dict[k][1] for k in sorted(qval_dict.keys())]) + # x=g2_fit_para_.pop( 'relaxation_rate' ) + # x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_["diff"] = g2_fit_paras["relaxation_rate"] / qr**2 + cos_part = np.abs(np.cos(np.radians(qang - ang_init))) + g2_fit_para_["fv"] = g2_fit_paras["flow_velocity"] / cos_part / qr return g2_fit_para_ - - # function to get indices of local extrema (=indices of speckle echo maximum amplitudes): -def get_echos(dat_arr,min_distance=10): +def get_echos(dat_arr, min_distance=10): """ getting local maxima and minima from 1D data -> e.g. speckle echos strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima @@ -1372,19 +1499,20 @@ def get_echos(dat_arr,min_distance=10): by LW 10/23/2018 """ from skimage.feature import peak_local_max - max_ind=peak_local_max(dat_arr, min_distance) # !!! 
careful, skimage function reverses the order (wtf?) - min_ind=[] + + max_ind = peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) + min_ind = [] for i in range(len(max_ind[:-1])): - min_ind.append(max_ind[i+1][0]+np.argmin(dat_arr[max_ind[i+1][0]:max_ind[i][0]])) - #unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: - mmax_ind=[] + min_ind.append(max_ind[i + 1][0] + np.argmin(dat_arr[max_ind[i + 1][0] : max_ind[i][0]])) + # unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind = [] for l in max_ind: mmax_ind.append(l[0]) - #return [mmax_ind,min_ind] - return [list(reversed(mmax_ind)),list(reversed(min_ind))] + # return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)), list(reversed(min_ind))] -def pad_length(arr,pad_val=np.nan): +def pad_length(arr, pad_val=np.nan): """ arr: 2D matrix pad_val: values being padded @@ -1394,76 +1522,77 @@ def pad_length(arr,pad_val=np.nan): update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogenous size by LW 12/30/2017 """ - max_len=[] + max_len = [] for i in range(len(arr)): max_len.append([len(arr[i])]) - max_len=np.max(max_len) + max_len = np.max(max_len) for l in range(len(arr)): - arr[l]=np.pad(arr[l]*1.,(0,max_len-np.size(arr[l])),mode='constant',constant_values=pad_val) + arr[l] = np.pad(arr[l] * 1.0, (0, max_len - np.size(arr[l])), mode="constant", constant_values=pad_val) return arr - def save_array_to_tiff(array, output, verbose=True): - '''Y.G. Nov 1, 2017 + """Y.G. Nov 1, 2017 Save array to a tif file - ''' + """ img = PIL.Image.fromarray(array) - img.save( output ) + img.save(output) if verbose: - print( 'The data is save to: %s.'%( output )) - + print("The data is save to: %s." % (output)) def load_pilatus(filename): - '''Y.G. Nov 1, 2017 + """Y.G. Nov 1, 2017 Load a pilatus 2D image - ''' - return np.array( PIL.Image.open(filename).convert('I') ) + """ + return np.array(PIL.Image.open(filename).convert("I")) -def ls_dir(inDir, have_list=[], exclude_list=[] ): - '''Y.G. Aug 1, 2019 + +def ls_dir(inDir, have_list=[], exclude_list=[]): + """Y.G. Aug 1, 2019 List all filenames in a filefolder inDir: fullpath of the inDir have_string: only retrun filename containing the string exclude_string: only retrun filename not containing the string - ''' + """ from os import listdir from os.path import isfile, join - tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) tifs_ = [] for tif in tifs: - flag=1 + flag = 1 for string in have_list: if string not in tif: - flag *=0 - for string in exclude_list: + flag *= 0 + for string in exclude_list: if string in tif: - flag *=0 + flag *= 0 if flag: - tifs_.append( tif ) + tifs_.append(tif) - return np.array( tifs_ ) + return np.array(tifs_) def ls_dir2(inDir, string=None): - '''Y.G. Nov 1, 2017 + """Y.G. 
Nov 1, 2017 List all filenames in a filefolder (not include hidden files and subfolders) inDir: fullpath of the inDir string: if not None, only retrun filename containing the string - ''' + """ from os import listdir from os.path import isfile, join - if string == None: - tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + + if string == None: + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) else: - tifs = np.array( [f for f in listdir(inDir) if (isfile(join(inDir, f)))&(string in f) ] ) + tifs = np.array([f for f in listdir(inDir) if (isfile(join(inDir, f))) & (string in f)]) return tifs -def re_filename( old_filename, new_filename, inDir=None, verbose=True ): - '''Y.G. Nov 28, 2017 + +def re_filename(old_filename, new_filename, inDir=None, verbose=True): + """Y.G. Nov 28, 2017 Rename old_filename with new_filename in a inDir inDir: fullpath of the inDir, if None, the filename should have the fullpath old_filename/ new_filename: string @@ -1472,30 +1601,31 @@ def re_filename( old_filename, new_filename, inDir=None, verbose=True ): 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' ) - ''' + """ if inDir != None: - os.rename(inDir + old_filename, inDir+new_filename) + os.rename(inDir + old_filename, inDir + new_filename) else: - os.rename( old_filename, new_filename) - print('The file: %s is changed to: %s.'%(old_filename, new_filename)) + os.rename(old_filename, new_filename) + print("The file: %s is changed to: %s." % (old_filename, new_filename)) -def re_filename_dir( old_pattern, new_pattern, inDir,verbose=True ): - '''Y.G. Nov 28, 2017 +def re_filename_dir(old_pattern, new_pattern, inDir, verbose=True): + """Y.G. Nov 28, 2017 Rename all filenames with old_pattern with new_pattern in a inDir inDir: fullpath of the inDir, if None, the filename should have the fullpath old_pattern, new_pattern an example, re_filename_dir('20_', '17_', inDir ) - ''' + """ fps = ls_dir(inDir) for fp in fps: if old_pattern in fp: old_filename = fp new_filename = fp.replace(old_pattern, new_pattern) - re_filename( old_filename, new_filename, inDir,verbose= verbose ) + re_filename(old_filename, new_filename, inDir, verbose=verbose) + -def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent=True, qprecision=5): +def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, silent=True, qprecision=5): """ function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) 
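    e.g., a sketch (assumes a qval_dict like the one returned by create_seg_ring above;
    with q_nr=True and phi_nr=True, q and phi are indices into the sorted Q and phi lists):
        roi_nr, Q, phi, Q_list, phi_list = get_roi_nr( qval_dict, q=1, phi=0,
                                                       q_nr=True, phi_nr=True )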
@@ -1513,40 +1643,54 @@ def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent """ import collections from collections import OrderedDict + qdict = collections.OrderedDict(sorted(qdict.items())) - qs=[] - phis=[] + qs = [] + phis = [] for i in qdict.keys(): qs.append(qdict[i][0]) - phis.append(qdict[i][1]) - qslist=list(OrderedDict.fromkeys(qs)) - qslist = np.unique( np.round(qslist, qprecision ) ) - phislist=list(OrderedDict.fromkeys(phis)) - qslist=list(np.sort(qslist)) - phislist=list(np.sort(phislist)) + phis.append(qdict[i][1]) + qslist = list(OrderedDict.fromkeys(qs)) + qslist = np.unique(np.round(qslist, qprecision)) + phislist = list(OrderedDict.fromkeys(phis)) + qslist = list(np.sort(qslist)) + phislist = list(np.sort(phislist)) if q_nr: - qinterest=qslist[q] - qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] + qinterest = qslist[q] + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] else: - qinterest=q - qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new + qinterest = q + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] # new if phi_nr: - phiinterest=phislist[phi] - phiindices = [i for i,x in enumerate(phis) if x == phiinterest] + phiinterest = phislist[phi] + phiindices = [i for i, x in enumerate(phis) if x == phiinterest] else: - phiinterest=phi - phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new - ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] #-> this is the original + phiinterest = phi + phiindices = [i for i, x in enumerate(phis) if np.abs(x - phiinterest) < p_thresh] # new + ret_list = [ + list(set(qindices).intersection(phiindices))[0], + qinterest, + phiinterest, + qslist, + phislist, + ] # -> this is the original if silent == False: - print('list of available Qs:') + print("list of available Qs:") print(qslist) - print('list of available phis:') + print("list of available phis:") print(phislist) - print('Roi number for Q= '+str(ret_list[1])+' and phi= '+str(ret_list[2])+': '+str(ret_list[0])) + print("Roi number for Q= " + str(ret_list[1]) + " and phi= " + str(ret_list[2]) + ": " + str(ret_list[0])) return ret_list -def get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): - '''YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, + +def get_fit_by_two_linear( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) Input: x: 1D np.array @@ -1560,106 +1704,111 @@ def get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): fit parameter (slope, background) of linear fit2 convinent fit class, gmfit2(x) gives yvale - ''' + """ if xrange == None: - x1,x2 = min(x), max(x) - x1,x2=xrange + x1, x2 = min(x), max(x) + x1, x2 = xrange if mid_xpoint2 == None: - mid_xpoint2= mid_xpoint1 - D1, gmfit1 = linear_fit( x,y, xrange= [ x1,mid_xpoint1 ]) - D2, gmfit2 = linear_fit( x,y, xrange= [mid_xpoint2, x2 ]) + mid_xpoint2 = mid_xpoint1 + D1, gmfit1 = linear_fit(x, y, xrange=[x1, mid_xpoint1]) + D2, gmfit2 = linear_fit(x, y, xrange=[mid_xpoint2, x2]) return D1, gmfit1, D2, gmfit2 -def get_cross_point( x, gmfit1, gmfit2 ): - '''YG Octo 16,2017 + +def get_cross_point(x, gmfit1, gmfit2): + """YG Octo 16,2017 Get 
croess point of two curve - ''' + """ y1 = gmfit1(x) y2 = gmfit2(x) - return x[np.argmin( np.abs(y1-y2) )] + return x[np.argmin(np.abs(y1 - y2))] + -def get_curve_turning_points( x, y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): - '''YG Octo 16,2017 +def get_curve_turning_points( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 Get a turning point of a curve by doing a two-linear fit - ''' - D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2, xrange ) - return get_cross_point( x, gmfit1, gmfit2 ) + """ + D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x, y, mid_xpoint1, mid_xpoint2, xrange) + return get_cross_point(x, gmfit1, gmfit2) -def plot_fit_two_linear_fit(x,y, gmfit1, gmfit2, ax=None ): - '''YG Octo 16,2017 Plot data with two fitted linear func - ''' +def plot_fit_two_linear_fit(x, y, gmfit1, gmfit2, ax=None): + """YG Octo 16,2017 Plot data with two fitted linear func""" if ax == None: - fig, ax =plt.subplots() - plot1D( x = x, y = y, ax =ax, c='k', legend='data', m='o', ls='')#logx=True, logy=True ) - plot1D( x = x, y = gmfit1(x), ax =ax, c='r', m='', ls='-',legend='fit1' ) - plot1D( x = x, y = gmfit2(x), ax =ax, c='b', m='', ls='-',legend='fit2' ) + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, c="k", legend="data", m="o", ls="") # logx=True, logy=True ) + plot1D(x=x, y=gmfit1(x), ax=ax, c="r", m="", ls="-", legend="fit1") + plot1D(x=x, y=gmfit2(x), ax=ax, c="b", m="", ls="-", legend="fit2") return ax -def linear_fit( x,y, xrange=None): - '''YG Octo 16,2017 copied from XPCS_SAXS +def linear_fit(x, y, xrange=None): + """YG Octo 16,2017 copied from XPCS_SAXS a linear fit - ''' + """ if xrange != None: xmin, xmax = xrange - x1,x2 = find_index( x,xmin,tolerance= None),find_index( x,xmax,tolerance= None) + x1, x2 = find_index(x, xmin, tolerance=None), find_index(x, xmax, tolerance=None) x_ = x[x1:x2] y_ = y[x1:x2] else: - x_=x - y_=y + x_ = x + y_ = y D0 = np.polyfit(x_, y_, 1) gmfit = np.poly1d(D0) return D0, gmfit -def find_index( x,x0,tolerance= None): - '''YG Octo 16,2017 copied from SAXS +def find_index(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS find index of x0 in x #find the position of P in a list (plist) with tolerance - ''' + """ - N=len(x) - i=0 + N = len(x) + i = 0 if x0 > max(x): - position= len(x) -1 - elif x0 max(x): - position= len(x) -1 - elif x0 di: try: els = line.split() - if good_cols == None: - temp = np.array( els, dtype=float ) + if good_cols == None: + temp = np.array(els, dtype=float) else: - temp= np.array( [els[j] for j in good_cols], dtype=float ) - data=np.vstack( (data,temp)) + temp = np.array([els[j] for j in good_cols], dtype=float) + data = np.vstack((data, temp)) except: pass if labels == None: labels = np.arange(data.shape[1]) - df = pds.DataFrame( data, index= np.arange(data.shape[0]), columns= labels ) + df = pds.DataFrame(data, index=np.arange(data.shape[0]), columns=labels) return df - -def get_print_uids( start_time, stop_time, return_all_info=False): - '''Update Feb 20, 2018 also return full uids +def get_print_uids(start_time, stop_time, return_all_info=False): + """Update Feb 20, 2018 also return full uids YG. 
Octo 3, 2017@CHX Get full uids and print uid plus Measurement contents by giving start_time, stop_time - ''' - hdrs = list( db(start_time= start_time, stop_time = stop_time) ) - fuids = np.zeros( len(hdrs),dtype=object) - uids = np.zeros( len(hdrs),dtype=object) - sids = np.zeros( len(hdrs), dtype=object) - n=0 - all_info = np.zeros( len(hdrs), dtype=object) - for i in range(len(hdrs)): - fuid = hdrs[-i-1]['start']['uid'] #reverse order - uid = fuid[:6] #reverse order - sid = hdrs[-i-1]['start']['scan_id'] - fuids[n]=fuid - uids[n]=uid - sids[n]=sid - date = time.ctime(hdrs[-i-1]['start']['time']) + """ + hdrs = list(db(start_time=start_time, stop_time=stop_time)) + fuids = np.zeros(len(hdrs), dtype=object) + uids = np.zeros(len(hdrs), dtype=object) + sids = np.zeros(len(hdrs), dtype=object) + n = 0 + all_info = np.zeros(len(hdrs), dtype=object) + for i in range(len(hdrs)): + fuid = hdrs[-i - 1]["start"]["uid"] # reverse order + uid = fuid[:6] # reverse order + sid = hdrs[-i - 1]["start"]["scan_id"] + fuids[n] = fuid + uids[n] = uid + sids[n] = sid + date = time.ctime(hdrs[-i - 1]["start"]["time"]) try: - m = hdrs[-i-1]['start']['Measurement'] + m = hdrs[-i - 1]["start"]["Measurement"] except: - m='' - info = "%3d: uid = '%s' ##%s #%s: %s-- %s "%(i,uid,date,sid,m, fuid) - print( info ) + m = "" + info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + print(info) if return_all_info: - all_info[n]=info - n +=1 + all_info[n] = info + n += 1 if not return_all_info: return fuids, uids, sids else: return fuids, uids, sids, all_info - -def get_last_uids( n=-1 ): - '''YG Sep 26, 2017 - A Convinient function to copy uid to jupyter for analysis''' - uid = db[n]['start']['uid'][:8] - sid = db[n]['start']['scan_id'] - m = db[n]['start']['Measurement'] - return " uid = '%s' #(scan num: %s (Measurement: %s "%(uid,sid,m) +def get_last_uids(n=-1): + """YG Sep 26, 2017 + A Convinient function to copy uid to jupyter for analysis""" + uid = db[n]["start"]["uid"][:8] + sid = db[n]["start"]["scan_id"] + m = db[n]["start"]["Measurement"] + return " uid = '%s' #(scan num: %s (Measurement: %s " % (uid, sid, m) - -def get_base_all_filenames( inDir, base_filename_cut_length = -7 ): - '''YG Sep 26, 2017 +def get_base_all_filenames(inDir, base_filename_cut_length=-7): + """YG Sep 26, 2017 Get base filenames and their related all filenames Input: inDir, str, input data dir @@ -1886,12 +2043,13 @@ def get_base_all_filenames( inDir, base_filename_cut_length = -7 ): Output: dict: keys, base filename vales, all realted filename - ''' + """ from os import listdir from os.path import isfile, join - tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) tifsc = list(tifs.copy()) - utifs = np.sort( np.unique( np.array([ f[:base_filename_cut_length] for f in tifs] ) ) )[::-1] + utifs = np.sort(np.unique(np.array([f[:base_filename_cut_length] for f in tifs])))[::-1] files = {} for uf in utifs: files[uf] = [] @@ -1899,15 +2057,15 @@ def get_base_all_filenames( inDir, base_filename_cut_length = -7 ): reName = [] for i in range(len(tifsc)): if uf in tifsc[i]: - files[uf].append( tifsc[i] ) + files[uf].append(tifsc[i]) reName.append(tifsc[i]) for fn in reName: tifsc.remove(fn) return files -def create_ring_mask( shape, r1, r2, center, mask=None): - '''YG. Sep 20, 2017 Develop@CHX +def create_ring_mask(shape, r1, r2, center, mask=None): + """YG. 
Sep 20, 2017 Develop@CHX
    Create 2D ring mask
    input:
        shape: two integer number list, mask shape, e.g., [100,100]
        r1: the inner radius in unit of pixel
        r2: the outer radius in unit of pixel
        center: two integer number list, [cx,cy], ring center, e.g., [30,50]
    output:
        2D numpy array, 0,1 type
-    '''
+    """

-    m = np.zeros( shape, dtype= bool)
-    rr,cc = disk((center[1], center[0]), r2, shape=shape )
-    m[rr,cc] = 1
-    rr,cc = disk((center[1], center[0]), r1,shape=shape )
-    m[rr,cc] = 0
+    m = np.zeros(shape, dtype=bool)
+    rr, cc = disk((center[1], center[0]), r2, shape=shape)
+    m[rr, cc] = 1
+    rr, cc = disk((center[1], center[0]), r1, shape=shape)
+    m[rr, cc] = 0
    if mask != None:
        m += mask
    return m

+
 def get_image_edge(img):
-    '''
+    """
    Y.G. Developed at Sep 8, 2017 @CHX
    Get sharp edges of an image
    img: two-D array, e.g., a roi mask
-    '''
-    edg_ = prewitt(img/1.0)
+    """
+    edg_ = prewitt(img / 1.0)
    edg = np.zeros_like(edg_)
    w = np.where(edg_ > 1e-10)
    edg[w] = img[w]
-    edg[np.where(edg==0)] = 1
+    edg[np.where(edg == 0)] = 1
    return edg

@@ -1949,24 +2109,22 @@
-def get_image_with_roi( img, roi_mask, scale_factor = 2):
-    '''
+
+def get_image_with_roi(img, roi_mask, scale_factor=2):
+    """
    Y.G. Developed at Sep 8, 2017 @CHX
    Get image with edges of roi_mask by doing
        i) get edges of roi_mask by function get_image_edge
        ii) scale img by scale_factor
    img: two-D array for image
    roi_mask: two-D array for ROI
    scale_factor: scaling factor of ROI in image
-    '''
-    edg = get_image_edge( roi_mask )
+    """
+    edg = get_image_edge(roi_mask)
    img_ = img.copy()
    w = np.where(roi_mask)
-    img_[w] = img[w] * scale_factor
+    img_[w] = img[w] * scale_factor
    return img_ * edg


-def get_today_date( ):
+def get_today_date():
    from time import gmtime, strftime
-    return strftime("%m-%d-%Y", gmtime() )
+
+    return strftime("%m-%d-%Y", gmtime())

@@ -1977,135 +2135,150 @@
-def move_beamstop( mask, xshift, yshift ):
-    '''Y.G. Developed at July 18, 2017 @CHX
+def move_beamstop(mask, xshift, yshift):
+    """Y.G. Developed at July 18, 2017 @CHX
    Create new mask by shifting the old one with xshift, yshift
    Input
    ---
    mask: 2D numpy array, 0, 1
    xshift, integer, shift value along x direction
    yshift, integer, shift value along y direction
    Output
    ---
    mask, 2D numpy array,
-    '''
+    """
    m = np.ones_like(mask)
-    W,H = mask.shape
-    w = np.where(mask==0)
-    nx, ny = w[0]+ int(yshift), w[1]+ int(xshift )
-    gw = np.where( (nx >= 0) & (nx<W) & (ny >= 0) & (ny<H) )
-    nx = nx[ gw ]
-    ny = ny[ gw ]
-    m[ nx,ny ] = 0
+    W, H = mask.shape
+    w = np.where(mask == 0)
+    nx, ny = w[0] + int(yshift), w[1] + int(xshift)
+    gw = np.where((nx >= 0) & (nx < W) & (ny >= 0) & (ny < H))
+    nx = nx[gw]
+    ny = ny[gw]
+    m[nx, ny] = 0
    return m


 def validate_uid(uid):
-    '''check uid whether be able to load data'''
+    """check whether a uid is able to load data"""
    try:
        sud = get_sid_filenames(db[uid])
        print(sud)
-        md = get_meta_data( uid )
-        imgs = load_data( uid, md['detector'], reverse= True )
+        md = get_meta_data(uid)
+        imgs = load_data(uid, md["detector"], reverse=True)
        print(imgs)
        return 1
    except:
-        print("Can't load this uid=%s!"%uid)
+        print("Can't load this uid=%s!" % uid)
        return 0


-def validate_uid_dict( uid_dict ):
-    ''' Y.G. developed July 17, 2017 @CHX
+def validate_uid_dict(uid_dict):
+    """Y.G. developed July 17, 2017 @CHX
    Check whether each uid in a dict can load data or not
    uid_dict: dict, key: a meaningful description, val: a list of uids
-    '''
+    """
    badn = 0
-    badlist=[]
-    for k in list(uids.keys()):
-        for uid in uids[k]:
+    badlist = []
+    for k in list(uid_dict.keys()):
+        for uid in uid_dict[k]:
            flag = validate_uid(uid)
            if not flag:
                badn += 1
-                badlist.append( uid )
-    print( 'There are %s bad uids:%s in this uid_dict.'%(badn, badlist))
+                badlist.append(uid)
+    print("There are %s bad uids:%s in this uid_dict." % (badn, badlist))
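

# A minimal usage sketch for validate_uid_dict (the uid strings below are
# placeholders, not real run uids):
#
#     uid_dict = {
#         "sample_A, T=300K": ["af8f66", "b2c9d1"],
#         "sample_B, T=250K": ["c3e7a2"],
#     }
#     validate_uid_dict(uid_dict)  # prints a summary of any uids that fail to load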


 def get_mass_center_one_roi(FD, roi_mask, roi_ind):
-    '''Get the mass center (in pixel unit) of one roi in a time series FD
+    """Get the mass center (in pixel unit) of one roi in a time series FD
    FD: handler for a compressed time series
    roi_mask: the roi array
    roi_ind: the interest index of the roi
-    '''
+    """
    import scipy
+
-    m = (roi_mask == roi_ind)
-    cx, cy = np.zeros( int( ( FD.end - FD.beg)/1 ) ), np.zeros( int( ( FD.end - FD.beg)/1 ) )
-    n =0
-    for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get mass center of one ROI of each frame' ):
+    m = roi_mask == roi_ind
+    cx, cy = np.zeros(int((FD.end - FD.beg) / 1)), np.zeros(int((FD.end - FD.beg) / 1))
+    n = 0
+    for i in tqdm(range(FD.beg, FD.end, 1), desc="Get mass center of one ROI of each frame"):
        img = FD.rdframe(i) * m
-        c = scipy.ndimage.measurements.center_of_mass(img)
+        c = scipy.ndimage.center_of_mass(img)
        cx[n], cy[n] = int(c[0]), int(c[1])
-        n +=1
-    return cx,cy
+        n += 1
+    return cx, cy


 def get_current_pipeline_filename(NOTEBOOK_FULL_PATH):
-    '''Y.G. April 25, 2017
-       Get the current running pipeline filename and path
-       Assume the piple is located in /XF11ID/
-       Return, path and filename
-    '''
+    """Y.G. April 25, 2017
+    Get the current running pipeline filename and path
+    Assume the pipeline is located in /XF11ID/
+    Return, path and filename
+    """
    from IPython.core.magics.display import Javascript
+
    if False:
-        Javascript( '''
+        Javascript(
+            """
            var nb = IPython.notebook;
            var kernel = IPython.notebook.kernel;
            var command = "NOTEBOOK_FULL_PATH = '" + nb.base_url + nb.notebook_path + "'";
            kernel.execute(command);
-        ''' )
+            """
+        )
    print(NOTEBOOK_FULL_PATH)
-    filename = NOTEBOOK_FULL_PATH.split('/')[-1]
-    path = '/XF11ID/'
-    for s in NOTEBOOK_FULL_PATH.split('/')[3:-1]:
-        path += s + '/'
+    filename = NOTEBOOK_FULL_PATH.split("/")[-1]
+    path = "/XF11ID/"
+    for s in NOTEBOOK_FULL_PATH.split("/")[3:-1]:
+        path += s + "/"
    return path, filename


 def get_current_pipeline_fullpath(NOTEBOOK_FULL_PATH):
-    '''Y.G. April 25, 2017
-       Get the current running pipeline full filepath
-       Assume the piple is located in /XF11ID/
-       Return, the fullpath (path + filename)
-    '''
+    """Y.G. April 25, 2017
+    Get the current running pipeline full filepath
+    Assume the pipeline is located in /XF11ID/
+    Return, the fullpath (path + filename)
+    """
-    p,f = get_current_pipeline_filename(NOTEBOOK_FULL_PATH)
+    p, f = get_current_pipeline_filename(NOTEBOOK_FULL_PATH)
    return p + f


 def save_current_pipeline(NOTEBOOK_FULL_PATH, outDir):
-    '''Y.G. April 25, 2017
-       Save the current running pipeline to outDir
-       The saved pipeline should be a snapshot of the current state.
-    '''
+    """Y.G. April 25, 2017
+    Save the current running pipeline to outDir
+    The saved pipeline should be a snapshot of the current state.
+    """

-    import shutil
-    path, fp = get_current_pipeline_filename(NOTEBOOK_FULL_PATH)
-    shutil.copyfile( path + fp, outDir + fp )
+    import shutil
+
+    path, fp = get_current_pipeline_filename(NOTEBOOK_FULL_PATH)
+    shutil.copyfile(path + fp, outDir + fp)

-    print('This pipeline: %s is saved in %s.'%(fp, outDir))
+    print("This pipeline: %s is saved in %s." 
% (fp, outDir)) -def plot_g1( taus, g2, g2_fit_paras, qr=None, ylim=[0,1], title=''): - '''Dev Apr 19, 2017, - Plot one-time correlation, giving taus, g2, g2_fit''' +def plot_g1(taus, g2, g2_fit_paras, qr=None, ylim=[0, 1], title=""): + """Dev Apr 19, 2017, + Plot one-time correlation, giving taus, g2, g2_fit""" noqs = g2.shape[1] - fig,ax=plt.subplots() + fig, ax = plt.subplots() if qr == None: qr = np.arange(noqs) for i in range(noqs): - b = g2_fit_paras['baseline'][i] - beta = g2_fit_paras['beta'][i] - y= np.sqrt( np.abs(g2[1:,i] - b)/beta ) - plot1D( x = taus[1:], y= y, ax=ax, legend= 'q=%s'%qr[i], ls='-', lw=2, - m=markers[i], c= colors[i], title=title, ylim=ylim, - logx=True, legend_size= 8 ) - ax.set_ylabel( r"$g_1$" + '(' + r'$\tau$' + ')' ) + b = g2_fit_paras["baseline"][i] + beta = g2_fit_paras["beta"][i] + y = np.sqrt(np.abs(g2[1:, i] - b) / beta) + plot1D( + x=taus[1:], + y=y, + ax=ax, + legend="q=%s" % qr[i], + ls="-", + lw=2, + m=markers[i], + c=colors[i], + title=title, + ylim=ylim, + logx=True, + legend_size=8, + ) + ax.set_ylabel(r"$g_1$" + "(" + r"$\tau$" + ")") ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) return ax - -def filter_roi_mask( filter_dict, roi_mask, avg_img, filter_type= 'ylim' ): - '''Remove bad pixels in roi_mask. The bad pixel is defined by the filter_dict, +def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): + """Remove bad pixels in roi_mask. The bad pixel is defined by the filter_dict, if filter_type ='ylim', the filter_dict wit key as q and each value gives a high and low limit thresholds. The value of the pixels in avg_img above or below the limit are considered as bad pixels. if filter_type='badpix': the filter_dict wit key as q and each value gives a list of bad pixel. @@ -2114,116 +2287,107 @@ def filter_roi_mask( filter_dict, roi_mask, avg_img, filter_type= 'ylim' ): filter_dict: keys, as roi_mask integer, value, by default is [None,None], is the limit, example, {2:[4,5], 10:[0.1,1.1]} NOTE: first q = 1 (not 0) - ''' - rm = roi_mask.copy() - rf = np.ravel(rm) + """ + rm = roi_mask.copy() + rf = np.ravel(rm) for k in list(filter_dict.keys()): - pixel = roi.roi_pixel_values(avg_img, roi_mask, [k] )[0][0] - #print( np.max(pixel), np.min(pixel) ) - if filter_type == 'ylim': - xmin,xmax = filter_dict[k] - badp =np.where( (pixel>= xmax) | ( pixel <= xmin) )[0] + pixel = roi.roi_pixel_values(avg_img, roi_mask, [k])[0][0] + # print( np.max(pixel), np.min(pixel) ) + if filter_type == "ylim": + xmin, xmax = filter_dict[k] + badp = np.where((pixel >= xmax) | (pixel <= xmin))[0] else: badp = filter_dict[k] - if len(badp)!=0: - pls = np.where([rf==k])[1] - rf[ pls[badp] ] = 0 + if len(badp) != 0: + pls = np.where([rf == k])[1] + rf[pls[badp]] = 0 return rm ## -#Dev at March 31 for create Eiger chip mask -def create_chip_edges_mask( det='1M' ): - ''' Create a chip edge mask for Eiger detector - - ''' - if det == '1M': +# Dev at March 31 for create Eiger chip mask +def create_chip_edges_mask(det="1M"): + """Create a chip edge mask for Eiger detector""" + if det == "1M": shape = [1065, 1030] w = 4 - mask = np.ones( shape , dtype = np.int32) - cx = [ 1030//4 *i for i in range(1,4) ] - #cy = [ 1065//4 *i for i in range(1,4) ] - cy = [808, 257 ] - #print (cx, cy ) + mask = np.ones(shape, dtype=np.int32) + cx = [1030 // 4 * i for i in range(1, 4)] + # cy = [ 1065//4 *i for i in range(1,4) ] + cy = [808, 257] + # print (cx, cy ) for c in cx: - mask[:, c-w//2:c+w//2 ] = 0 + mask[:, c - w // 2 : c + w // 2] = 0 for c in cy: - mask[ c-w//2:c+w//2, : 
] = 0 + mask[c - w // 2 : c + w // 2, :] = 0 return mask -def create_ellipse_donut( cx, cy , wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): - Nmax = np.max( np.unique( roi_mask ) ) - rr1, cc1 = ellipse( cy,cx, wy_inner, wx_inner ) - rr2, cc2 = ellipse( cy, cx, wy_inner + gap, wx_inner +gap ) - rr3, cc3 = ellipse( cy, cx, wy_outer,wx_outer ) - roi_mask[rr3,cc3] = 2 + Nmax - roi_mask[rr2,cc2] = 0 - roi_mask[rr1,cc1] = 1 + Nmax - return roi_mask -def create_box( cx, cy, wx, wy, roi_mask): - Nmax = np.max( np.unique( roi_mask ) ) - for i, [cx_,cy_] in enumerate(list( zip( cx,cy ))): #create boxes - x = np.array( [ cx_-wx, cx_+wx, cx_+wx, cx_-wx]) - y = np.array( [ cy_-wy, cy_-wy, cy_+wy, cy_+wy]) - rr, cc = polygon( y,x) - roi_mask[rr,cc] = i +1 + Nmax +def create_ellipse_donut(cx, cy, wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): + Nmax = np.max(np.unique(roi_mask)) + rr1, cc1 = ellipse(cy, cx, wy_inner, wx_inner) + rr2, cc2 = ellipse(cy, cx, wy_inner + gap, wx_inner + gap) + rr3, cc3 = ellipse(cy, cx, wy_outer, wx_outer) + roi_mask[rr3, cc3] = 2 + Nmax + roi_mask[rr2, cc2] = 0 + roi_mask[rr1, cc1] = 1 + Nmax return roi_mask +def create_box(cx, cy, wx, wy, roi_mask): + Nmax = np.max(np.unique(roi_mask)) + for i, [cx_, cy_] in enumerate(list(zip(cx, cy))): # create boxes + x = np.array([cx_ - wx, cx_ + wx, cx_ + wx, cx_ - wx]) + y = np.array([cy_ - wy, cy_ - wy, cy_ + wy, cy_ + wy]) + rr, cc = polygon(y, x) + roi_mask[rr, cc] = i + 1 + Nmax + return roi_mask -def create_folder( base_folder, sub_folder ): - ''' +def create_folder(base_folder, sub_folder): + """ Crate a subfolder under base folder Input: base_folder: full path of the base folder sub_folder: sub folder name to be created Return: Created full path of the created folder - ''' + """ - data_dir0 = os.path.join( base_folder, sub_folder ) + data_dir0 = os.path.join(base_folder, sub_folder) ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) - print('Results from this analysis will be stashed in the directory %s' % data_dir0) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 - - - -def create_user_folder( CYCLE, username=None, default_dir= '/XF11ID/analysis/' ): - ''' +def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): + """ Crate a folder for saving user data analysis result Input: CYCLE: run cycle username: if None, get username from the jupyter username Return: Created folder name - ''' - if username !='Default': + """ + if username != "Default": if username == None: username = getpass.getuser() - data_dir0 = os.path.join(default_dir, CYCLE, username, 'Results/') + data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") else: - data_dir0 = os.path.join(default_dir, CYCLE +'/') + data_dir0 = os.path.join(default_dir, CYCLE + "/") ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) - print('Results from this analysis will be stashed in the directory %s' % data_dir0) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 - - - - ################################## #########For dose analysis ####### ################################## -def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): - ''' +def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): + """ Calculate the frame number to be correlated by giving a X-ray 
exposure dose Paramters: @@ -2238,12 +2402,12 @@ def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): exp_time = 1.34, dead_time = 2) --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) - ''' - return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att ) + """ + return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att) -def get_multi_tau_lag_steps( fra_max, num_bufs = 8 ): - ''' +def get_multi_tau_lag_steps(fra_max, num_bufs=8): + """ Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max Parameters: fra_max: integer, the maximun frame number @@ -2254,16 +2418,14 @@ def get_multi_tau_lag_steps( fra_max, num_bufs = 8 ): e.g., get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) - ''' - num_levels = int(np.log( fra_max/(num_bufs-1))/np.log(2) +1) +1 + """ + num_levels = int(np.log(fra_max / (num_bufs - 1)) / np.log(2) + 1) + 1 tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) return lag_steps[lag_steps < fra_max] - -def get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = True, - num_bufs = 8): - ''' +def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True, num_bufs=8): + """ Get taus for dose dependent analysis Parameters: fra_max_list: a list, a lsit of largest available frame number @@ -2280,30 +2442,30 @@ def get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = T 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) } - ''' + """ tausd = {} for n in fra_max_list: if max_fra_num != None: L = max_fra_num else: L = np.infty - if n>L: - warnings.warn("Warning: the dose value is too large, and please" - "check the maxium dose in this data set and give a smaller dose value." - "We will use the maxium dose of the data.") + if n > L: + warnings.warn( + "Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." + "We will use the maxium dose of the data." + ) n = L if log_taus: - lag_steps = get_multi_tau_lag_steps(n, num_bufs) + lag_steps = get_multi_tau_lag_steps(n, num_bufs) else: - lag_steps = np.arange( n ) + lag_steps = np.arange(n) tausd[n] = lag_steps * acq_time return tausd - - -def check_lost_metadata(md, Nimg=None, inc_x0 =None, inc_y0= None, pixelsize=7.5*10*(-5) ): - '''Y.G. Dec 31, 2016, check lost metadata +def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * 10 * (-5)): + """Y.G. Dec 31, 2016, check lost metadata Parameter: md: dict, meta data dictionay @@ -2317,56 +2479,55 @@ def check_lost_metadata(md, Nimg=None, inc_x0 =None, inc_y0= None, pixelsize=7.5 timeperframe: acquisition time is sec center: list, [x,y], incident beam center in pixel Will also update md - ''' + """ mdn = md.copy() - if 'number of images' not in list(md.keys()): - md['number of images'] = Nimg - if 'x_pixel_size' not in list(md.keys()): - md['x_pixel_size'] = 7.5000004e-05 - dpix = md['x_pixel_size'] * 1000. 
#in mm, eiger 4m is 0.075 mm + if "number of images" not in list(md.keys()): + md["number of images"] = Nimg + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 7.5000004e-05 + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm try: - lambda_ =md['wavelength'] + lambda_ = md["wavelength"] except: - lambda_ =md['incident_wavelength'] # wavelegth of the X-rays in Angstroms + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms try: - Ldet = md['det_distance'] - if Ldet<=1000: - Ldet *=1000 - md['det_distance'] = Ldet + Ldet = md["det_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["det_distance"] = Ldet except: - Ldet = md['detector_distance'] - if Ldet<=1000: - Ldet *=1000 - md['detector_distance'] = Ldet + Ldet = md["detector_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["detector_distance"] = Ldet - - try:#try exp time from detector - exposuretime= md['count_time'] #exposure time in sec + try: # try exp time from detector + exposuretime = md["count_time"] # exposure time in sec except: - exposuretime= md['cam_acquire_time'] #exposure time in sec - try:#try acq time from detector - acquisition_period = md['frame_time'] + exposuretime = md["cam_acquire_time"] # exposure time in sec + try: # try acq time from detector + acquisition_period = md["frame_time"] except: try: - acquisition_period = md['acquire period'] + acquisition_period = md["acquire period"] except: - uid = md['uid'] - acquisition_period = float( db[uid]['start']['acquire period'] ) + uid = md["uid"] + acquisition_period = float(db[uid]["start"]["acquire period"]) timeperframe = acquisition_period if inc_x0 != None: - mdn['beam_center_x']= inc_y0 - print( 'Beam_center_x has been changed to %s. (no change in raw metadata): '%inc_y0) + mdn["beam_center_x"] = inc_y0 + print("Beam_center_x has been changed to %s. (no change in raw metadata): " % inc_y0) if inc_y0 != None: - mdn['beam_center_y']= inc_x0 - print( 'Beam_center_y has been changed to %s. (no change in raw metadata): '%inc_x0) - center = [ int(mdn['beam_center_x']),int( mdn['beam_center_y'] ) ] #beam center [y,x] for python image - center=[center[1], center[0]] + mdn["beam_center_y"] = inc_x0 + print("Beam_center_y has been changed to %s. (no change in raw metadata): " % inc_x0) + center = [int(mdn["beam_center_x"]), int(mdn["beam_center_y"])] # beam center [y,x] for python image + center = [center[1], center[0]] return dpix, lambda_, Ldet, exposuretime, timeperframe, center -def combine_images( filenames, outputfile, outsize=(2000, 2400)): - '''Y.G. Dec 31, 2016 +def combine_images(filenames, outputfile, outsize=(2000, 2400)): + """Y.G. 
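
# Illustrative usage sketch for check_lost_metadata; the metadata values below
# are hypothetical, not taken from any real run. Note that the 'pixelsize'
# keyword is never used inside the function: dpix is derived from
# md['x_pixel_size'] (filled with the Eiger4M default if missing).
md_demo = {
    "incident_wavelength": 1.285,  # Angstrom
    "det_distance": 4.89,  # stored in m here; values <= 1000 are promoted to mm
    "count_time": 0.00134,  # exposure time in s
    "frame_time": 0.00134,  # acquisition period in s
    "beam_center_x": 1143,
    "beam_center_y": 1229,
}
dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata(md_demo, Nimg=1000)
# dpix -> 0.075 (mm), Ldet -> 4890.0 (mm), center -> [1229, 1143] ([y, x])
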
Dec 31, 2016 Combine images together to one image using PIL.Image Input: filenames: list, the images names to be combined @@ -2374,45 +2535,44 @@ def combine_images( filenames, outputfile, outsize=(2000, 2400)): outsize: the combined image size Output: save a combined image file - ''' - N = len( filenames) - #nx = np.int( np.ceil( np.sqrt(N)) ) - #ny = np.int( np.ceil( N / float(nx) ) ) - - ny = int( np.ceil( np.sqrt(N)) ) - nx = int( np.ceil( N / float(ny) ) ) - - #print(nx,ny) - result = Image.new("RGB", outsize, color=(255,255,255,0)) - basewidth = int( outsize[0]/nx ) - hsize = int( outsize[1]/ny ) + """ + N = len(filenames) + # nx = np.int( np.ceil( np.sqrt(N)) ) + # ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int(np.ceil(np.sqrt(N))) + nx = int(np.ceil(N / float(ny))) + + # print(nx,ny) + result = Image.new("RGB", outsize, color=(255, 255, 255, 0)) + basewidth = int(outsize[0] / nx) + hsize = int(outsize[1] / ny) for index, file in enumerate(filenames): path = os.path.expanduser(file) img = Image.open(path) bands = img.split() - ratio = img.size[1]/ img.size[0] #h/w + ratio = img.size[1] / img.size[0] # h/w if hsize > basewidth * ratio: basewidth_ = basewidth - hsize_ = int( basewidth * ratio ) + hsize_ = int(basewidth * ratio) else: - basewidth_ = int( hsize/ratio ) - hsize_ = hsize - #print( index, file, basewidth, hsize ) - size = (basewidth_,hsize_) + basewidth_ = int(hsize / ratio) + hsize_ = hsize + # print( index, file, basewidth, hsize ) + size = (basewidth_, hsize_) bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] - img = Image.merge('RGBA', bands) + img = Image.merge("RGBA", bands) x = index % nx * basewidth y = index // nx * hsize w, h = img.size - #print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) - result.paste(img, (x, y, x + w, y + h )) - result.save( outputfile,quality=100, optimize=True ) - print( 'The combined image is saved as: %s'%outputfile) + # print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h)) + result.save(outputfile, quality=100, optimize=True) + print("The combined image is saved as: %s" % outputfile) -def get_qval_dict( qr_center, qz_center=None, qval_dict = None, multi_qr_for_one_qz= True, - one_qz_multi_qr = True): - '''Y.G. Dec 27, 2016 +def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz=True, one_qz_multi_qr=True): + """Y.G. 
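
# Illustrative sketch for combine_images: stitch several saved analysis plots
# into one contact sheet. The file names are placeholders; the grid is
# ny = ceil(sqrt(N)) rows by nx = ceil(N / ny) columns, and each tile is
# resized so that its aspect ratio is preserved.
fig_files = ["/tmp/g2_fit.png", "/tmp/roi_map.png", "/tmp/imgsum.png"]
combine_images(fig_files, "/tmp/summary.png", outsize=(2000, 2400))
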
Dec 27, 2016 Map the roi label array with qr or (qr,qz) or (q//, q|-) values Parameters: qr_center: list, a list of qr @@ -2427,314 +2587,386 @@ def get_qval_dict( qr_center, qz_center=None, qval_dict = None, multi_qr_for_on Return: qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) - ''' + """ if qval_dict == None: qval_dict = {} maxN = 0 else: - maxN = np.max( list( qval_dict.keys() ) ) +1 + maxN = np.max(list(qval_dict.keys())) + 1 if qz_center != None: if multi_qr_for_one_qz: if one_qz_multi_qr: - for qzind in range( len( qz_center)): - for qrind in range( len( qr_center)): - qval_dict[ maxN + qzind* len( qr_center) + qrind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) + for qzind in range(len(qz_center)): + for qrind in range(len(qr_center)): + qval_dict[maxN + qzind * len(qr_center) + qrind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) else: - for qrind in range( len( qr_center)): - for qzind in range( len( qz_center)): - qval_dict[ maxN + qrind* len( qz_center) + qzind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) - + for qrind in range(len(qr_center)): + for qzind in range(len(qz_center)): + qval_dict[maxN + qrind * len(qz_center) + qzind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) else: - for i, [qr, qz] in enumerate(zip( qr_center, qz_center)): - qval_dict[ maxN + i ] = np.array( [ qr, qz ] ) + for i, [qr, qz] in enumerate(zip(qr_center, qz_center)): + qval_dict[maxN + i] = np.array([qr, qz]) else: - for qrind in range( len( qr_center)): - qval_dict[ maxN + qrind ] = np.array( [ qr_center[qrind] ] ) + for qrind in range(len(qr_center)): + qval_dict[maxN + qrind] = np.array([qr_center[qrind]]) return qval_dict -def update_qval_dict( qval_dict1, qval_dict2 ): - ''' Y.G. Dec 31, 2016 +def update_qval_dict(qval_dict1, qval_dict2): + """Y.G. Dec 31, 2016 Update qval_dict1 with qval_dict2 Input: qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) Output: qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) - ''' - maxN = np.max( list( qval_dict1.keys() ) ) +1 + """ + maxN = np.max(list(qval_dict1.keys())) + 1 qval_dict = {} - qval_dict.update( qval_dict1 ) - for k in list( qval_dict2.keys() ): - qval_dict[k + maxN ] = qval_dict2[k] + qval_dict.update(qval_dict1) + for k in list(qval_dict2.keys()): + qval_dict[k + maxN] = qval_dict2[k] return qval_dict -def update_roi_mask( roi_mask1, roi_mask2 ): - ''' Y.G. Dec 31, 2016 + +def update_roi_mask(roi_mask1, roi_mask2): + """Y.G. Dec 31, 2016 Update qval_dict1 with qval_dict2 Input: roi_mask1, 2d-array, label array, same shape as xpcs frame, roi_mask2, 2d-array, label array, same shape as xpcs frame, Output: roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 - ''' + """ roi_mask = roi_mask1.copy() - w= np.where( roi_mask2 ) - roi_mask[w] = roi_mask2[w] + np.max( roi_mask ) + w = np.where(roi_mask2) + roi_mask[w] = roi_mask2[w] + np.max(roi_mask) return roi_mask -def check_bad_uids(uids, mask, img_choice_N = 10, bad_uids_index = None ): - '''Y.G. Dec 22, 2016 - Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. 
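
# Illustrative sketch for the q-map helpers above: build a q-value map for
# three rings, then append a fourth set; roi_mask1/roi_mask2 stand in for two
# label arrays of the same shape.
qval_dict = get_qval_dict(np.array([0.0012, 0.0024, 0.0036]))  # keys 0, 1, 2
qval_dict = update_qval_dict(qval_dict, get_qval_dict(np.array([0.0060])))  # adds key 3
# roi_mask = update_roi_mask(roi_mask1, roi_mask2)  # labels of mask2 are shifted up by max(mask1)
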
- Parameters: - uids: list, a list of uid - mask: array, bool type numpy.array - img_choice_N: random select number of the uid - bad_uids_index: a list of known bad uid list, default is None - Return: - guids: list, good uids - buids, list, bad uids - ''' +def check_bad_uids(uids, mask, img_choice_N=10, bad_uids_index=None): + """Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + """ import random + buids = [] - guids = list( uids ) - #print( guids ) + guids = list(uids) + # print( guids ) if bad_uids_index == None: bad_uids_index = [] for i, uid in enumerate(uids): - #print( i, uid ) + # print( i, uid ) if i not in bad_uids_index: - detector = get_detector( db[uid ] ) - imgs = load_data( uid, detector ) - img_samp_index = random.sample( range(len(imgs)), img_choice_N) - imgsa = apply_mask( imgs, mask ) - avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uid) + detector = get_detector(db[uid]) + imgs = load_data(uid, detector) + img_samp_index = random.sample(range(len(imgs)), img_choice_N) + imgsa = apply_mask(imgs, mask) + avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uid) if avg_img.max() == 0: - buids.append( uid ) - guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) - print( 'The bad uid is: %s'%uid ) + buids.append(uid) + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + print("The bad uid is: %s" % uid) else: - guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) - buids.append( uid ) - print( 'The bad uid is: %s'%uid ) - print( 'The total and bad uids number are %s and %s, repsectively.'%( len(uids), len(buids) ) ) + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + buids.append(uid) + print("The bad uid is: %s" % uid) + print("The total and bad uids number are %s and %s, repsectively." % (len(uids), len(buids))) return guids, buids +def find_uids(start_time, stop_time): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length -def find_uids(start_time, stop_time ): - '''Y.G. Dec 22, 2016 - A wrap funciton to find uids by giving start and end time - Return: - sids: list, scan id - uids: list, uid with 8 character length - fuids: list, uid with full length - - ''' - hdrs = db(start_time= start_time, stop_time = stop_time) + """ + hdrs = db(start_time=start_time, stop_time=stop_time) try: - print ('Totally %s uids are found.'%(len(list(hdrs)))) + print("Totally %s uids are found." 
% (len(list(hdrs)))) except: pass - sids=[] - uids=[] - fuids=[] + sids = [] + uids = [] + fuids = [] for hdr in hdrs: - s= get_sid_filenames( hdr) - #print (s[1][:8]) - sids.append( s[0] ) - uids.append( s[1][:8] ) - fuids.append( s[1] ) - sids=sids[::-1] - uids=uids[::-1] - fuids=fuids[::-1] + s = get_sid_filenames(hdr) + # print (s[1][:8]) + sids.append(s[0]) + uids.append(s[1][:8]) + fuids.append(s[1]) + sids = sids[::-1] + uids = uids[::-1] + fuids = fuids[::-1] return np.array(sids), np.array(uids), np.array(fuids) -def ployfit( y, x=None, order = 20 ): - ''' +def ployfit(y, x=None, order=20): + """ fit data (one-d array) by a ploynominal function return the fitted one-d array - ''' + """ if x == None: x = range(len(y)) pol = np.polyfit(x, y, order) return np.polyval(pol, x) -def check_bad_data_points( data, fit=True, polyfit_order = 30, legend_size = 12, - plot=True, scale=1.0, good_start=None, good_end=None, path=None, return_ylim=False ): - ''' + +def check_bad_data_points( + data, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + path=None, + return_ylim=False, +): + """ data: 1D array scale: the scale of deviation fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve - ''' - if good_start == None: - good_start=0 + """ + if good_start == None: + good_start = 0 if good_end == None: - good_end = len( data ) + good_end = len(data) bd1 = [i for i in range(0, good_start)] - bd3 = [i for i in range(good_end,len( data ) )] + bd3 = [i for i in range(good_end, len(data))] d_ = data[good_start:good_end] if fit: - pfit = ployfit( d_, order = polyfit_order) + pfit = ployfit(d_, order=polyfit_order) d = d_ - pfit else: d = d_ pfit = np.ones_like(d) * data.mean() - ymin = d.mean()-scale *d.std() - ymax = d.mean()+scale *d.std() + ymin = d.mean() - scale * d.std() + ymax = d.mean() + scale * d.std() if plot: - fig = plt.figure( ) - ax = fig.add_subplot(2,1,1 ) - plot1D( d_, ax = ax, color='k', legend='data',legend_size=legend_size ) - plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title='Find Bad Points',legend_size=legend_size ) - - ax2 = fig.add_subplot(2,1,2 ) - plot1D( d, ax = ax2,legend='difference',marker='s', color='b', ) - - #print('here') - plot1D(x=[0,len(d_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) - - plot1D(x=[0,len(d_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='',legend_size=legend_size ) + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(d_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title="Find Bad Points", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + d, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(d_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(d_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="", + legend_size=legend_size, + ) if path != None: - fp = path + '%s'%( uid ) + 
'_find_bad_points' + '.png' - plt.savefig( fp, dpi=fig.dpi) - bd2= list( np.where( np.abs(d -d.mean()) > scale *d.std() )[0] + good_start ) + fp = path + "%s" % (uid) + "_find_bad_points" + ".png" + plt.savefig(fp, dpi=fig.dpi) + bd2 = list(np.where(np.abs(d - d.mean()) > scale * d.std())[0] + good_start) if return_ylim: - return np.array( bd1 + bd2 + bd3 ), ymin, ymax,pfit + return np.array(bd1 + bd2 + bd3), ymin, ymax, pfit else: - return np.array( bd1 + bd2 + bd3 ), pfit - - - - -def get_bad_frame_list( imgsum, fit=True, polyfit_order = 30,legend_size = 12, - plot=True, scale=1.0, good_start=None, good_end=None, uid='uid',path=None, - - return_ylim=False): - ''' + return np.array(bd1 + bd2 + bd3), pfit + + +def get_bad_frame_list( + imgsum, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + uid="uid", + path=None, + return_ylim=False, +): + """ imgsum: the sum intensity of a time series scale: the scale of deviation fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve - ''' - if good_start == None: - good_start=0 + """ + if good_start == None: + good_start = 0 if good_end == None: - good_end = len( imgsum ) + good_end = len(imgsum) bd1 = [i for i in range(0, good_start)] - bd3 = [i for i in range(good_end,len( imgsum ) )] + bd3 = [i for i in range(good_end, len(imgsum))] imgsum_ = imgsum[good_start:good_end] if fit: - pfit = ployfit( imgsum_, order = polyfit_order) + pfit = ployfit(imgsum_, order=polyfit_order) data = imgsum_ - pfit else: data = imgsum_ pfit = np.ones_like(data) * data.mean() - ymin = data.mean()-scale *data.std() - ymax = data.mean()+scale *data.std() + ymin = data.mean() - scale * data.std() + ymax = data.mean() + scale * data.std() if plot: - fig = plt.figure( ) - ax = fig.add_subplot(2,1,1 ) - plot1D( imgsum_, ax = ax, color='k', legend='data',legend_size=legend_size ) - plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title=uid + '_imgsum',legend_size=legend_size ) - - ax2 = fig.add_subplot(2,1,2 ) - plot1D( data, ax = ax2,legend='difference',marker='s', color='b', ) - - #print('here') - plot1D(x=[0,len(imgsum_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) - - plot1D(x=[0,len(imgsum_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='imgsum_to_find_bad_frame',legend_size=legend_size ) + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(imgsum_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title=uid + "_imgsum", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + data, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(imgsum_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(imgsum_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="imgsum_to_find_bad_frame", + legend_size=legend_size, + ) if path != None: - fp = path + '%s'%( uid ) + '_imgsum_analysis' + '.png' - plt.savefig( fp, dpi=fig.dpi) - + 
fp = path + "%s" % (uid) + "_imgsum_analysis" + ".png" + plt.savefig(fp, dpi=fig.dpi) - - bd2= list( np.where( np.abs(data -data.mean()) > scale *data.std() )[0] + good_start ) + bd2 = list(np.where(np.abs(data - data.mean()) > scale * data.std())[0] + good_start) if return_ylim: - return np.array( bd1 + bd2 + bd3 ), ymin, ymax + return np.array(bd1 + bd2 + bd3), ymin, ymax else: - return np.array( bd1 + bd2 + bd3 ) + return np.array(bd1 + bd2 + bd3) + -def save_dict_csv( mydict, filename, mode='w'): +def save_dict_csv(mydict, filename, mode="w"): import csv + with open(filename, mode) as csv_file: spamwriter = csv.writer(csv_file) for key, value in mydict.items(): spamwriter.writerow([key, value]) - -def read_dict_csv( filename ): +def read_dict_csv(filename): import csv - with open(filename, 'r') as csv_file: + + with open(filename, "r") as csv_file: reader = csv.reader(csv_file) mydict = dict(reader) return mydict -def find_bad_pixels( FD, bad_frame_list, uid='uid'): +def find_bad_pixels(FD, bad_frame_list, uid="uid"): bpx = [] - bpy=[] + bpy = [] for n in bad_frame_list: - if n>= FD.beg and n<=FD.end: + if n >= FD.beg and n <= FD.end: f = FD.rdframe(n) - w = np.where( f == f.max()) - if len(w[0])==1: - bpx.append( w[0][0] ) - bpy.append( w[1][0] ) - - - return trans_data_to_pd( [bpx,bpy], label=[ uid+'_x', uid +'_y' ], dtype='list') - + w = np.where(f == f.max()) + if len(w[0]) == 1: + bpx.append(w[0][0]) + bpy.append(w[1][0]) + return trans_data_to_pd([bpx, bpy], label=[uid + "_x", uid + "_y"], dtype="list") +def mask_exclude_badpixel(bp, mask, uid): -def mask_exclude_badpixel( bp, mask, uid ): - - for i in range( len(bp)): - mask[ int( bp[bp.columns[0]][i] ), int( bp[bp.columns[1]][i] )]=0 + for i in range(len(bp)): + mask[int(bp[bp.columns[0]][i]), int(bp[bp.columns[1]][i])] = 0 return mask - -def print_dict( dicts, keys=None): - ''' +def print_dict(dicts, keys=None): + """ print keys: values in a dicts if keys is None: print all the keys - ''' + """ if keys == None: - keys = list( dicts.keys()) + keys = list(dicts.keys()) for k in keys: try: - print('%s--> %s'%(k, dicts[k]) ) + print("%s--> %s" % (k, dicts[k])) except: pass -def get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ): - ''' + +def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): + """ Jan 25, 2018 add default_dec opt Y.G. Dev Dec 8, 2016 @@ -2756,65 +2988,68 @@ def get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ): filename: the full path of the data start_time: the data acquisition starting time in a human readable manner And all the input metadata - ''' + """ - if 'verbose' in kwargs.keys(): # added: option to suppress output - verbose= kwargs['verbose'] + if "verbose" in kwargs.keys(): # added: option to suppress output + verbose = kwargs["verbose"] else: - verbose=True + verbose = True import time + header = db[uid] - md ={} + md = {} - md['suid'] = uid #short uid + md["suid"] = uid # short uid try: - md['filename'] = get_sid_filenames(header)[2][0] + md["filename"] = get_sid_filenames(header)[2][0] except: - md['filename'] = 'N.A.' + md["filename"] = "N.A." - devices = sorted( list(header.devices()) ) + devices = sorted(list(header.devices())) if len(devices) > 1: if verbose: # added: mute output - print( "More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'."%default_dec) - #raise ValueError("More than one device. This would have unintented consequences.") + print( + "More than one device. 
This would have unintented consequences.Currently, only the device contains 'default_dec=%s'." + % default_dec + ) + # raise ValueError("More than one device. This would have unintented consequences.") dec = devices[0] for dec_ in devices: if default_dec in dec_: dec = dec_ - #print(dec) - #detector_names = sorted( header.start['detectors'] ) - detector_names = sorted( get_detectors(db[uid]) ) - #if len(detector_names) > 1: + # print(dec) + # detector_names = sorted( header.start['detectors'] ) + detector_names = sorted(get_detectors(db[uid])) + # if len(detector_names) > 1: # raise ValueError("More than one det. This would have unintented consequences.") detector_name = detector_names[0] - #md['detector'] = detector_name - md['detector'] = get_detector( header ) - #print( md['detector'] ) - new_dict = header.config_data(dec)['primary'][0] + # md['detector'] = detector_name + md["detector"] = get_detector(header) + # print( md['detector'] ) + new_dict = header.config_data(dec)["primary"][0] for key, val in new_dict.items(): - newkey = key.replace(detector_name+"_", "") + newkey = key.replace(detector_name + "_", "") md[newkey] = val # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): # md[ k[len(dec)+1:] ]= v try: - md.update(header.start['plan_args'].items()) - md.pop('plan_args') + md.update(header.start["plan_args"].items()) + md.pop("plan_args") except: pass md.update(header.start.items()) - # print(header.start.time) - md['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(header.start['time'])) - md['stop_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime( header.stop['time'])) - try: # added: try to handle runs that don't contain image data + md["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.start["time"])) + md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"])) + try: # added: try to handle runs that don't contain image data if "primary" in header.v2: descriptor = header.v2["primary"].descriptors[0] - md['img_shape'] = descriptor['data_keys'][md['detector']]['shape'][:2][::-1] + md["img_shape"] = descriptor["data_keys"][md["detector"]]["shape"][:2][::-1] except: if verbose: print("couldn't find image shape...skip!") @@ -2822,15 +3057,14 @@ def get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ): pass md.update(kwargs) - #for k, v in sorted(md.items()): - # ... + # for k, v in sorted(md.items()): + # ... # print(f'{k}: {v}') return md - -def get_max_countc(FD, labeled_array ): +def get_max_countc(FD, labeled_array): """YG. 
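
# Illustrative call pattern for get_meta_data; it requires a configured
# databroker instance (db) and an existing uid, so this is a sketch rather
# than something runnable offline.
md = get_meta_data(uid, default_dec="eiger", verbose=False)
print_dict(md, keys=["detector", "start_time", "img_shape"])
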
2016, Nov 18 Compute the max intensity of ROIs in the compressed file (FD) @@ -2853,27 +3087,29 @@ def get_max_countc(FD, labeled_array ): The labels for each element of the `mean_intensity` list """ - qind, pixelist = roi.extract_label_indices( labeled_array ) - timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) - timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + qind, pixelist = roi.extract_label_indices(labeled_array) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) - if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']): + if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]): raise ValueError( - " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) ) + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (FD.md["ncols"], FD.md["nrows"], labeled_array.shape[0], labeled_array.shape[1]) + ) - max_inten =0 - for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get max intensity of ROIs in all frames' ): + max_inten = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get max intensity of ROIs in all frames"): try: - (p,v) = FD.rdrawframe(i) - w = np.where( timg[p] )[0] - max_inten = max( max_inten, np.max(v[w]) ) + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + max_inten = max(max_inten, np.max(v[w])) except: pass return max_inten -def create_polygon_mask( image, xcorners, ycorners ): - ''' +def create_polygon_mask(image, xcorners, ycorners): + """ Give image and x/y coners to create a polygon mask image: 2d array xcorners, list, points of x coners @@ -2884,18 +3120,19 @@ def create_polygon_mask( image, xcorners, ycorners ): Example: - ''' - from skimage.draw import line_aa, line, polygon, disk + """ + from skimage.draw import disk, line, line_aa, polygon + imy, imx = image.shape - bst_mask = np.zeros_like( image , dtype = bool) - rr, cc = polygon( ycorners,xcorners,shape = image.shape) - bst_mask[rr,cc] =1 - #full_mask= ~bst_mask + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask return bst_mask -def create_rectangle_mask( image, xcorners, ycorners ): - ''' +def create_rectangle_mask(image, xcorners, ycorners): + """ Give image and x/y coners to create a rectangle mask image: 2d array xcorners, list, points of x coners @@ -2906,18 +3143,19 @@ def create_rectangle_mask( image, xcorners, ycorners ): Example: - ''' - from skimage.draw import line_aa, line, polygon, disk + """ + from skimage.draw import disk, line, line_aa, polygon + imy, imx = image.shape - bst_mask = np.zeros_like( image , dtype = bool) - rr, cc = polygon( ycorners,xcorners,shape = image.shape) - bst_mask[rr,cc] =1 - #full_mask= ~bst_mask + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask return bst_mask -def create_multi_rotated_rectangle_mask( image, center=None, length=100, width=50, angles=[0] ): - ''' Developed at July 10, 2017 by Y.G.@CHX, NSLS2 +def create_multi_rotated_rectangle_mask(image, center=None, length=100, width=50, angles=[0]): + """Developed at July 10, 2017 by Y.G.@CHX, NSLS2 Create multi rectangle-shaped mask by rotating a rectangle with a list of angles The original rectangle is defined by four corners, i.e., [ (center[1] - width//2, center[0]), @@ -2935,57 
+3173,59 @@ def create_multi_rotated_rectangle_mask( image, center=None, length=100, width= Return: mask: 2D bool-type numpy array - ''' + """ - from skimage.draw import polygon + from skimage.draw import polygon from skimage.transform import rotate - cx,cy = center + + cx, cy = center imy, imx = image.shape - mask = np.zeros( image.shape, dtype = bool) - wy = length - wx = width - x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) - y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ]) - rr, cc = polygon( y,x, shape = image.shape) - mask[rr,cc] =1 - mask_rot= np.zeros( image.shape, dtype = bool) + mask = np.zeros(image.shape, dtype=bool) + wy = length + wx = width + x = np.array([max(0, cx - wx // 2), min(imx, cx + wx // 2), min(imx, cx + wx // 2), max(0, cx - wx // 2)]) + y = np.array([cy, cy, min(imy, cy + wy), min(imy, cy + wy)]) + rr, cc = polygon(y, x, shape=image.shape) + mask[rr, cc] = 1 + mask_rot = np.zeros(image.shape, dtype=bool) for angle in angles: - mask_rot += np.array( rotate( mask, angle, center= center ), dtype=bool) #, preserve_range=True) - return ~mask_rot + mask_rot += np.array(rotate(mask, angle, center=center), dtype=bool) # , preserve_range=True) + return ~mask_rot -def create_wedge( image, center, radius, wcors, acute_angle=True) : - '''YG develop at June 18, 2017, @CHX - Create a wedge by a combination of disk and a triangle defined by center and wcors - wcors: [ [x1,x2,x3...], [y1,y2,y3..] - ''' - from skimage.draw import line_aa, line, polygon, disk +def create_wedge(image, center, radius, wcors, acute_angle=True): + """YG develop at June 18, 2017, @CHX + Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] 
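
# Illustrative sketch for create_multi_rotated_rectangle_mask: one bar of
# length 100 x width 20 pixels replicated at 0/45/90 degrees around a
# hypothetical center; the returned boolean array is True where pixels are
# kept and False inside the rotated bars.
img = np.zeros((512, 512))
bar_mask = create_multi_rotated_rectangle_mask(img, center=[256, 256], length=100, width=20, angles=[0, 45, 90])
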
+ + """ + from skimage.draw import disk, line, line_aa, polygon + imy, imx = image.shape - cy,cx = center - x = [cx] + list(wcors[0]) - y = [cy] + list(wcors[1]) - - maskc = np.zeros_like( image , dtype = bool) - rr, cc = disk((cy, cx), radius, shape = image.shape) - maskc[rr,cc] =1 - - maskp = np.zeros_like( image , dtype = bool) - x = np.array( x ) - y = np.array( y ) - print(x,y) - rr, cc = polygon( y,x, shape = image.shape) - maskp[rr,cc] =1 + cy, cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like(image, dtype=bool) + rr, cc = disk((cy, cx), radius, shape=image.shape) + maskc[rr, cc] = 1 + + maskp = np.zeros_like(image, dtype=bool) + x = np.array(x) + y = np.array(y) + print(x, y) + rr, cc = polygon(y, x, shape=image.shape) + maskp[rr, cc] = 1 if acute_angle: - return maskc*maskp + return maskc * maskp else: - return maskc*~maskp - + return maskc * ~maskp -def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, - center_disk = True, center_radius=10 - ): - ''' +def create_cross_mask( + image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, center_disk=True, center_radius=10 +): + """ Give image and the beam center to create a cross-shaped mask wy_left: the width of left h-line wy_right: the width of rigth h-line @@ -2995,69 +3235,66 @@ def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4 Return: the cross mask - ''' - from skimage.draw import line_aa, line, polygon, disk + """ + from skimage.draw import disk, line, line_aa, polygon imy, imx = image.shape - cx,cy = center - bst_mask = np.zeros_like( image , dtype = bool) + cx, cy = center + bst_mask = np.zeros_like(image, dtype=bool) ### - #for right part + # for right part wy = wy_right - x = np.array( [ cx, imx, imx, cx ]) - y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) - rr, cc = polygon( y,x, shape = image.shape) - bst_mask[rr,cc] =1 + x = np.array([cx, imx, imx, cx]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 ### - #for left part + # for left part wy = wy_left - x = np.array( [0, cx, cx,0 ]) - y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) - rr, cc = polygon( y,x, shape = image.shape) - bst_mask[rr,cc] =1 + x = np.array([0, cx, cx, 0]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 ### - #for up part + # for up part wx = wx_up - x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) - y = np.array( [ cy, cy, imy, imy]) - rr, cc = polygon( y,x, shape = image.shape) - bst_mask[rr,cc] =1 + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([cy, cy, imy, imy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 ### - #for low part + # for low part wx = wx_down - x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) - y = np.array( [ 0,0, cy, cy]) - rr, cc = polygon( y,x, shape = image.shape) - bst_mask[rr,cc] =1 + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([0, 0, cy, cy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 - if center_radius!=0: - rr, cc = disk((cy, cx), center_radius, shape = bst_mask.shape) - bst_mask[rr,cc] =1 + if center_radius != 0: + rr, cc = disk((cy, cx), center_radius, shape=bst_mask.shape) + bst_mask[rr, cc] = 1 - - full_mask= ~bst_mask + full_mask = ~bst_mask return full_mask - - - -def generate_edge( centers, width): - '''YG. 
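
# Illustrative sketch for create_cross_mask: exclude a beamstop cross plus a
# central disk on an Eiger4M-sized frame; the center and widths are
# hypothetical pixel values. The result is True for good pixels, so it can be
# folded into an existing mask by multiplication, e.g. mask = mask * cross.
img = np.zeros((2167, 2070))
cross = create_cross_mask(img, center=[1143, 1229], wy_left=4, wy_right=4, wx_up=4, wx_down=4, center_radius=10)
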
10/14/2016 - give centers and width (number or list) to get edges''' - edges = np.zeros( [ len(centers),2]) - edges[:,0] = centers - width - edges[:,1] = centers + width +def generate_edge(centers, width): + """YG. 10/14/2016 + give centers and width (number or list) to get edges""" + edges = np.zeros([len(centers), 2]) + edges[:, 0] = centers - width + edges[:, 1] = centers + width return edges -def export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], - path='/XF11ID/analysis/2016_3/commissioning/Results/' ): - '''YG. 10/17/2016 +def export_scan_scalar( + uid, x="dcm_b", y=["xray_eye1_stats1_total"], path="/XF11ID/analysis/2016_3/commissioning/Results/" +): + """YG. 10/17/2016 export uid data to a txt file uid: unique scan id x: the x-col @@ -3069,73 +3306,76 @@ def export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], A plot for the data: d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') - ''' + """ from databroker import DataBroker as db - from pyCHX.chx_generic_functions import trans_data_to_pd + + from pyCHX.chx_generic_functions import trans_data_to_pd hdr = db[uid] print(hdr.fields()) data = db[uid].table() xp = data[x] - datap = np.zeros( [len(xp), len(y)+1]) - datap[:,0] = xp + datap = np.zeros([len(xp), len(y) + 1]) + datap[:, 0] = xp for i, yi in enumerate(y): - datap[:,i+1] = data[yi] + datap[:, i + 1] = data[yi] - datap = trans_data_to_pd( datap, label=[x] + [yi for yi in y]) - datap.to_csv( path + 'uid=%s.csv'%uid) + datap = trans_data_to_pd(datap, label=[x] + [yi for yi in y]) + datap.to_csv(path + "uid=%s.csv" % uid) return datap - - ##### -#load data by databroker +# load data by databroker -def get_flatfield( uid, reverse=False ): + +def get_flatfield(uid, reverse=False): import h5py - detector = get_detector( db[uid ] ) + + detector = get_detector(db[uid]) sud = get_sid_filenames(db[uid]) - master_path = '%s_master.h5'%(sud[2][0]) - print( master_path) - f= h5py.File(master_path, 'r') - k= 'entry/instrument/detector/detectorSpecific/' #data_collection_date' - d= np.array( f[ k]['flatfield'] ) + master_path = "%s_master.h5" % (sud[2][0]) + print(master_path) + f = h5py.File(master_path, "r") + k = "entry/instrument/detector/detectorSpecific/" # data_collection_date' + d = np.array(f[k]["flatfield"]) f.close() if reverse: - d = reverse_updown( d ) + d = reverse_updown(d) return d - -def get_detector( header ): - '''Get the first detector image string by giving header ''' +def get_detector(header): + """Get the first detector image string by giving header""" keys = get_detectors(header) for k in keys: - if 'eiger' in k: + if "eiger" in k: return k -def get_detectors( header ): - '''Get all the detector image strings by giving header ''' + +def get_detectors(header): + """Get all the detector image strings by giving header""" if "primary" in header.v2: descriptor = header.v2["primary"].descriptors[0] - keys = [k for k, v in descriptor['data_keys'].items() if 'external' in v] + keys = [k for k, v in descriptor["data_keys"].items() if "external" in v] return sorted(set(keys)) - return [] + return [] -def get_full_data_path( uid ): - '''A dirty way to get full data path''' + +def get_full_data_path(uid): + """A dirty way to get full data path""" header = db[uid] d = header.db - s = list(d.get_documents( db[uid ])) - #print(s[2]) - p = s[2][1]['resource_path'] - p2 = s[3][1]['datum_kwargs']['seq_id'] - #print(p,p2) - return p + '_' + str(p2) + '_master.h5' + s = list(d.get_documents(db[uid])) + # print(s[2]) + p = 
s[2][1]["resource_path"] + p2 = s[3][1]["datum_kwargs"]["seq_id"] + # print(p,p2) + return p + "_" + str(p2) + "_master.h5" + -def get_sid_filenames(hdr,verbose=False): +def get_sid_filenames(hdr, verbose=False): """ get scan_id, uid and detector filename from databroker get_sid_filenames(hdr,verbose=False) @@ -3144,33 +3384,57 @@ def get_sid_filenames(hdr,verbose=False): LW 04/30/2024 """ import glob - from time import strftime, localtime + from time import localtime, strftime + start_doc = hdr.start stop_doc = hdr.stop success = False - - ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5")) # looking for (eiger) datafile at the path specified in metadata - if len(ret[2])==0: - if verbose: print('could not find detector filename from "data_path" in metadata: %s'%start_doc['data path']) + + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5"), + ) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2]) == 0: + if verbose: + print('could not find detector filename from "data_path" in metadata: %s' % start_doc["data path"]) else: - if verbose: print('Found detector filename from "data_path" in metadata!');success=True - - if not success: # looking at path in metadata, but taking the date from the run start document - data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(start_doc['time'])) - ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) - if len(ret[2])==0: - if verbose: print('could not find detector filename in %s'%data_path) + if verbose: + print('Found detector filename from "data_path" in metadata!') + success = True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path = start_doc["data path"][:-11] + strftime("%Y/%m/%d/", localtime(start_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("could not find detector filename in %s" % data_path) else: - if verbose: print('Found detector filename in %s'%data_path);success=True - - if not success: # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) - data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(stop_doc['time'])) - ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) - if len(ret[2])==0: - if verbose: print('Sorry, could not find detector filename....') + if verbose: + print("Found detector filename in %s" % data_path) + success = True + + if ( + not success + ): # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path = start_doc["data path"][:-11] + strftime("%Y/%m/%d/", localtime(stop_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("Sorry, could not find detector filename....") else: - if verbose: print('Found detector filename in %s'%data_path);success=True - return ret + if verbose: + print("Found detector filename in %s" % data_path) + success = True + 
return ret # def get_sid_filenames(header): @@ -3217,7 +3481,8 @@ def get_sid_filenames(hdr,verbose=False): # filepaths.extend(new_filepaths) # return header.start['scan_id'], header.start['uid'], filepaths -def load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False): + +def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): """ load data as dask-array get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) @@ -3230,36 +3495,46 @@ def load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False): LW 04/26/2024 """ import dask - hdr=db[uid] - det=detector.split('_image')[0] - # collect image metadata from loading single image - img_md_dict={'detector_distance':'det_distance','incident_wavelength':'wavelength','frame_time':'cam_acquire_period','count_time':'cam_acquire_time','num_images':'cam_num_images','beam_center_x':'beam_center_x','beam_center_y':'beam_center_y'} - img_md={} + + hdr = db[uid] + det = detector.split("_image")[0] + # collect image metadata from loading single image + img_md_dict = { + "detector_distance": "det_distance", + "incident_wavelength": "wavelength", + "frame_time": "cam_acquire_period", + "count_time": "cam_acquire_time", + "num_images": "cam_num_images", + "beam_center_x": "beam_center_x", + "beam_center_y": "beam_center_y", + } + img_md = {} for k in list(img_md_dict.keys()): - img_md[k]=hdr.config_data(det)['primary'][0]['%s_%s'%(det,img_md_dict[k])] - if md['detector'] in ['eiger4m_single_image','eiger1m_single_image','eiger500K_single_image']: - img_md.update({'y_pixel_size': 7.5e-05, 'x_pixel_size': 7.5e-05}) - got_pixel_mask=True + img_md[k] = hdr.config_data(det)["primary"][0]["%s_%s" % (det, img_md_dict[k])] + if md["detector"] in ["eiger4m_single_image", "eiger1m_single_image", "eiger500K_single_image"]: + img_md.update({"y_pixel_size": 7.5e-05, "x_pixel_size": 7.5e-05}) + got_pixel_mask = True else: - img_md.update({'y_pixel_size': None, 'x_pixel_size': None}) - got_pixel_mask=False + img_md.update({"y_pixel_size": None, "x_pixel_size": None}) + got_pixel_mask = False # load pixel mask from static location - if got_pixel_mask: - json_open=open(_mask_path_+'pixel_masks/pixel_mask_compression_%s.json'%detector.split('_')[0]) - mask_dict=json.load(json_open) - img_md['pixel_mask']=np.array(mask_dict['pixel_mask']) - img_md['binary_mask']=np.array(mask_dict['binary_mask']) + if got_pixel_mask: + json_open = open(_mask_path_ + "pixel_masks/pixel_mask_compression_%s.json" % detector.split("_")[0]) + mask_dict = json.load(json_open) + img_md["pixel_mask"] = np.array(mask_dict["pixel_mask"]) + img_md["binary_mask"] = np.array(mask_dict["binary_mask"]) del mask_dict # load image data as dask-arry: - dimg=hdr.xarray_dask()[md['detector']][0] + dimg = hdr.xarray_dask()[md["detector"]][0] if reverse: - dimg=dask.array.flip(dimg,axis=(0,1)) + dimg = dask.array.flip(dimg, axis=(0, 1)) if rot90: - dimg=dask.array.rot90(dimg,axes=(1,2)) - return dimg,img_md + dimg = dask.array.rot90(dimg, axes=(1, 2)) + return dimg, img_md -def load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, rot90=False): + +def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, rot90=False): """load bluesky scan data by giveing uid and detector Parameters @@ -3284,11 +3559,11 @@ def load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, ro ATTEMPTS = 0 for attempt in range(ATTEMPTS): try: - ev, = 
hdr.events(fields=[detector], fill=fill) + (ev,) = hdr.events(fields=[detector], fill=fill) break except Exception: - print ('Trying again ...!') + print("Trying again ...!") if attempt == ATTEMPTS - 1: # We're out of attempts. Raise the exception to help with debugging. raise @@ -3299,54 +3574,51 @@ def load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, ro # TODO(mrakitin): replace with the lazy loader (when it's implemented): imgs = list(hdr.data(detector)) - if len(imgs[0])>=1: + if len(imgs[0]) >= 1: md = imgs[0].md imgs = pims.pipeline(lambda img: img)(imgs[0]) imgs.md = md if reverse: md = imgs.md - imgs = reverse_updown( imgs ) # Why not np.flipud? + imgs = reverse_updown(imgs) # Why not np.flipud? imgs.md = md if rot90: md = imgs.md - imgs = rot90_clockwise( imgs ) # Why not np.flipud? + imgs = rot90_clockwise(imgs) # Why not np.flipud? imgs.md = md return imgs -def mask_badpixels( mask, detector ): - ''' +def mask_badpixels(mask, detector): + """ Mask known bad pixel from the giveing mask - ''' - if detector =='eiger1m_single_image': - #to be determined + """ + if detector == "eiger1m_single_image": + # to be determined mask = mask - elif detector =='eiger4m_single_image' or detector == 'image': - mask[513:552,:] =0 - mask[1064:1103,:] =0 - mask[1615:1654,:] =0 - mask[:,1029:1041] = 0 - mask[:, 0] =0 - mask[0:, 2069] =0 - mask[0] =0 - mask[2166] =0 - - elif detector =='eiger500K_single_image': - #to be determined + elif detector == "eiger4m_single_image" or detector == "image": + mask[513:552, :] = 0 + mask[1064:1103, :] = 0 + mask[1615:1654, :] = 0 + mask[:, 1029:1041] = 0 + mask[:, 0] = 0 + mask[0:, 2069] = 0 + mask[0] = 0 + mask[2166] = 0 + + elif detector == "eiger500K_single_image": + # to be determined mask = mask else: mask = mask return mask - - - -def load_data2( uid , detector = 'eiger4m_single_image' ): +def load_data2(uid, detector="eiger4m_single_image"): """load bluesky scan data by giveing uid and detector Parameters @@ -3364,54 +3636,52 @@ def load_data2( uid , detector = 'eiger4m_single_image' ): md = imgs.md """ hdr = db[uid] - flag =1 - while flag<4 and flag !=0: + flag = 1 + while flag < 4 and flag != 0: try: - ev, = hdr.events(fields=[detector]) - flag =0 + (ev,) = hdr.events(fields=[detector]) + flag = 0 except: flag += 1 - print ('Trying again ...!') + print("Trying again ...!") if flag: - print ("Can't Load Data!") - uid = '00000' #in case of failling load data + print("Can't Load Data!") + uid = "00000" # in case of failling load data imgs = 0 else: - imgs = ev['data'][detector] + imgs = ev["data"][detector] - #print (imgs) + # print (imgs) return imgs - -def psave_obj(obj, filename ): - '''save an object with filename by pickle.dump method +def psave_obj(obj, filename): + """save an object with filename by pickle.dump method This function automatically add '.pkl' as filename extension Input: obj: the object to be saved filename: filename (with full path) to be saved Return: None - ''' - with open( filename + '.pkl', 'wb') as f: + """ + with open(filename + ".pkl", "wb") as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) -def pload_obj(filename ): - '''load a pickled filename + +def pload_obj(filename): + """load a pickled filename This function automatically add '.pkl' to filename extension Input: filename: filename (with full path) to be saved Return: load the object by pickle.load method - ''' - with open( filename + '.pkl', 'rb') as f: + """ + with open(filename + ".pkl", "rb") as f: return pickle.load(f) - -def load_mask( path, 
mask_name, plot_ = False, reverse=False, rot90=False, *argv,**kwargs): - +def load_mask(path, mask_name, plot_=False, reverse=False, rot90=False, *argv, **kwargs): """load a mask file the mask is a numpy binary file (.npy) @@ -3430,103 +3700,132 @@ def load_mask( path, mask_name, plot_ = False, reverse=False, rot90=False, *argv mask = load_mask( path, mask_name, plot_ = True ) """ - mask = np.load( path + mask_name ) - mask = np.array(mask, dtype = np.int32) + mask = np.load(path + mask_name) + mask = np.array(mask, dtype=np.int32) if reverse: - mask = mask[::-1,:] + mask = mask[::-1, :] if rot90: - mask = np.rot90( mask ) + mask = np.rot90(mask) if plot_: - show_img( mask, *argv,**kwargs) + show_img(mask, *argv, **kwargs) return mask +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0): + """create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded -def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0 ): - '''create a hot pixel mask by giving threshold - Input: - img: the image to create hot pixel mask - threshold: the threshold above which will be considered as hot pixels - center: optional, default=None - else, as a two-element list (beam center), i.e., [center_x, center_y] - if center is not None, the hot pixel will not include a disk region - which is defined by center and center_radius ( in unit of pixel) - Output: - a bool types numpy array (mask), 1 is good and 0 is excluded - - ''' - bst_mask = np.ones_like( img , dtype = bool) + """ + bst_mask = np.ones_like(img, dtype=bool) if center != None: - from skimage.draw import disk + from skimage.draw import disk + imy, imx = img.shape - cy,cx = center - rr, cc = disk((cy, cx), center_radius,shape=img.shape ) - bst_mask[rr,cc] =0 + cy, cx = center + rr, cc = disk((cy, cx), center_radius, shape=img.shape) + bst_mask[rr, cc] = 0 if outer_radius: - bst_mask = np.zeros_like( img , dtype = bool) - rr2, cc2 = disk((cy, cx), outer_radius,shape=img.shape ) - bst_mask[rr2,cc2] =1 - bst_mask[rr,cc] =0 - hmask = np.ones_like( img ) - hmask[np.where( img * bst_mask > threshold)]=0 + bst_mask = np.zeros_like(img, dtype=bool) + rr2, cc2 = disk((cy, cx), outer_radius, shape=img.shape) + bst_mask[rr2, cc2] = 1 + bst_mask[rr, cc] = 0 + hmask = np.ones_like(img) + hmask[np.where(img * bst_mask > threshold)] = 0 return hmask - - -def apply_mask( imgs, mask): - '''apply mask to imgs to produce a generator +def apply_mask(imgs, mask): + """apply mask to imgs to produce a generator Usuages: imgsa = apply_mask( imgs, mask ) good_series = apply_mask( imgs[good_start:], mask ) - ''' + """ return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask -def reverse_updown( imgs): - '''reverse imgs upside down to produce a generator +def reverse_updown(imgs): + """reverse imgs upside down to produce a generator Usuages: imgsr = reverse_updown( imgs) - ''' - return pims.pipeline(lambda img: img[::-1,:])(imgs) # lazily apply mask + """ + return pims.pipeline(lambda img: img[::-1, :])(imgs) # lazily apply mask + -def rot90_clockwise( imgs): - '''reverse imgs 
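
# Illustrative sketch of the typical masking workflow with the helpers above;
# the path and file name are placeholders for a saved .npy mask, and 'imgs'
# is assumed to come from load_data.
mask = load_mask("/XF11ID/analysis/masks/", "mask_4m.npy", reverse=True)
imgsa = apply_mask(imgs, mask)  # lazy pims pipeline, evaluated frame by frame
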
upside down to produce a generator +def rot90_clockwise(imgs): + """reverse imgs upside down to produce a generator Usuages: imgsr = rot90_clockwise( imgs) - ''' - return pims.pipeline(lambda img: np.rot90(img) )(imgs) # lazily apply mask + """ + return pims.pipeline(lambda img: np.rot90(img))(imgs) # lazily apply mask + -def RemoveHot( img,threshold= 1E7, plot_=True ): - '''Remove hot pixel from img''' +def RemoveHot(img, threshold=1e7, plot_=True): + """Remove hot pixel from img""" - mask = np.ones_like( np.array( img ) ) - badp = np.where( np.array(img) >= threshold ) - if len(badp[0])!=0: + mask = np.ones_like(np.array(img)) + badp = np.where(np.array(img) >= threshold) + if len(badp[0]) != 0: mask[badp] = 0 if plot_: - show_img( mask ) + show_img(mask) return mask ############ ###plot data -def show_img( image, ax=None,label_array=None, alpha=0.5, interpolation='nearest', - xlim=None, ylim=None, save=False,image_name=None,path=None, - aspect=None, logs=False,vmin=None,vmax=None,return_fig=False,cmap='viridis', - show_time= False, file_name =None, ylabel=None, xlabel=None, extent=None, - show_colorbar=True, tight=True, show_ticks=True, save_format = 'png', dpi= None, - center=None,origin='lower', lab_fontsize = 16, tick_size = 12, colorbar_fontsize = 8, - use_mat_imshow=False, - *argv,**kwargs ): + +def show_img( + image, + ax=None, + label_array=None, + alpha=0.5, + interpolation="nearest", + xlim=None, + ylim=None, + save=False, + image_name=None, + path=None, + aspect=None, + logs=False, + vmin=None, + vmax=None, + return_fig=False, + cmap="viridis", + show_time=False, + file_name=None, + ylabel=None, + xlabel=None, + extent=None, + show_colorbar=True, + tight=True, + show_ticks=True, + save_format="png", + dpi=None, + center=None, + origin="lower", + lab_fontsize=16, + tick_size=12, + colorbar_fontsize=8, + use_mat_imshow=False, + *argv, + **kwargs, +): """YG. 
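
# Illustrative sketch combining the hot-pixel helpers defined above; 'avg_img'
# and 'mask' are assumed to exist from the earlier loading steps, and the
# threshold, center and radius are hypothetical.
hmask = create_hot_pixel_mask(avg_img, threshold=1e5, center=[1143, 1229], center_radius=100)
mask = mask * hmask  # keep the central disk, exclude hot pixels elsewhere
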
Sep26, 2017 Add label_array/alpha option to show a mask on top of image a simple function to show image by using matplotlib.plt imshow @@ -3547,81 +3846,113 @@ def show_img( image, ax=None,label_array=None, alpha=0.5, interpolation='nearest else: fig, ax = plt.subplots() else: - fig, ax=ax - + fig, ax = ax if center != None: - plot1D(center[1],center[0],ax=ax, c='b', m='o', legend='') + plot1D(center[1], center[0], ax=ax, c="b", m="o", legend="") if not logs: if not use_mat_imshow: - im=imshow(ax, image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, - extent=extent) #vmin=0,vmax=1, + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + vmin=vmin, + vmax=vmax, + extent=extent, + ) # vmin=0,vmax=1, else: - im=ax.imshow( image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, - extent=extent) #vmin=0,vmax=1, + im = ax.imshow( + image, origin=origin, cmap=cmap, interpolation=interpolation, vmin=vmin, vmax=vmax, extent=extent + ) # vmin=0,vmax=1, else: if not use_mat_imshow: - im=imshow(ax, image, origin=origin,cmap=cmap, - interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) else: - im=ax.imshow(image, origin=origin,cmap=cmap, - interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + im = ax.imshow( + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) if label_array != None: - im2=show_label_array(ax, label_array, alpha= alpha, cmap=cmap, interpolation=interpolation ) + im2 = show_label_array(ax, label_array, alpha=alpha, cmap=cmap, interpolation=interpolation) - ax.set_title( image_name ) + ax.set_title(image_name) if xlim != None: - ax.set_xlim( xlim ) + ax.set_xlim(xlim) if ylim != None: - ax.set_ylim( ylim ) + ax.set_ylim(ylim) if not show_ticks: ax.set_yticks([]) ax.set_xticks([]) else: - ax.tick_params(axis='both', which='major', labelsize=tick_size ) - ax.tick_params(axis='both', which='minor', labelsize=tick_size ) - #mpl.rcParams['xtick.labelsize'] = tick_size - #mpl.rcParams['ytick.labelsize'] = tick_size - #print(tick_size) + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + # mpl.rcParams['xtick.labelsize'] = tick_size + # mpl.rcParams['ytick.labelsize'] = tick_size + # print(tick_size) if ylabel != None: - #ax.set_ylabel(ylabel)#, fontsize = 9) - ax.set_ylabel( ylabel , fontsize = lab_fontsize ) + # ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel(ylabel, fontsize=lab_fontsize) if xlabel != None: - ax.set_xlabel(xlabel , fontsize = lab_fontsize ) + ax.set_xlabel(xlabel, fontsize=lab_fontsize) if aspect != None: - #aspect = image.shape[1]/float( image.shape[0] ) + # aspect = image.shape[1]/float( image.shape[0] ) ax.set_aspect(aspect) else: - ax.set_aspect(aspect='auto') + ax.set_aspect(aspect="auto") if show_colorbar: - cbar = fig.colorbar(im, extend='neither', spacing='proportional', - orientation='vertical' ) + cbar = fig.colorbar(im, extend="neither", spacing="proportional", orientation="vertical") cbar.ax.tick_params(labelsize=colorbar_fontsize) fig.set_tight_layout(tight) if save: if show_time: - dt =datetime.now() - CurTime = '_%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - fp = path + '%s'%( file_name ) + CurTime + '.' 
+ save_format + dt = datetime.now() + CurTime = "_%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + fp = path + "%s" % (file_name) + CurTime + "." + save_format else: - fp = path + '%s'%( image_name ) + '.' + save_format + fp = path + "%s" % (image_name) + "." + save_format if dpi == None: dpi = fig.dpi - plt.savefig( fp, dpi= dpi) - #fig.set_tight_layout(tight) + plt.savefig(fp, dpi=dpi) + # fig.set_tight_layout(tight) if return_fig: - return im #fig - - - - -def plot1D( y,x=None, yerr=None, ax=None,return_fig=False, ls='-', figsize=None,legend=None, - legend_size=None, lw=None, markersize=None, tick_size=8, *argv,**kwargs): + return im # fig + + +def plot1D( + y, + x=None, + yerr=None, + ax=None, + return_fig=False, + ls="-", + figsize=None, + legend=None, + legend_size=None, + lw=None, + markersize=None, + tick_size=8, + *argv, + **kwargs, +): """a simple function to plot two-column data by using matplotlib.plot pass *argv,**kwargs to plot @@ -3645,189 +3976,212 @@ def plot1D( y,x=None, yerr=None, ax=None,return_fig=False, ls='-', figsize=None, fig, ax = plt.subplots() if legend == None: - legend = ' ' + legend = " " try: - logx = kwargs['logx'] + logx = kwargs["logx"] except: - logx=False + logx = False try: - logy = kwargs['logy'] + logy = kwargs["logy"] except: - logy=False + logy = False try: - logxy = kwargs['logxy'] + logxy = kwargs["logxy"] except: - logxy= False + logxy = False - if logx==True and logy==True: + if logx == True and logy == True: logxy = True try: - marker = kwargs['marker'] + marker = kwargs["marker"] except: try: - marker = kwargs['m'] + marker = kwargs["m"] except: - marker= next( markers_ ) + marker = next(markers_) try: - color = kwargs['color'] + color = kwargs["color"] except: try: - color = kwargs['c'] + color = kwargs["c"] except: - color = next( colors_ ) + color = next(colors_) if x == None: - x=range(len(y)) + x = range(len(y)) if yerr == None: - ax.plot(x,y, marker=marker,color=color,ls=ls,label= legend, lw=lw, - markersize=markersize, )#,*argv,**kwargs) + ax.plot( + x, + y, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) else: - ax.errorbar(x,y,yerr, marker=marker,color=color,ls=ls,label= legend, - lw=lw,markersize=markersize,)#,*argv,**kwargs) + ax.errorbar( + x, + y, + yerr, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) if logx: - ax.set_xscale('log') + ax.set_xscale("log") if logy: - ax.set_yscale('log') + ax.set_yscale("log") if logxy: - ax.set_xscale('log') - ax.set_yscale('log') - - - ax.tick_params(axis='both', which='major', labelsize=tick_size ) - ax.tick_params(axis='both', which='minor', labelsize=tick_size ) - - if 'xlim' in kwargs.keys(): - ax.set_xlim( kwargs['xlim'] ) - if 'ylim' in kwargs.keys(): - ax.set_ylim( kwargs['ylim'] ) - if 'xlabel' in kwargs.keys(): - ax.set_xlabel(kwargs['xlabel']) - if 'ylabel' in kwargs.keys(): - ax.set_ylabel(kwargs['ylabel']) - - if 'title' in kwargs.keys(): - title = kwargs['title'] + ax.set_xscale("log") + ax.set_yscale("log") + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + if "xlabel" in kwargs.keys(): + ax.set_xlabel(kwargs["xlabel"]) + if "ylabel" in kwargs.keys(): + ax.set_ylabel(kwargs["ylabel"]) + + if "title" in kwargs.keys(): + 
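
A usage sketch for plot1D (illustrative data; the styling keywords logx, m, c, xlabel and so on are all picked out of **kwargs as in the code above). Passing x as a plain list sidesteps the ambiguous elementwise `x == None` comparison that a numpy array would trigger:

    import numpy as np

    taus = np.logspace(-4, 0, 40)
    g2_one_q = 1.0 + 0.2 * np.exp(-2 * 100.0 * taus)
    plot1D(
        y=g2_one_q,
        x=list(taus),      # a list avoids the elementwise `x == None` check
        logx=True,
        m="o",
        c="b",
        ls="-",
        legend="q ring 1",
        xlabel="tau (s)",
        ylabel="g2(tau)",
        title="g2_sketch",
    )
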
title = kwargs["title"] else: - title = 'plot' - ax.set_title( title ) - #ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') - if (legend!='') and (legend!=None): - ax.legend(loc = 'best', fontsize=legend_size ) - if 'save' in kwargs.keys(): - if kwargs['save']: - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - #fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' - fp = kwargs['path'] + '%s'%( title ) + '.png' - plt.savefig( fp, dpi=fig.dpi) + title = "plot" + ax.set_title(title) + # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend != "") and (legend != None): + ax.legend(loc="best", fontsize=legend_size) + if "save" in kwargs.keys(): + if kwargs["save"]: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + # fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs["path"] + "%s" % (title) + ".png" + plt.savefig(fp, dpi=fig.dpi) if return_fig: return fig ### -def check_shutter_open( data_series, min_inten=0, time_edge = [0,10], plot_ = False, *argv,**kwargs): - '''Check the first frame with shutter open +def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): + """Check the first frame with shutter open - Parameters - ---------- - data_series: a image series - min_inten: the total intensity lower than min_inten is defined as shtter close - time_edge: the searching frame number range + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined as shtter close + time_edge: the searching frame number range - return: - shutter_open_frame: a integer, the first frame number with open shutter + return: + shutter_open_frame: a integer, the first frame number with open shutter - Usuage: - good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) - ''' - imgsum = np.array( [np.sum(img ) for img in data_series[time_edge[0]:time_edge[1]:1]] ) + """ + imgsum = np.array([np.sum(img) for img in data_series[time_edge[0] : time_edge[1] : 1]]) if plot_: fig, ax = plt.subplots() - ax.plot(imgsum,'bo') - ax.set_title('uid=%s--imgsum'%uid) - ax.set_xlabel( 'Frame' ) - ax.set_ylabel( 'Total_Intensity' ) - #plt.show() - shutter_open_frame = np.where( np.array(imgsum) > min_inten )[0][0] - print ('The first frame with open shutter is : %s'%shutter_open_frame ) + ax.plot(imgsum, "bo") + ax.set_title("uid=%s--imgsum" % uid) + ax.set_xlabel("Frame") + ax.set_ylabel("Total_Intensity") + # plt.show() + shutter_open_frame = np.where(np.array(imgsum) > min_inten)[0][0] + print("The first frame with open shutter is : %s" % shutter_open_frame) return shutter_open_frame +def get_each_frame_intensity( + data_series, sampling=50, bad_pixel_threshold=1e10, plot_=False, save=False, *argv, **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold -def get_each_frame_intensity( data_series, sampling = 50, - bad_pixel_threshold=1e10, - plot_ = False, save= False, *argv,**kwargs): - '''Get the total intensity of each frame by sampling every N frames - Also get bad_frame_list by check whether above bad_pixel_threshold - - Usuage: - imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, - bad_pixel_threshold=1e10, plot_ = True) - ''' + Usuage: + imgsum, 
bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ - #print ( argv, kwargs ) - imgsum = np.array( [np.sum(img ) for img in tqdm( data_series[::sampling] , leave = True ) ] ) + # print ( argv, kwargs ) + imgsum = np.array([np.sum(img) for img in tqdm(data_series[::sampling], leave=True)]) if plot_: - uid = 'uid' - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] fig, ax = plt.subplots() - ax.plot(imgsum,'bo') - ax.set_title('uid= %s--imgsum'%uid) - ax.set_xlabel( 'Frame_bin_%s'%sampling ) - ax.set_ylabel( 'Total_Intensity' ) + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") if save: - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs['path'] - if 'uid' in kwargs: - uid = kwargs['uid'] + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] else: - uid = 'uid' - #fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--imgsum-"%uid + '.png' - fig.savefig( fp, dpi=fig.dpi) - #plt.show() + uid = "uid" + # fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() - bad_frame_list = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + bad_frame_list = np.where(np.array(imgsum) > bad_pixel_threshold)[0] if len(bad_frame_list): - print ('Bad frame list are: %s' %bad_frame_list) + print("Bad frame list are: %s" % bad_frame_list) else: - print ('No bad frames are involved.') - return imgsum,bad_frame_list - + print("No bad frames are involved.") + return imgsum, bad_frame_list - -def create_time_slice( N, slice_num, slice_width, edges=None ): - '''create a ROI time regions ''' +def create_time_slice(N, slice_num, slice_width, edges=None): + """create a ROI time regions""" if edges != None: time_edge = edges else: - if slice_num==1: - time_edge = [ [0,N] ] + if slice_num == 1: + time_edge = [[0, N]] else: tstep = N // slice_num - te = np.arange( 0, slice_num +1 ) * tstep - tc = np.int_( (te[:-1] + te[1:])/2 )[1:-1] - if slice_width%2: - sw = slice_width//2 +1 - time_edge = [ [0,slice_width], ] + [ [s-sw+1,s+sw] for s in tc ] + [ [N-slice_width,N]] + te = np.arange(0, slice_num + 1) * tstep + tc = np.int_((te[:-1] + te[1:]) / 2)[1:-1] + if slice_width % 2: + sw = slice_width // 2 + 1 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw + 1, s + sw] for s in tc] + + [[N - slice_width, N]] + ) else: - sw= slice_width//2 - time_edge = [ [0,slice_width], ] + [ [s-sw,s+sw] for s in tc ] + [ [N-slice_width,N]] - - + sw = slice_width // 2 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw, s + sw] for s in tc] + + [[N - slice_width, N]] + ) return np.array(time_edge) -def show_label_array(ax, label_array, cmap=None, aspect=None,interpolation='nearest',**kwargs): +def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nearest", **kwargs): """ YG. 
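
A worked example for create_time_slice above, with arbitrary numbers: for N=100 frames split into slice_num=3 windows of slice_width=10, the windows sit at the start, near the computed center tc=49, and at the end:

    import numpy as np

    edges = create_time_slice(N=100, slice_num=3, slice_width=10)
    print(edges)
    # [[  0  10]
    #  [ 44  54]
    #  [ 90 100]]
    # one window at the start, one centered near the middle, one at the end,
    # each slice_width frames wide
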
Sep 26, 2017 Modified show_label_array(ax, label_array, cmap=None, **kwargs) @@ -3850,24 +4204,31 @@ def show_label_array(ax, label_array, cmap=None, aspect=None,interpolation='near The artist added to the axes """ if cmap == None: - cmap = 'viridis' - #print(cmap) + cmap = "viridis" + # print(cmap) _cmap = copy.copy((mcm.get_cmap(cmap))) - _cmap.set_under('w', 0) - vmin = max(.5, kwargs.pop('vmin', .5)) - im = ax.imshow(label_array, cmap=cmap, - interpolation=interpolation, - vmin=vmin, - **kwargs) + _cmap.set_under("w", 0) + vmin = max(0.5, kwargs.pop("vmin", 0.5)) + im = ax.imshow(label_array, cmap=cmap, interpolation=interpolation, vmin=vmin, **kwargs) if aspect == None: - ax.set_aspect(aspect='auto') - #ax.set_aspect('equal') + ax.set_aspect(aspect="auto") + # ax.set_aspect('equal') return im - -def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,alpha=0.3, vmin=0.1, vmax=5, - imshow_cmap='gray', **kwargs): #norm=LogNorm(), +def show_label_array_on_image( + ax, + image, + label_array, + cmap=None, + norm=None, + log_img=True, + alpha=0.3, + vmin=0.1, + vmax=5, + imshow_cmap="gray", + **kwargs, +): # norm=LogNorm(), """ This will plot the required ROI's(labeled array) on the image @@ -3895,178 +4256,202 @@ def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_i im_label : AxesImage The artist added to the axes """ - ax.set_aspect('equal') + ax.set_aspect("equal") - #print (vmin, vmax ) + # print (vmin, vmax ) if log_img: - im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(vmin, vmax),**kwargs) #norm=norm, + im = ax.imshow( + image, cmap=imshow_cmap, interpolation="none", norm=LogNorm(vmin, vmax), **kwargs + ) # norm=norm, else: - im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',vmin=vmin, vmax=vmax,**kwargs) #norm=norm, - - im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, - **kwargs) # norm=norm, + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", vmin=vmin, vmax=vmax, **kwargs) # norm=norm, + im_label = mpl_plot.show_label_array( + ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, **kwargs + ) # norm=norm, return im, im_label +def show_ROI_on_image( + image, + ROI, + center=None, + rwidth=400, + alpha=0.3, + label_on=True, + save=False, + return_fig=False, + rect_reqion=None, + log_img=True, + vmin=0.01, + vmax=5, + show_ang_cor=False, + cmap=cmap_albula, + fig_ax=None, + uid="uid", + path="", + aspect=1, + show_colorbar=True, + show_roi_edge=False, + *argv, + **kwargs, +): + """show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center -def show_ROI_on_image( image, ROI, center=None, rwidth=400,alpha=0.3, label_on = True, - save=False, return_fig = False, rect_reqion=None, log_img = True, vmin=0.01, vmax=5, - show_ang_cor = False,cmap = cmap_albula, fig_ax=None, - uid='uid', path='', aspect = 1, show_colorbar=True, show_roi_edge=False, *argv,**kwargs): - - '''show ROI on an image - image: the data frame - ROI: the interested region - center: the plot center - rwidth: the plot range around the center - - ''' - + """ if RUN_GUI: - fig = Figure(figsize=(8,8)) + fig = Figure(figsize=(8, 8)) axes = fig.add_subplot(111) elif fig_ax != None: fig, axes = fig_ax else: - fig, axes = plt.subplots( ) #plt.subplots(figsize=(8,8)) + fig, axes = plt.subplots() # plt.subplots(figsize=(8,8)) - #print( vmin, vmax) - #norm=LogNorm(vmin, vmax) + # 
print( vmin, vmax) + # norm=LogNorm(vmin, vmax) - axes.set_title( "%s_ROI_on_Image"%uid ) + axes.set_title("%s_ROI_on_Image" % uid) if log_img: - if vmin==0: + if vmin == 0: vmin += 1e-10 - vmax = max(1, vmax ) + vmax = max(1, vmax) if not show_roi_edge: - #print('here') - im,im_label = show_label_array_on_image(axes, image, ROI, imshow_cmap='viridis', - cmap=cmap,alpha=alpha, log_img=log_img, - vmin=vmin, vmax=vmax, origin="lower") + # print('here') + im, im_label = show_label_array_on_image( + axes, + image, + ROI, + imshow_cmap="viridis", + cmap=cmap, + alpha=alpha, + log_img=log_img, + vmin=vmin, + vmax=vmax, + origin="lower", + ) else: - edg = get_image_edge( ROI ) - image_ = get_image_with_roi( image, ROI, scale_factor = 2) - #fig, axes = plt.subplots( ) - show_img( image_, ax=[fig,axes], vmin=vmin, vmax=vmax, - logs= log_img, image_name= "%s_ROI_on_Image"%uid, - cmap = cmap ) - - - if rect_reqion == None: + edg = get_image_edge(ROI) + image_ = get_image_with_roi(image, ROI, scale_factor=2) + # fig, axes = plt.subplots( ) + show_img( + image_, + ax=[fig, axes], + vmin=vmin, + vmax=vmax, + logs=log_img, + image_name="%s_ROI_on_Image" % uid, + cmap=cmap, + ) + + if rect_reqion == None: if center != None: - x1,x2 = [center[1] - rwidth, center[1] + rwidth] - y1,y2 = [center[0] - rwidth, center[0] + rwidth] - axes.set_xlim( [x1,x2]) - axes.set_ylim( [y1,y2]) + x1, x2 = [center[1] - rwidth, center[1] + rwidth] + y1, y2 = [center[0] - rwidth, center[0] + rwidth] + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) else: - x1,x2,y1,y2= rect_reqion - axes.set_xlim( [x1,x2]) - axes.set_ylim( [y1,y2]) + x1, x2, y1, y2 = rect_reqion + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) if label_on: - num_qzr = len(np.unique( ROI )) -1 - for i in range( 1, num_qzr + 1 ): - ind = np.where( ROI == i)[1] - indz = np.where( ROI == i)[0] - c = '%i'%i - y_val = int( indz.mean() ) - x_val = int( ind.mean() ) - #print (xval, y) - axes.text(x_val, y_val, c, color='b',va='center', ha='center') + num_qzr = len(np.unique(ROI)) - 1 + for i in range(1, num_qzr + 1): + ind = np.where(ROI == i)[1] + indz = np.where(ROI == i)[0] + c = "%i" % i + y_val = int(indz.mean()) + x_val = int(ind.mean()) + # print (xval, y) + axes.text(x_val, y_val, c, color="b", va="center", ha="center") if show_ang_cor: - axes.text(-0.0, 0.5, '-/+180' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) - axes.text(1.0, 0.5, '0' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) - axes.text(0.5, -0.0, '-90'+ r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) - axes.text(0.5, 1.0, '90' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(-0.0, 0.5, "-/+180" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(1.0, 0.5, "0" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, -0.0, "-90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, 1.0, "90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) axes.set_aspect(aspect) - #fig.colorbar(im_label) + # fig.colorbar(im_label) if show_colorbar: if not show_roi_edge: fig.colorbar(im) if save: - fp = path + "%s_ROI_on_Image"%uid + '.png' - plt.savefig( fp, dpi=fig.dpi) - #plt.show() + fp = path + "%s_ROI_on_Image" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() if return_fig: return fig, axes, im - - -def crop_image( image, crop_mask ): - - ''' Crop the 
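
A usage sketch for show_ROI_on_image above; avg_img and ring_mask are stand-ins for an averaged detector frame and an integer-labeled ROI map, with illustrative values:

    import numpy as np

    avg_img = np.random.poisson(lam=5, size=(512, 512)).astype(float)
    ring_mask = np.zeros((512, 512), dtype=int)
    ring_mask[200:210, :] = 1   # two crude band "ROIs", labeled 1 and 2
    ring_mask[300:310, :] = 2
    fig, axes, im = show_ROI_on_image(
        avg_img,
        ring_mask,
        label_on=True,      # draw each ROI's integer label at its mean pixel position
        log_img=True,
        vmin=0.01,
        vmax=50,
        uid="demo",
        return_fig=True,    # return (fig, axes, im)
    )
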
non_zeros pixels of an image to a new image - - - ''' +def crop_image(image, crop_mask): + """Crop the non_zeros pixels of an image to a new image""" from skimage.util import crop, pad + pxlst = np.where(crop_mask.ravel())[0] dims = crop_mask.shape - imgwidthy = dims[1] #dimension in y, but in plot being x - imgwidthx = dims[0] #dimension in x, but in plot being y - #x and y are flipped??? - #matrix notation!!! - pixely = pxlst%imgwidthy - pixelx = pxlst//imgwidthy + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + # x and y are flipped??? + # matrix notation!!! + pixely = pxlst % imgwidthy + pixelx = pxlst // imgwidthy minpixelx = np.min(pixelx) minpixely = np.min(pixely) maxpixelx = np.max(pixelx) maxpixely = np.max(pixely) - crops = crop_mask*image - img_crop = crop( crops, ((minpixelx, imgwidthx - maxpixelx -1 ), - (minpixely, imgwidthy - maxpixely -1 )) ) + crops = crop_mask * image + img_crop = crop(crops, ((minpixelx, imgwidthx - maxpixelx - 1), (minpixely, imgwidthy - maxpixely - 1))) return img_crop -def get_avg_img( data_series, img_samp_index=None, sampling = 100, plot_ = False , save=False, *argv,**kwargs): - '''Get average imagef from a data_series by every sampling number to save time''' +def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs): + """Get average imagef from a data_series by every sampling number to save time""" if img_samp_index == None: - avg_img = np.average(data_series[:: sampling], axis=0) + avg_img = np.average(data_series[::sampling], axis=0) else: - avg_img = np.zeros_like( data_series[0] ) - n=0 + avg_img = np.zeros_like(data_series[0]) + n = 0 for i in img_samp_index: avg_img += data_series[i] - n +=1 - avg_img = np.array( avg_img) / n + n += 1 + avg_img = np.array(avg_img) / n if plot_: fig, ax = plt.subplots() - uid = 'uid' - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] - - im = ax.imshow(avg_img , cmap='viridis',origin='lower', - norm= LogNorm(vmin=0.001, vmax=1e2)) - #ax.set_title("Masked Averaged Image") - ax.set_title('uid= %s--Masked Averaged Image'%uid) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked Averaged Image" % uid) fig.colorbar(im) if save: - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - path = kwargs['path'] - if 'uid' in kwargs: - uid = kwargs['uid'] + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] else: - uid = 'uid' - #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' - fp = path + "uid=%s--avg-img-"%uid + '.png' - fig.savefig( fp, dpi=fig.dpi) - #plt.show() + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() return avg_img - -def check_ROI_intensity( avg_img, ring_mask, ring_number=3 , save=False, plot=True, *argv,**kwargs): - +def check_ROI_intensity(avg_img, ring_mask, ring_number=3, save=False, plot=True, *argv, **kwargs): """plot intensity versus pixel of a ring Parameters ---------- @@ -4079,68 +4464,73 @@ def check_ROI_intensity( avg_img, ring_mask, ring_number=3 , save=False, plot=Tr """ - #print('here') + # 
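
A worked example for crop_image above: the function zeroes everything outside crop_mask and then trims to the bounding box of the nonzero pixels (note the row/column bookkeeping flagged in its comments):

    import numpy as np

    img = np.arange(36).reshape(6, 6)
    mask = np.zeros((6, 6), dtype=int)
    mask[2:4, 1:5] = 1          # nonzero block: rows 2-3, columns 1-4
    print(crop_image(img, mask))
    # [[13 14 15 16]
    #  [19 20 21 22]]
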
print('here') - uid = 'uid' - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] - pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number] ) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number]) if plot: fig, ax = plt.subplots() - ax.set_title('%s--check-RIO-%s-intensity'%(uid, ring_number) ) - ax.plot( pixel[0][0] ,'bo', ls='-' ) - ax.set_ylabel('Intensity') - ax.set_xlabel('pixel') + ax.set_title("%s--check-RIO-%s-intensity" % (uid, ring_number)) + ax.plot(pixel[0][0], "bo", ls="-") + ax.set_ylabel("Intensity") + ax.set_xlabel("pixel") if save: - path = kwargs['path'] - fp = path + "%s_Mean_intensity_of_one_ROI"%uid + '.png' - fig.savefig( fp, dpi=fig.dpi) + path = kwargs["path"] + fp = path + "%s_Mean_intensity_of_one_ROI" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) if save: - path = kwargs['path'] - save_lists( [range( len( pixel[0][0] )), pixel[0][0]], label=['pixel_list', 'roi_intensity'], - filename="%s_Mean_intensity_of_one_ROI"%uid, path= path) - #plt.show() + path = kwargs["path"] + save_lists( + [range(len(pixel[0][0])), pixel[0][0]], + label=["pixel_list", "roi_intensity"], + filename="%s_Mean_intensity_of_one_ROI" % uid, + path=path, + ) + # plt.show() return pixel[0][0] -#from tqdm import tqdm -def cal_g2( image_series, ring_mask, bad_image_process, - bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None ): - '''calculation g2 by using a multi-tau algorithm''' +# from tqdm import tqdm + - noframes = len( image_series) # number of frames, not "no frames" - #num_buf = 8 # number of buffers +def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None): + """calculation g2 by using a multi-tau algorithm""" + + noframes = len(image_series) # number of frames, not "no frames" + # num_buf = 8 # number of buffers if bad_image_process: import skbeam.core.mask as mask_image - bad_img_list = np.array( bad_frame_list) - good_start - new_imgs = mask_image.bad_to_nan_gen( image_series, bad_img_list) + + bad_img_list = np.array(bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen(image_series, bad_img_list) if num_lev == None: - num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 - print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) - print ('%s frames will be processed...'%(noframes)) - print( 'Bad Frames involved!') + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + print("Bad Frames involved!") - g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm( new_imgs) ) - print( 'G2 calculation DONE!') + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(new_imgs)) + print("G2 calculation DONE!") else: if num_lev == None: - num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 - print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) - print ('%s frames will be processed...'%(noframes)) - g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series) ) - print( 'G2 calculation DONE!') + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." 
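
A quick numeric check of the default multi-tau level count used in cal_g2 above, for a hypothetical 10,000-frame series with the default num_buf = 8 buffers:

    import numpy as np

    noframes, num_buf = 10000, 8
    num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
    print(num_lev)  # 12 levels: log2(10000/7) ~ 10.5, plus the two offsets
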
% (noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series)) + print("G2 calculation DONE!") return g2, lag_steps - def run_time(t0): - '''Calculate running time of a program + """Calculate running time of a program Parameters ---------- t0: time_string, t0=time.time() @@ -4154,17 +4544,17 @@ def run_time(t0): t0=time.time() .....(the running code) run_time(t0) - ''' + """ elapsed_time = time.time() - t0 - if elapsed_time<60: - print ('Total time: %.3f sec' %(elapsed_time )) + if elapsed_time < 60: + print("Total time: %.3f sec" % (elapsed_time)) else: - print ('Total time: %.3f min' %(elapsed_time/60.)) + print("Total time: %.3f min" % (elapsed_time / 60.0)) -def trans_data_to_pd(data, label=None,dtype='array'): - ''' +def trans_data_to_pd(data, label=None, dtype="array"): + """ convert data into pandas.DataFrame Input: data: list or np.array @@ -4172,29 +4562,32 @@ def trans_data_to_pd(data, label=None,dtype='array'): dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] Output: a pandas.DataFrame - ''' - #lists a [ list1, list2...] all the list have the same length - from numpy import arange,array - import pandas as pd,sys - if dtype == 'list': - data=array(data).T - N,M=data.shape - elif dtype == 'array': - data=array(data) - N,M=data.shape + """ + # lists a [ list1, list2...] all the list have the same length + import sys + + import pandas as pd + from numpy import arange, array + + if dtype == "list": + data = array(data).T + N, M = data.shape + elif dtype == "array": + data = array(data) + N, M = data.shape else: print("Wrong data type! Now only support 'list' and 'array' tpye") - - index = arange( N ) - if label == None:label=['data%s'%i for i in range(M)] - #print label - df = pd.DataFrame( data, index=index, columns= label ) + index = arange(N) + if label == None: + label = ["data%s" % i for i in range(M)] + # print label + df = pd.DataFrame(data, index=index, columns=label) return df -def save_lists( data, label=None, filename=None, path=None, return_res = False, verbose=False): - ''' +def save_lists(data, label=None, filename=None, path=None, return_res=False, verbose=False): + """ save_lists( data, label=None, filename=None, path=None) save lists to a CSV file with filename in path @@ -4207,55 +4600,55 @@ def save_lists( data, label=None, filename=None, path=None, return_res = False, Example: save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) - ''' + """ - M,N = len(data[0]),len(data) - d = np.zeros( [N,M] ) + M, N = len(data[0]), len(data) + d = np.zeros([N, M]) for i in range(N): d[i] = data[i] - df = trans_data_to_pd(d.T, label, 'array') - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + df = trans_data_to_pd(d.T, label, "array") + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) if filename == None: - filename = 'data' - filename = os.path.join(path, filename )#+'.csv') + filename = "data" + filename = os.path.join(path, filename) # +'.csv') df.to_csv(filename) if verbose: - print('The data was saved in: %s.'%filename) + print("The data was saved in: %s." 
% filename) if return_res: return df -def get_pos_val_overlap( p1, v1, p2,v2, Nl): - '''get the overlap of v1 and v2 - p1: the index of array1 in array with total length as Nl - v1: the corresponding value of p1 - p2: the index of array2 in array with total length as Nl - v2: the corresponding value of p2 - Return: - The values in v1 with the position in overlap of p1 and p2 - The values in v2 with the position in overlap of p1 and p2 - - An example: - Nl =10 - p1= np.array( [1,3,4,6,8] ) - v1 = np.array( [10,20,30,40,50]) - p2= np.array( [ 0,2,3,5,7,8]) - v2=np.array( [10,20,30,40,50,60,70]) - - get_pos_val_overlap( p1, v1, p2,v2, Nl) - - ''' - ind = np.zeros( Nl, dtype=np.int32 ) - ind[p1] = np.arange( len(p1) ) +1 - w2 = np.where( ind[p2] )[0] - w1 = ind[ p2[w2]] -1 - return v1[w1], v2[w2] +def get_pos_val_overlap(p1, v1, p2, v2, Nl): + """get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + get_pos_val_overlap( p1, v1, p2,v2, Nl) -def save_arrays( data, label=None, dtype='array', filename=None, path=None, return_res = False,verbose=False): - ''' + """ + ind = np.zeros(Nl, dtype=np.int32) + ind[p1] = np.arange(len(p1)) + 1 + w2 = np.where(ind[p2])[0] + w1 = ind[p2[w2]] - 1 + return v1[w1], v2[w2] + + +def save_arrays(data, label=None, dtype="array", filename=None, path=None, return_res=False, verbose=False): + """ July 10, 2016, Y.G.@CHX save_arrays( data, label=None, dtype='array', filename=None, path=None): save data to a CSV file with filename in path @@ -4272,22 +4665,23 @@ def save_arrays( data, label=None, dtype='array', filename=None, path=None, retu save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir ) - ''' - df = trans_data_to_pd(data, label,dtype) - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + """ + df = trans_data_to_pd(data, label, dtype) + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) if filename == None: - filename = 'data' - filename_ = os.path.join(path, filename)# +'.csv') + filename = "data" + filename_ = os.path.join(path, filename) # +'.csv') df.to_csv(filename_) if verbose: - print( 'The file: %s is saved in %s'%(filename, path) ) - #print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) + print("The file: %s is saved in %s" % (filename, path)) + # print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) if return_res: return df -def cal_particle_g2( radius, viscosity, qr, taus, beta=0.2, T=298): - '''YG Dev Nov 20, 2017@CHX + +def cal_particle_g2(radius, viscosity, qr, taus, beta=0.2, T=298): + """YG Dev Nov 20, 2017@CHX calculate particle g2 fucntion by giving particle radius, Q , and solution viscosity using a simple exponetional model Input: @@ -4302,73 +4696,75 @@ def cal_particle_g2( radius, viscosity, qr, taus, beta=0.2, T=298): cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4) - ''' - D0 = get_diffusion_coefficient( viscosity, 
radius, T=T) - g2_q1 = np.zeros(len(qr), dtype = object) + """ + D0 = get_diffusion_coefficient(viscosity, radius, T=T) + g2_q1 = np.zeros(len(qr), dtype=object) for i, q1 in enumerate(qr): relaxation_rate = D0 * q1**2 - g2_q1[i] = simple_exponential( taus, beta=beta, relaxation_rate = relaxation_rate, baseline=1) + g2_q1[i] = simple_exponential(taus, beta=beta, relaxation_rate=relaxation_rate, baseline=1) return g2_q1 -def get_Reynolds_number( flow_rate, flow_radius, fluid_density, fluid_viscosity ): - '''May 10, 2019, Y.G.@CHX - get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta - Reynolds_number << 1000 gives a laminar flow - flow_rate: ul/s - flow_radius: mm - fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) - fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) - - return Reynolds_number - ''' - return flow_rate * 1e-6 * flow_radius * 1e-3 *2 * fluid_density/ fluid_viscosity - -def get_Deborah_number( flow_rate, beam_size, q_vector, diffusion_coefficient ): - '''May 10, 2019, Y.G.@CHX - get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) - flow_rate: ul/s - beam_size: ul - q_vector: A-1 - diffusion_coefficient: A^2/s - - return Deborah_number - ''' - return (flow_rate /beam_size) / ( diffusion_coefficient * q_vector**2 ) - - - -def get_viscosity( diffusion_coefficient , radius, T=298): - '''May 10, 2019, Y.G.@CHX - get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient - diffusion_coefficient in unit of A^2/s - radius: m - T: K - k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant - return visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) - ''' +def get_Reynolds_number(flow_rate, flow_radius, fluid_density, fluid_viscosity): + """May 10, 2019, Y.G.@CHX + get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta + Reynolds_number << 1000 gives a laminar flow + flow_rate: ul/s + flow_radius: mm + fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) + fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) - k= 1.38064852*10**(-23) - return k*T / ( 6*np.pi* diffusion_coefficient * radius) * 10**20 + return Reynolds_number + """ + return flow_rate * 1e-6 * flow_radius * 1e-3 * 2 * fluid_density / fluid_viscosity -def get_diffusion_coefficient( viscosity, radius, T=298): - '''July 10, 2016, Y.G.@CHX - get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity - viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) - radius: m - T: K - k: 1.38064852(79)×10−23 J/T, Boltzmann constant - return diffusion_coefficient in unit of A^2/s - e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: - 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s +def get_Deborah_number(flow_rate, beam_size, q_vector, diffusion_coefficient): + """May 10, 2019, Y.G.@CHX + get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) + flow_rate: ul/s + beam_size: ul + q_vector: A-1 + diffusion_coefficient: A^2/s + + return Deborah_number + """ + return (flow_rate / beam_size) / (diffusion_coefficient * q_vector**2) - get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) - ''' +def get_viscosity(diffusion_coefficient, radius, T=298): + """May 10, 2019, Y.G.@CHX + get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient + diffusion_coefficient in unit of A^2/s + radius: m + T: K + k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant + + return 
visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) + """ - k= 1.38064852*10**(-23) - return k*T / ( 6*np.pi* viscosity * radius) * 10**20 + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * diffusion_coefficient * radius) * 10**20 + + +def get_diffusion_coefficient(viscosity, radius, T=298): + """July 10, 2016, Y.G.@CHX + get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity + viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + radius: m + T: K + k: 1.38064852(79)×10−23 J/T, Boltzmann constant + + return diffusion_coefficient in unit of A^2/s + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s + + get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) + + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * viscosity * radius) * 10**20 def ring_edges(inner_radius, width, spacing=0, num_rings=None): @@ -4380,7 +4776,7 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): The number of rings, their widths, and any spacing between rings can be specified. They can be uniform or varied. - + LW 04/02/2024: fixed checking whether width and spacing are iterable Parameters @@ -4425,19 +4821,20 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): # num_rings are self-consistent and complete. try: iter(width) - width_is_list=True - except: width_is_list=False + width_is_list = True + except: + width_is_list = False try: iter(spacing) - spacing_is_list=True - except: spacing_is_list=False - + spacing_is_list = True + except: + spacing_is_list = False + # width_is_list = isinstance(width, collections.Iterable) # spacing_is_list = isinstance(spacing, collections.Iterable) - if (width_is_list and spacing_is_list): + if width_is_list and spacing_is_list: if len(width) != len(spacing) + 1: - raise ValueError("List of spacings must be one less than list " - "of widths.") + raise ValueError("List of spacings must be one less than list " "of widths.") if num_rings == None: try: num_rings = len(width) @@ -4445,15 +4842,17 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): try: num_rings = len(spacing) + 1 except TypeError: - raise ValueError("Since width and spacing are constant, " - "num_rings cannot be inferred and must be " - "specified.") + raise ValueError( + "Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified." + ) else: if width_is_list: if num_rings != len(width): raise ValueError("num_rings does not match width list") if spacing_is_list: - if num_rings-1 != len(spacing): + if num_rings - 1 != len(spacing): raise ValueError("num_rings does not match spacing list") # Now regularlize the input. 
if not width_is_list: @@ -4471,16 +4870,20 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): return edges - -def get_non_uniform_edges( centers, width = 4, number_rings=1, spacing=0, ): - ''' +def get_non_uniform_edges( + centers, + width=4, + number_rings=1, + spacing=0, +): + """ YG CHX Spe 6 get_non_uniform_edges( centers, width = 4, number_rings=3 ) Calculate the inner and outer radius of a set of non uniform distributed rings by giving ring centers For each center, there are number_rings with each of width - + LW 04/02/2024: fixed checking whether 'width' is iterable Parameters @@ -4502,276 +4905,295 @@ def get_non_uniform_edges( centers, width = 4, number_rings=1, spacing=0, ): ------- edges : array inner and outer radius for each ring - ''' + """ - if number_rings == None: + if number_rings == None: number_rings = 1 - edges = np.zeros( [len(centers)*number_rings, 2] ) - + edges = np.zeros([len(centers) * number_rings, 2]) + try: iter(width) except: - width = np.ones_like( centers ) * width + width = np.ones_like(centers) * width for i, c in enumerate(centers): - edges[i*number_rings:(i+1)*number_rings,:] = ring_edges( inner_radius = c - width[i]*number_rings/2, - width= width[i], spacing= spacing, num_rings=number_rings) + edges[i * number_rings : (i + 1) * number_rings, :] = ring_edges( + inner_radius=c - width[i] * number_rings / 2, width=width[i], spacing=spacing, num_rings=number_rings + ) return edges -def trans_tf_to_td(tf, dtype = 'dframe'): - '''July 02, 2015, Y.G.@CHX +def trans_tf_to_td(tf, dtype="dframe"): + """July 02, 2015, Y.G.@CHX Translate epoch time to string - ''' - import pandas as pd - import numpy as np + """ import datetime - '''translate time.float to time.date, + + import numpy as np + import pandas as pd + + """translate time.float to time.date, td.type dframe: a dataframe td.type list, a list - ''' - if dtype == 'dframe':ind = tf.index - else:ind = range(len(tf)) - td = np.array([ datetime.datetime.fromtimestamp(tf[i]) for i in ind ]) + """ + if dtype == "dframe": + ind = tf.index + else: + ind = range(len(tf)) + td = np.array([datetime.datetime.fromtimestamp(tf[i]) for i in ind]) return td - -def trans_td_to_tf(td, dtype = 'dframe'): - '''July 02, 2015, Y.G.@CHX +def trans_td_to_tf(td, dtype="dframe"): + """July 02, 2015, Y.G.@CHX Translate string to epoch time - ''' + """ import time + import numpy as np - '''translate time.date to time.float, + + """translate time.date to time.float, td.type dframe: a dataframe td.type list, a list - ''' - if dtype == 'dframe':ind = td.index - else:ind = range(len(td)) - #tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) - tf = np.array([ time.mktime(td[i].timetuple()) for i in ind]) + """ + if dtype == "dframe": + ind = td.index + else: + ind = range(len(td)) + # tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([time.mktime(td[i].timetuple()) for i in ind]) return tf +def get_averaged_data_from_multi_res( + multi_res, keystr="g2", different_length=True, verbose=False, cal_errorbar=False +): + """Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. 
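
A worked example for ring_edges above, mirroring the scikit-beam docstring it derives from: three rings of widths 5, 4 and 3 starting at radius 1, with gaps of 1 and 2 between them:

    edges = ring_edges(inner_radius=1, width=[5, 4, 3], spacing=[1, 2])
    print(edges)
    # [[ 1.  6.]
    #  [ 7. 11.]
    #  [13. 16.]]
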
+ keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results -def get_averaged_data_from_multi_res( multi_res, keystr='g2', different_length= True, verbose=False, - cal_errorbar=False): - '''Y.G. Dec 22, 2016 - get average data from multi-run analysis result - Parameters: - multi_res: dict, generated by function run_xpcs_xsvs_single - each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. - keystr: string, get the averaged keystr - different_length: if True, do careful average for different length results - return: - array, averaged results - - ''' + """ maxM = 0 mkeys = multi_res.keys() if not different_length: - n=0 - for i, key in enumerate( list( mkeys) ): + n = 0 + for i, key in enumerate(list(mkeys)): keystri = multi_res[key][keystr] - if i ==0: + if i == 0: keystr_average = keystri else: keystr_average += keystri - n +=1 - keystr_average /=n + n += 1 + keystr_average /= n else: length_dict = {} - D= 1 - for i, key in enumerate( list( mkeys) ): + D = 1 + for i, key in enumerate(list(mkeys)): if verbose: - print(i,key) + print(i, key) shapes = multi_res[key][keystr].shape - M=shapes[0] - if i ==0: - if len(shapes)==2: - D=2 + M = shapes[0] + if i == 0: + if len(shapes) == 2: + D = 2 maxN = shapes[1] - elif len(shapes)==3: - D=3 - maxN = shapes[2] #in case of two-time correlation + elif len(shapes) == 3: + D = 3 + maxN = shapes[2] # in case of two-time correlation if (M) not in length_dict: - length_dict[(M) ] =1 + length_dict[(M)] = 1 else: - length_dict[(M) ] += 1 - maxM = max( maxM, M ) - #print( length_dict ) + length_dict[(M)] += 1 + maxM = max(maxM, M) + # print( length_dict ) avg_count = {} - sk = np.array( sorted(length_dict) ) - for i, k in enumerate( sk ): - avg_count[k] = np.sum( np.array( [ length_dict[k] for k in sk[i:] ] ) ) - #print(length_dict, avg_count) - if D==2: - #print('here') - keystr_average = np.zeros( [maxM, maxN] ) - elif D==3: - keystr_average = np.zeros( [maxM, maxM, maxN ] ) + sk = np.array(sorted(length_dict)) + for i, k in enumerate(sk): + avg_count[k] = np.sum(np.array([length_dict[k] for k in sk[i:]])) + # print(length_dict, avg_count) + if D == 2: + # print('here') + keystr_average = np.zeros([maxM, maxN]) + elif D == 3: + keystr_average = np.zeros([maxM, maxM, maxN]) else: - keystr_average = np.zeros( [maxM] ) - for i, key in enumerate( list( mkeys) ): + keystr_average = np.zeros([maxM]) + for i, key in enumerate(list(mkeys)): keystri = multi_res[key][keystr] Mi = keystri.shape[0] - if D!=3: - keystr_average[:Mi] += keystri + if D != 3: + keystr_average[:Mi] += keystri else: - keystr_average[:Mi,:Mi,:] += keystri - if D!=3: - keystr_average[:sk[0]] /= avg_count[sk[0]] + keystr_average[:Mi, :Mi, :] += keystri + if D != 3: + keystr_average[: sk[0]] /= avg_count[sk[0]] else: - keystr_average[:sk[0],:sk[0], : ] /= avg_count[sk[0]] - for i in range( 0, len(sk)-1 ): - if D!=3: - keystr_average[sk[i]:sk[i+1]] /= avg_count[sk[i+1]] + keystr_average[: sk[0], : sk[0], :] /= avg_count[sk[0]] + for i in range(0, len(sk) - 1): + if D != 3: + keystr_average[sk[i] : sk[i + 1]] /= avg_count[sk[i + 1]] else: - keystr_average[sk[i]:sk[i+1],sk[i]:sk[i+1],:] /= avg_count[sk[i+1]] + keystr_average[sk[i] : sk[i + 1], sk[i] : sk[i + 1], :] /= avg_count[sk[i + 1]] return keystr_average -def save_g2_general( g2, taus, qr=None, qz=None, uid='uid', path=None, return_res= False ): +def save_g2_general(g2, taus, qr=None, qz=None, uid="uid", path=None, 
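
A small sketch of the different_length averaging above (the uids and array shapes are hypothetical): with one 50-lag and one 100-lag run, the first 50 lag points average both runs while the remaining lags keep only the longer run:

    import numpy as np

    multi_res = {
        "uid1": {"g2": 1.0 * np.ones((50, 3))},   # 50 lag points, 3 q rings
        "uid2": {"g2": 3.0 * np.ones((100, 3))},  # 100 lag points, 3 q rings
    }
    g2_avg = get_averaged_data_from_multi_res(multi_res, keystr="g2")
    print(g2_avg[0, 0], g2_avg[-1, 0])  # 2.0 (mean of both) and 3.0 (long run only)
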
return_res=False): + """Y.G. Dec 29, 2016 - '''Y.G. Dec 29, 2016 + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: - save g2 results, - res_pargs should contain - g2: one-time correlation function - taus, lags of g2 - qr: the qr center, same length as g2 - qz: the qz or angle center, same length as g2 - path: - uid: - - ''' + """ - df = DataFrame( np.hstack( [ (taus).reshape( len(g2),1) , g2] ) ) - t,qs = g2.shape + df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) + t, qs = g2.shape if qr is None: - qr = range( qs ) + qr = range(qs) if qz is None: - df.columns = ( ['tau'] + [str(qr_) for qr_ in qr ] ) + df.columns = ["tau"] + [str(qr_) for qr_ in qr] else: - df.columns = ( ['tau'] + [ str(qr_) +'_'+ str(qz_) for (qr_,qz_) in zip(qr,qz) ] ) + df.columns = ["tau"] + [str(qr_) + "_" + str(qz_) for (qr_, qz_) in zip(qr, qz)] - #dt =datetime.now() - #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - #if filename is None: + # if filename is None: filename = uid - #filename = 'uid=%s--g2.csv' % (uid) - #filename += '-uid=%s-%s.csv' % (uid,CurTime) - #filename += '-uid=%s.csv' % (uid) + # filename = 'uid=%s--g2.csv' % (uid) + # filename += '-uid=%s-%s.csv' % (uid,CurTime) + # filename += '-uid=%s.csv' % (uid) filename1 = os.path.join(path, filename) df.to_csv(filename1) - print( 'The correlation function is saved in %s with filename as %s'%( path, filename)) + print("The correlation function is saved in %s with filename as %s" % (path, filename)) if return_res: return df ########### -#*for g2 fit and plot +# *for g2 fit and plot + def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): - return beta * np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline + return beta * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + -def simple_exponential(x, beta, relaxation_rate, baseline=1): - '''relation_rate: unit 1/s ''' +def simple_exponential(x, beta, relaxation_rate, baseline=1): + """relation_rate: unit 1/s""" return beta * np.exp(-2 * relaxation_rate * x) + baseline -def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): - return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * relaxation_rate * x) + baseline +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * relaxation_rate * x) + baseline -def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): - return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline -def flow_para_function_with_vibration( x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): - vibration_part = (1 + amp*np.cos( 2*np.pi*freq* x) ) - Diff_part= np.exp(-2 * relaxation_rate * x) - Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 - return beta* vibration_part* Diff_part * Flow_part + baseline -def flow_para_function( x, 
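
A quick consistency check on the fit models above (parameter values are arbitrary): with alpha = 1 the stretched form reduces exactly to simple_exponential, g2(tau) = baseline + beta * exp(-2*Gamma*tau):

    import numpy as np

    taus = np.logspace(-5, 0, 60)
    g2_a = simple_exponential(taus, beta=0.2, relaxation_rate=100.0, baseline=1.0)
    g2_b = stretched_auto_corr_scat_factor(taus, beta=0.2, relaxation_rate=100.0, alpha=1.0, baseline=1.0)
    print(np.allclose(g2_a, g2_b))  # True
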
beta, relaxation_rate, flow_velocity, baseline=1): - '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )''' +def flow_para_function_with_vibration(x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = 1 + amp * np.cos(2 * np.pi * freq * x) + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * vibration_part * Diff_part * Flow_part + baseline - Diff_part= np.exp(-2 * relaxation_rate * x) - Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 - return beta*Diff_part * Flow_part + baseline +def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): + """flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )""" -def flow_para_function_explicitq( x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0 ): - '''Nov 9, 2017 Basically, make q vector to (qr, angle), + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): + """Nov 9, 2017 Basically, make q vector to (qr, angle), ###relaxation_rate is actually a diffusion rate flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) Diffusion part: np.exp( -2*D q^2 *tau ) q_ang: would be np.radians( ang - 90 ) - ''' + """ - Diff_part= np.exp(-2 * ( diffusion* qr**2 * x)**alpha ) - if flow_velocity !=0: - if np.cos( q_ang ) >= 1e-8: - Flow_part = np.pi**2/(16*x*flow_velocity*qr* abs(np.cos(q_ang)) ) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity * qr* abs(np.cos(q_ang)) ) ) )**2 + Diff_part = np.exp(-2 * (diffusion * qr**2 * x) ** alpha) + if flow_velocity != 0: + if np.cos(q_ang) >= 1e-8: + Flow_part = ( + np.pi**2 + / (16 * x * flow_velocity * qr * abs(np.cos(q_ang))) + * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity * qr * abs(np.cos(q_ang))))) ** 2 + ) else: Flow_part = 1 else: Flow_part = 1 - return beta*Diff_part * Flow_part + baseline + return beta * Diff_part * Flow_part + baseline +def get_flow_velocity(average_velocity, shape_factor): -def get_flow_velocity( average_velocity, shape_factor): + return average_velocity * (1 - shape_factor) / (1 + shape_factor) - return average_velocity * (1- shape_factor)/(1+ shape_factor) -def stretched_flow_para_function( x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): - ''' +def stretched_flow_para_function(x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): + """ flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) - ''' - Diff_part= np.exp(-2 * (relaxation_rate * x)**alpha ) - Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 - return beta*Diff_part * Flow_part + baseline + """ + Diff_part = np.exp(-2 * (relaxation_rate * x) ** alpha) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline -def get_g2_fit_general_two_steps( g2, taus, function='simple_exponential', - second_fit_range=[0,20], - sequential_fit=False, *argv,**kwargs): - ''' +def get_g2_fit_general_two_steps( + g2, taus, function="simple_exponential", second_fit_range=[0, 20], sequential_fit=False, *argv, **kwargs +): + """ Fit g2 in two steps, 
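
A sketch of the explicit-q flow model above (all values illustrative; erf is assumed to be the complex-capable scipy.special.erf these functions rely on): with the q vector perpendicular to the flow, cos(q_ang) falls below the 1e-8 cutoff and the transit term drops out, leaving pure diffusion:

    import numpy as np

    taus = np.logspace(-5, -1, 40)
    g2_par = flow_para_function_explicitq(
        taus, beta=0.2, diffusion=2e8, flow_velocity=1e4, qr=0.01, q_ang=0.0
    )
    g2_perp = flow_para_function_explicitq(
        taus, beta=0.2, diffusion=2e8, flow_velocity=1e4, qr=0.01, q_ang=np.pi / 2
    )
    # g2_perp is the pure-diffusion curve; g2_par decays faster due to the flow term
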
i) Using the "function" to fit whole g2 to get baseline and beta (contrast) ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function - ''' - g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function, sequential_fit, *argv,**kwargs) + """ + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(g2, taus, function, sequential_fit, *argv, **kwargs) guess_values = {} - for k in list (g2_fit_result[0].params.keys()): - guess_values[k] = np.array( [ g2_fit_result[i].params[k].value - for i in range( g2.shape[1] ) ]) + for k in list(g2_fit_result[0].params.keys()): + guess_values[k] = np.array([g2_fit_result[i].params[k].value for i in range(g2.shape[1])]) - if 'guess_limits' in kwargs: - guess_limits = kwargs['guess_limits'] + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] else: - guess_limits = dict( baseline =[1, 1.8], alpha=[0, 2], - beta = [0., 1], relaxation_rate= [0.001, 10000]) - - g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function ='simple_exponential', - sequential_fit= sequential_fit, fit_range=second_fit_range, - fit_variables={'baseline':False, 'beta': False, 'alpha':False,'relaxation_rate':True}, - guess_values= guess_values, guess_limits = guess_limits ) + guess_limits = dict(baseline=[1, 1.8], alpha=[0, 2], beta=[0.0, 1], relaxation_rate=[0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function="simple_exponential", + sequential_fit=sequential_fit, + fit_range=second_fit_range, + fit_variables={"baseline": False, "beta": False, "alpha": False, "relaxation_rate": True}, + guess_values=guess_values, + guess_limits=guess_limits, + ) return g2_fit_result, taus_fit, g2_fit -def get_g2_fit_general( g2, taus, function='simple_exponential', - sequential_fit=False, qval_dict = None, - ang_init = 90, *argv,**kwargs): - ''' +def get_g2_fit_general( + g2, taus, function="simple_exponential", sequential_fit=False, qval_dict=None, ang_init=90, *argv, **kwargs +): + """ Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq qval_dict: a dict with qr and ang (in unit of degrees).") @@ -4827,275 +5249,308 @@ def get_g2_fit_general( g2, taus, function='simple_exponential', g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) - ''' + """ - if 'fit_range' in kwargs.keys(): - fit_range = kwargs['fit_range'] + if "fit_range" in kwargs.keys(): + fit_range = kwargs["fit_range"] else: - fit_range=None - + fit_range = None num_rings = g2.shape[1] - if 'fit_variables' in kwargs: - additional_var = kwargs['fit_variables'] - _vars =[ k for k in list( additional_var.keys()) if additional_var[k] == False] + if "fit_variables" in kwargs: + additional_var = kwargs["fit_variables"] + _vars = [k for k in list(additional_var.keys()) if additional_var[k] == False] else: _vars = [] - if function=='simple_exponential' or function=='simple': - _vars = np.unique ( _vars + ['alpha']) - mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= list( _vars) ) - elif function=='stretched_exponential' or function=='stretched': - mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= _vars) - elif function=='stretched_vibration': - mod = Model(stretched_auto_corr_scat_factor_with_vibration)#, independent_vars= _vars) - elif function=='flow_para_function' or function=='flow_para': - mod = Model(flow_para_function)#, independent_vars= _vars) - elif 
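
A usage sketch for get_g2_fit_general, on synthetic g2 data with three ROI columns (guess numbers are illustrative). In fit_variables, True means the parameter is varied and False pins it, matching the _vars handling in this function:

    import numpy as np

    taus = np.logspace(-5, 0, 80)
    g2 = 1.0 + 0.2 * np.exp(-2 * 50.0 * taus)[:, None] * np.ones(3)  # 3 identical ROIs
    g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
        g2,
        taus,
        function="stretched",
        fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True},
        guess_values={"baseline": 1.0, "beta": 0.1, "alpha": 1.0, "relaxation_rate": 10.0},
    )
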
function=='flow_para_function_explicitq' or function=='flow_para_qang': - mod = Model(flow_para_function_explicitq)#, independent_vars= _vars) - elif function=='flow_para_function_with_vibration' or function=='flow_vibration': - mod = Model( flow_para_function_with_vibration ) + if function == "simple_exponential" or function == "simple": + _vars = np.unique(_vars + ["alpha"]) + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) + elif function == "stretched_exponential" or function == "stretched": + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) + elif function == "stretched_vibration": + mod = Model(stretched_auto_corr_scat_factor_with_vibration) # , independent_vars= _vars) + elif function == "flow_para_function" or function == "flow_para": + mod = Model(flow_para_function) # , independent_vars= _vars) + elif function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod = Model(flow_para_function_explicitq) # , independent_vars= _vars) + elif function == "flow_para_function_with_vibration" or function == "flow_vibration": + mod = Model(flow_para_function_with_vibration) else: - print ("The %s is not supported.The supported functions include simple_exponential and stretched_exponential"%function) - - mod.set_param_hint( 'baseline', min=0.5, max= 2.5 ) - mod.set_param_hint( 'beta', min=0.0, max=1.0 ) - mod.set_param_hint( 'alpha', min=0.0 ) - mod.set_param_hint( 'relaxation_rate', min=0.0, max= 1000 ) - mod.set_param_hint( 'flow_velocity', min=0) - mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) - - if 'guess_limits' in kwargs: - guess_limits = kwargs['guess_limits'] - for k in list( guess_limits.keys() ): - mod.set_param_hint( k, min= guess_limits[k][0], max= guess_limits[k][1] ) - - if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': - mod.set_param_hint( 'flow_velocity', min=0) - if function=='flow_para_function_explicitq' or function=='flow_para_qang': - mod.set_param_hint( 'flow_velocity', min=0) - mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) - if function=='stretched_vibration' or function=='flow_vibration': - mod.set_param_hint( 'freq', min=0) - mod.set_param_hint( 'amp', min=0) - - _guess_val = dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) - if 'guess_values' in kwargs: - guess_values = kwargs['guess_values'] - _guess_val.update( guess_values ) - - _beta=_guess_val['beta'] - _alpha=_guess_val['alpha'] - _relaxation_rate = _guess_val['relaxation_rate'] - _baseline= _guess_val['baseline'] - if isinstance( _beta, (np.ndarray, list) ): - _beta_=_beta[0] + print( + "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" + % function + ) + + mod.set_param_hint("baseline", min=0.5, max=2.5) + mod.set_param_hint("beta", min=0.0, max=1.0) + mod.set_param_hint("alpha", min=0.0) + mod.set_param_hint("relaxation_rate", min=0.0, max=1000) + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + for k in list(guess_limits.keys()): + mod.set_param_hint(k, min=guess_limits[k][0], max=guess_limits[k][1]) + + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + mod.set_param_hint("flow_velocity", min=0) + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod.set_param_hint("flow_velocity", min=0) + 
mod.set_param_hint("diffusion", min=0.0, max=2e8) + if function == "stretched_vibration" or function == "flow_vibration": + mod.set_param_hint("freq", min=0) + mod.set_param_hint("amp", min=0) + + _guess_val = dict(beta=0.1, alpha=1.0, relaxation_rate=0.005, baseline=1.0) + if "guess_values" in kwargs: + guess_values = kwargs["guess_values"] + _guess_val.update(guess_values) + + _beta = _guess_val["beta"] + _alpha = _guess_val["alpha"] + _relaxation_rate = _guess_val["relaxation_rate"] + _baseline = _guess_val["baseline"] + if isinstance(_beta, (np.ndarray, list)): + _beta_ = _beta[0] else: - _beta_=_beta - if isinstance( _baseline, (np.ndarray, list) ): + _beta_ = _beta + if isinstance(_baseline, (np.ndarray, list)): _baseline_ = _baseline[0] else: _baseline_ = _baseline - if isinstance( _relaxation_rate, (np.ndarray, list) ): - _relaxation_rate_= _relaxation_rate[0] + if isinstance(_relaxation_rate, (np.ndarray, list)): + _relaxation_rate_ = _relaxation_rate[0] else: - _relaxation_rate_= _relaxation_rate - if isinstance( _alpha, (np.ndarray, list) ): + _relaxation_rate_ = _relaxation_rate + if isinstance(_alpha, (np.ndarray, list)): _alpha_ = _alpha[0] else: _alpha_ = _alpha - pars = mod.make_params( beta=_beta_, alpha=_alpha_, - relaxation_rate =_relaxation_rate_, baseline= _baseline_) + pars = mod.make_params(beta=_beta_, alpha=_alpha_, relaxation_rate=_relaxation_rate_, baseline=_baseline_) - if function=='flow_para_function' or function=='flow_para': - _flow_velocity =_guess_val['flow_velocity'] - if isinstance( _flow_velocity, (np.ndarray, list) ): + if function == "flow_para_function" or function == "flow_para": + _flow_velocity = _guess_val["flow_velocity"] + if isinstance(_flow_velocity, (np.ndarray, list)): _flow_velocity_ = _flow_velocity[0] else: _flow_velocity_ = _flow_velocity - pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, - relaxation_rate =_relaxation_rate_, baseline= _baseline_) - - if function=='flow_para_function_explicitq' or function=='flow_para_qang': - _flow_velocity =_guess_val['flow_velocity'] - _diffusion =_guess_val['diffusion'] - _guess_val['qr'] = 1 - _guess_val['q_ang'] = 0 - if isinstance( _flow_velocity, (np.ndarray, list) ): + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + relaxation_rate=_relaxation_rate_, + baseline=_baseline_, + ) + + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + _flow_velocity = _guess_val["flow_velocity"] + _diffusion = _guess_val["diffusion"] + _guess_val["qr"] = 1 + _guess_val["q_ang"] = 0 + if isinstance(_flow_velocity, (np.ndarray, list)): _flow_velocity_ = _flow_velocity[0] else: _flow_velocity_ = _flow_velocity - if isinstance( _diffusion, (np.ndarray, list) ): + if isinstance(_diffusion, (np.ndarray, list)): _diffusion_ = _diffusion[0] else: _diffusion_ = _diffusion - pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, - diffusion =_diffusion_, baseline= _baseline_, - qr=1, q_ang=0 - ) - - if function=='stretched_vibration': - _freq =_guess_val['freq'] - _amp = _guess_val['amp'] - pars = mod.make_params( beta=_beta, alpha=_alpha, freq=_freq, amp = _amp, - relaxation_rate =_relaxation_rate, baseline= _baseline) - - if function=='flow_vibration': - _flow_velocity =_guess_val['flow_velocity'] - _freq =_guess_val['freq'] - _amp = _guess_val['amp'] - pars = mod.make_params( beta=_beta, freq=_freq, amp = _amp,flow_velocity=_flow_velocity, - relaxation_rate =_relaxation_rate, baseline= 
_baseline) + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=1, + q_ang=0, + ) + + if function == "stretched_vibration": + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, alpha=_alpha, freq=_freq, amp=_amp, relaxation_rate=_relaxation_rate, baseline=_baseline + ) + + if function == "flow_vibration": + _flow_velocity = _guess_val["flow_velocity"] + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, + freq=_freq, + amp=_amp, + flow_velocity=_flow_velocity, + relaxation_rate=_relaxation_rate, + baseline=_baseline, + ) for v in _vars: - pars['%s'%v].vary = False - #print( pars ) + pars["%s" % v].vary = False + # print( pars ) fit_res = [] model_data = [] for i in range(num_rings): if fit_range != None: - y_=g2[1:, i][fit_range[0]:fit_range[1]] - lags_=taus[1:][fit_range[0]:fit_range[1]] + y_ = g2[1:, i][fit_range[0] : fit_range[1]] + lags_ = taus[1:][fit_range[0] : fit_range[1]] else: - y_=g2[1:, i] - lags_=taus[1:] + y_ = g2[1:, i] + lags_ = taus[1:] mm = ~np.isnan(y_) - y = y_[mm] + y = y_[mm] lags = lags_[mm] - #print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) - #y=y_ - #lags=lags_ - #print( _relaxation_rate ) + # print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + # y=y_ + # lags=lags_ + # print( _relaxation_rate ) for k in list(pars.keys()): - #print(k, _guess_val[k] ) + # print(k, _guess_val[k] ) try: - if isinstance( _guess_val[k], (np.ndarray, list) ): + if isinstance(_guess_val[k], (np.ndarray, list)): pars[k].value = _guess_val[k][i] except: pass if True: - if isinstance( _beta, (np.ndarray, list) ): - #pars['beta'].value = _guess_val['beta'][i] - _beta_ = _guess_val['beta'][i] - if isinstance( _baseline, (np.ndarray, list) ): - #pars['baseline'].value = _guess_val['baseline'][i] - _baseline_ = _guess_val['baseline'][i] - if isinstance( _relaxation_rate, (np.ndarray, list) ): - #pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] - _relaxation_rate_ = _guess_val['relaxation_rate'][i] - if isinstance( _alpha, (np.ndarray, list) ): - #pars['alpha'].value = _guess_val['alpha'][i] - _alpha_ = _guess_val['alpha'][i] - #for k in list(pars.keys()): - #print(k, _guess_val[k] ) + if isinstance(_beta, (np.ndarray, list)): + # pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val["beta"][i] + if isinstance(_baseline, (np.ndarray, list)): + # pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val["baseline"][i] + if isinstance(_relaxation_rate, (np.ndarray, list)): + # pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val["relaxation_rate"][i] + if isinstance(_alpha, (np.ndarray, list)): + # pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val["alpha"][i] + # for k in list(pars.keys()): + # print(k, _guess_val[k] ) # pars[k].value = _guess_val[k][i] - if function=='flow_para_function_explicitq' or function=='flow_para_qang': + if function == "flow_para_function_explicitq" or function == "flow_para_qang": if qval_dict == None: print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") else: - pars = mod.make_params( - beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, - diffusion =_diffusion_, baseline= _baseline_, - qr = qval_dict[i][0], q_ang = abs(np.radians( qval_dict[i][1] - ang_init) ) ) - - - pars['qr'].vary = False - pars['q_ang'].vary = False + 
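
# How array-valued initial guesses map onto per-ROI parameters, mirroring
# the per-ring loop below; the guess arrays and ROI count are hypothetical:
import numpy as np
from lmfit import Parameters

num_rings = 3
guess_values = {
    "beta": np.array([0.20, 0.22, 0.25]),           # one guess per ROI
    "relaxation_rate": np.array([5.0, 8.0, 12.0]),  # one guess per ROI
    "baseline": 1.0,                                # a scalar applies to all
}
for i in range(num_rings):
    pars = Parameters()
    for k, v in guess_values.items():
        # arrays/lists are indexed per ring, scalars reused unchanged
        val = v[i] if isinstance(v, (np.ndarray, list)) else v
        pars.add(k, value=float(val))
    print(i, {k: p.value for k, p in pars.items()})
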
pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=qval_dict[i][0], + q_ang=abs(np.radians(qval_dict[i][1] - ang_init)), + ) + + pars["qr"].vary = False + pars["q_ang"].vary = False for v in _vars: - pars['%s'%v].vary = False + pars["%s" % v].vary = False - #if i==20: + # if i==20: # print(pars) - #print( pars ) - result1 = mod.fit(y, pars, x =lags ) - #print(qval_dict[i][0], qval_dict[i][1], y) + # print( pars ) + result1 = mod.fit(y, pars, x=lags) + # print(qval_dict[i][0], qval_dict[i][1], y) if sequential_fit: for k in list(pars.keys()): - #print( pars ) + # print( pars ) if k in list(result1.best_values.keys()): pars[k].value = result1.best_values[k] - fit_res.append( result1) - #model_data.append( result1.best_fit ) - yf=result1.model.eval(params=result1.params, x= lags_ ) - model_data.append( yf ) - return fit_res, lags_, np.array( model_data ).T - - - - -def get_short_long_labels_from_qval_dict(qval_dict, geometry='saxs'): - '''Y.G. 2016, Dec 26 - Get short/long labels from a qval_dict - Parameters - ---------- - qval_dict, dict, with key as roi number, - format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs - format as {1: [qr1], 2: [qr2] ...} for saxs - format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs - geometry: - 'saxs': a saxs with Qr partition - 'ang_saxs': a saxs with Qr and angular partition - 'gi_saxs': gisaxs with Qz, Qr - ''' - - Nqs = len( qval_dict.keys()) - len_qrz = len( list( qval_dict.values() )[0] ) - #qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) - qr_label = np.array( list( qval_dict.values() ) )[:,0] - if geometry=='gi_saxs' or geometry=='ang_saxs':# or geometry=='gi_waxs': + fit_res.append(result1) + # model_data.append( result1.best_fit ) + yf = result1.model.eval(params=result1.params, x=lags_) + model_data.append(yf) + return fit_res, lags_, np.array(model_data).T + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): + """Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] 
for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + """ + + Nqs = len(qval_dict.keys()) + len_qrz = len(list(qval_dict.values())[0]) + # qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array(list(qval_dict.values()))[:, 0] + if geometry == "gi_saxs" or geometry == "ang_saxs": # or geometry=='gi_waxs': if len_qrz < 2: - print( "please give qz or qang for the q-label") + print("please give qz or qang for the q-label") else: - #qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) - qz_label = np.array( list( qval_dict.values() ) )[:,1] + # qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array(list(qval_dict.values()))[:, 1] else: - qz_label = np.array( [0] ) + qz_label = np.array([0]) - uqz_label = np.unique( qz_label ) - num_qz = len( uqz_label) + uqz_label = np.unique(qz_label) + num_qz = len(uqz_label) - uqr_label = np.unique( qr_label ) - num_qr = len( uqr_label) + uqr_label = np.unique(qr_label) + num_qr = len(uqr_label) - #print( uqr_label, uqz_label ) - if len( uqr_label ) >= len( uqz_label ): - master_plot= 'qz' #one qz for many sub plots of each qr + # print( uqr_label, uqz_label ) + if len(uqr_label) >= len(uqz_label): + master_plot = "qz" # one qz for many sub plots of each qr else: - master_plot= 'qr' + master_plot = "qr" - mastp= master_plot - if geometry == 'ang_saxs': - mastp= 'ang' + mastp = master_plot + if geometry == "ang_saxs": + mastp = "ang" num_short = min(num_qz, num_qr) - num_long = max(num_qz, num_qr) + num_long = max(num_qz, num_qr) - #print( mastp, num_short, num_long) + # print( mastp, num_short, num_long) if num_qz != num_qr: - short_label = [qz_label,qr_label][ np.argmin( [num_qz, num_qr] ) ] - long_label = [qz_label,qr_label][ np.argmax( [num_qz, num_qr] ) ] - short_ulabel = [uqz_label,uqr_label][ np.argmin( [num_qz, num_qr] ) ] - long_ulabel = [uqz_label,uqr_label][ np.argmax( [num_qz, num_qr] ) ] + short_label = [qz_label, qr_label][np.argmin([num_qz, num_qr])] + long_label = [qz_label, qr_label][np.argmax([num_qz, num_qr])] + short_ulabel = [uqz_label, uqr_label][np.argmin([num_qz, num_qr])] + long_ulabel = [uqz_label, uqr_label][np.argmax([num_qz, num_qr])] else: short_label = qz_label - long_label = qr_label + long_label = qr_label short_ulabel = uqz_label - long_ulabel = uqr_label - #print( long_ulabel ) - #print( qz_label,qr_label ) - #print( short_label, long_label ) + long_ulabel = uqr_label + # print( long_ulabel ) + # print( qz_label,qr_label ) + # print( short_label, long_label ) - if geometry == 'saxs' or geometry == 'gi_waxs': - ind_long = [ range( num_long ) ] + if geometry == "saxs" or geometry == "gi_waxs": + ind_long = [range(num_long)] else: - ind_long = [ np.where( short_label == i)[0] for i in short_ulabel ] - - - if Nqs == 1: - long_ulabel = list( qval_dict.values() )[0] - long_label = list( qval_dict.values() )[0] - return qr_label, qz_label, num_qz, num_qr, num_short,num_long, short_label, long_label,short_ulabel,long_ulabel, ind_long, master_plot, mastp + ind_long = [np.where(short_label == i)[0] for i in short_ulabel] + + if Nqs == 1: + long_ulabel = list(qval_dict.values())[0] + long_label = list(qval_dict.values())[0] + return ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) ############################################ @@ -5103,17 +5558,32 @@ def 
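
# Illustration of the short/long label grouping on a small gi-saxs style
# qval_dict (the q values are made up): three qr values at each of two qz
# values, so qz becomes the "short" (master) label:
import numpy as np

qval_dict = {0: [0.01, 0.03], 1: [0.02, 0.03], 2: [0.03, 0.03],
             3: [0.01, 0.05], 4: [0.02, 0.05], 5: [0.03, 0.05]}  # [qr, qz]
qr_label = np.array(list(qval_dict.values()))[:, 0]
qz_label = np.array(list(qval_dict.values()))[:, 1]
print(np.unique(qr_label))  # [0.01 0.02 0.03] -> long label
print(np.unique(qz_label))  # [0.03 0.05]      -> short label
# ind_long groups ROI indices by the short label, one group per qz:
for z in np.unique(qz_label):
    print(z, np.where(qz_label == z)[0])
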
get_short_long_labels_from_qval_dict(qval_dict, geometry='saxs'): ############################################ - - -def plot_g2_general( g2_dict, taus_dict, qval_dict, g2_err_dict = None, - fit_res=None, geometry='saxs',filename='g2', - path=None, function='simple_exponential', g2_labels=None, - fig_ysize= 12, qth_interest = None, - ylabel='g2', return_fig=False, append_name='', outsize=(2000, 2400), - max_plotnum_fig=16, figsize=(10, 12), show_average_ang_saxs=True, - qphi_analysis = False, fontsize_sublabel = 12, - *argv,**kwargs): - ''' +def plot_g2_general( + g2_dict, + taus_dict, + qval_dict, + g2_err_dict=None, + fit_res=None, + geometry="saxs", + filename="g2", + path=None, + function="simple_exponential", + g2_labels=None, + fig_ysize=12, + qth_interest=None, + ylabel="g2", + return_fig=False, + append_name="", + outsize=(2000, 2400), + max_plotnum_fig=16, + figsize=(10, 12), + show_average_ang_saxs=True, + qphi_analysis=False, + fontsize_sublabel=12, + *argv, + **kwargs, +): + """ Jan 10, 2018 add g2_err_dict option to plot g2 with error bar Oct31, 2017 add qth_interest option @@ -5156,370 +5626,387 @@ def plot_g2_general( g2_dict, taus_dict, qval_dict, g2_err_dict = None, ToDoList: plot an average g2 for ang_saxs for each q - ''' + """ - if ylabel=='g2': - ylabel='g_2' - if ylabel=='g4': - ylabel='g_4' + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" - if geometry =='saxs': + if geometry == "saxs": if qphi_analysis: - geometry = 'ang_saxs' + geometry = "ang_saxs" if qth_interest != None: if not isinstance(qth_interest, list): - print('Please give a list for qth_interest') + print("Please give a list for qth_interest") else: - #g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res - qth_interest = np.array( qth_interest ) -1 + # g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array(qth_interest) - 1 g2_dict_ = {} - #taus_dict_ = {} + # taus_dict_ = {} for k in list(g2_dict.keys()): - g2_dict_[k] = g2_dict[k][:,[i for i in qth_interest]] - #for k in list(taus_dict.keys()): + g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] + # for k in list(taus_dict.keys()): # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] taus_dict_ = taus_dict - qval_dict_ = {k:qval_dict[k] for k in qth_interest} + qval_dict_ = {k: qval_dict[k] for k in qth_interest} if fit_res != None: - fit_res_ = [ fit_res[k] for k in qth_interest ] + fit_res_ = [fit_res[k] for k in qth_interest] else: fit_res_ = None else: g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res - (qr_label, qz_label, num_qz, num_qr, num_short, - num_long, short_label, long_label,short_ulabel, - long_ulabel,ind_long, master_plot, - mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) fps = [] - #$print( num_short, num_long ) + # $print( num_short, num_long ) - for s_ind in range( num_short ): - ind_long_i = ind_long[ s_ind ] - num_long_i = len( ind_long_i ) - #if show_average_ang_saxs: + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + # if show_average_ang_saxs: # if geometry=='ang_saxs': # num_long_i += 1 if RUN_GUI: fig = Figure(figsize=(10, 12)) else: - #fig = plt.figure( ) - if 
num_long_i <=4: - if master_plot != 'qz': + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": fig = plt.figure(figsize=(8, 6)) else: - if num_short>1: + if num_short > 1: fig = plt.figure(figsize=(8, 4)) else: fig = plt.figure(figsize=(10, 6)) - #print('Here') + # print('Here') elif num_long_i > max_plotnum_fig: - num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 - fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] - #print( figsize ) + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) else: - #print('Here') - if master_plot != 'qz': + # print('Here') + if master_plot != "qz": fig = plt.figure(figsize=figsize) else: fig = plt.figure(figsize=(10, 10)) - if master_plot == 'qz': - if geometry=='ang_saxs': - title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' - elif geometry=='gi_saxs': - title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + if master_plot == "qz": + if geometry == "ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" else: - title_short = '' - else: #qr - if geometry=='ang_saxs' or geometry=='gi_saxs': - title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" else: - title_short='' - #print(geometry) - #filename ='' - til = '%s:--->%s'%(filename, title_short ) - if num_long_i <=4: - plt.title( til,fontsize= 14, y =1.15) - #plt.title( til,fontsize=20, y =1.06) - #print('here') + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (filename, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + # plt.title( til,fontsize=20, y =1.06) + # print('here') else: - plt.title( til,fontsize=20, y =1.06) - #print( num_long ) - if num_long!=1: - #print( 'here') - plt.axis('off') - #sy = min(num_long_i,4) - sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) - #fig.set_size_inches(10, 12) - #fig.set_size_inches(10, fig_ysize ) + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + # fig.set_size_inches(10, 12) + # fig.set_size_inches(10, fig_ysize ) else: - sy =1 - #fig.set_size_inches(8,6) - #plt.axis('off') - sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) + sy = 1 + # fig.set_size_inches(8,6) + # plt.axis('off') + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) temp = sy sy = sx sx = temp - #print( num_long_i, sx, sy ) - #print( master_plot ) - #print(ind_long_i, len(ind_long_i) ) + # print( num_long_i, sx, sy ) + # print( master_plot ) + # print(ind_long_i, len(ind_long_i) ) - for i, l_ind in enumerate( ind_long_i ): + for i, l_ind in enumerate(ind_long_i): if num_long_i <= max_plotnum_fig: - #if s_ind ==2: + # if s_ind ==2: # print('Here') # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) - ax = fig.add_subplot(sx,sy, i + 1 ) - if sx==1: - if sy==1: - plt.axis('on') + ax = fig.add_subplot(sx, sy, i + 1) + if sx == 1: + if sy == 1: + plt.axis("on") else: - #fig_subnum = l_ind//max_plotnum_fig - 
#ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) - fig_subnum = i//max_plotnum_fig - #print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) - ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) - + # fig_subnum = l_ind//max_plotnum_fig + # ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i // max_plotnum_fig + # print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx, sy, i + 1 - fig_subnum * max_plotnum_fig) - ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) + ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) - if master_plot == 'qz' or master_plot == 'angle': - if geometry!='gi_waxs': - title_long = r'$Q_r= $'+'%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' + if master_plot == "qz" or master_plot == "angle": + if geometry != "gi_waxs": + title_long = r"$Q_r= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" else: - title_long = r'$Q_r= $'+'%i '%( long_label[l_ind] ) - #print( title_long,long_label,l_ind ) + title_long = r"$Q_r= $" + "%i " % (long_label[l_ind]) + # print( title_long,long_label,l_ind ) else: - if geometry=='ang_saxs': - #title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) - title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) #+ r'$^\circ$' + '( %d )'%(l_ind) - elif geometry=='gi_saxs': - title_long = r'$Q_z= $'+ '%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' + if geometry == "ang_saxs": + # title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = "Ang= " + "%.2f" % (long_label[l_ind]) # + r'$^\circ$' + '( %d )'%(l_ind) + elif geometry == "gi_saxs": + title_long = r"$Q_z= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" else: - title_long = '' - #print( master_plot ) - if master_plot != 'qz': - ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.1, fontsize=12) + title_long = "" + # print( master_plot ) + if master_plot != "qz": + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.1, fontsize=12) else: - ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.05, fontsize= fontsize_sublabel) - #print( geometry ) - #print( title_long ) - if qth_interest != None:#it might have a bug here, todolist!!! + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.05, fontsize=fontsize_sublabel) + # print( geometry ) + # print( title_long ) + if qth_interest != None: # it might have a bug here, todolist!!! 
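
# The subplot-grid arithmetic used here, checked in isolation: at most a
# 4-wide grid, capped at max_plotnum_fig panels per figure (the panel
# counts below are arbitrary examples):
import numpy as np

max_plotnum_fig = 16
for num_long_i in (3, 7, 16, 25):
    sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4)))
    sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy))))
    sx, sy = sy, sx  # the code swaps the two before calling add_subplot
    print(num_long_i, "panels ->", sx, "x", sy, "grid")
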
lab = sorted(list(qval_dict_.keys())) - #print( lab, l_ind) - ax.set_title(title_long + ' (%s )'%( lab[l_ind] +1), y =1.05, fontsize= 12) - for ki, k in enumerate( list(g2_dict_.keys()) ): - if ki==0: - c='b' + # print( lab, l_ind) + ax.set_title(title_long + " (%s )" % (lab[l_ind] + 1), y=1.05, fontsize=12) + for ki, k in enumerate(list(g2_dict_.keys())): + if ki == 0: + c = "b" if fit_res == None: - m='-o' + m = "-o" else: - m='o' - elif ki==1: - c='r' + m = "o" + elif ki == 1: + c = "r" if fit_res == None: - m='s' + m = "s" else: - m='-' - elif ki==2: - c='g' - m='-D' + m = "-" + elif ki == 2: + c = "g" + m = "-D" else: - c = colors[ki+2] - m= '-%s'%markers[ki+2] + c = colors[ki + 2] + m = "-%s" % markers[ki + 2] try: dumy = g2_dict_[k].shape - #print( 'here is the shape' ) + # print( 'here is the shape' ) islist = False except: - islist_n = len( g2_dict_[k] ) + islist_n = len(g2_dict_[k]) islist = True - #print( 'here is the list' ) + # print( 'here is the list' ) if islist: - for nlst in range( islist_n ): - m = '-%s'%markers[ nlst ] - #print(m) - y=g2_dict_[k][nlst][:, l_ind ] + for nlst in range(islist_n): + m = "-%s" % markers[nlst] + # print(m) + y = g2_dict_[k][nlst][:, l_ind] x = taus_dict_[k][nlst] - if ki==0: - ymin,ymax = min(y), max(y[1:]) + if ki == 0: + ymin, ymax = min(y), max(y[1:]) if g2_err_dict == None: if g2_labels == None: - ax.semilogx(x, y, m, color=c, markersize=6) + ax.semilogx(x, y, m, color=c, markersize=6) else: - #print('here ki ={} nlst = {}'.format( ki, nlst )) - if nlst==0: - ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) + # print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst == 0: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) else: - ax.semilogx(x, y, m, color=c,markersize=6) + ax.semilogx(x, y, m, color=c, markersize=6) else: - yerr= g2_err_dict[k][nlst][:, l_ind ] + yerr = g2_err_dict[k][nlst][:, l_ind] if g2_labels == None: - ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) else: - if nlst==0: - ax.errorbar(x, y, yerr=yerr, fmt=m, - color=c,markersize=6, label=g2_labels[ki]) + if nlst == 0: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) else: - ax.errorbar(x, y, yerr=yerr, fmt=m, color=c,markersize=6) - ax.set_xscale("log", nonposx='clip') - if nlst==0: - if l_ind==0: - ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + ax.set_xscale("log", nonposx="clip") + if nlst == 0: + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) else: - y=g2_dict_[k][:, l_ind ] + y = g2_dict_[k][:, l_ind] x = taus_dict_[k] - if ki==0: - ymin,ymax = min(y), max(y[1:]) + if ki == 0: + ymin, ymax = min(y), max(y[1:]) if g2_err_dict == None: if g2_labels == None: - ax.semilogx(x, y, m, color=c, markersize=6) + ax.semilogx(x, y, m, color=c, markersize=6) else: - ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) else: - yerr= g2_err_dict[k][:, l_ind ] - #print(x.shape, y.shape, yerr.shape) - #print(yerr) + yerr = g2_err_dict[k][:, l_ind] + # print(x.shape, y.shape, yerr.shape) + # print(yerr) if g2_labels == None: - ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) else: - ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6,label=g2_labels[ki] ) - ax.set_xscale("log", 
nonposx='clip') - if l_ind==0: - ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + ax.set_xscale("log", nonposx="clip") + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) if fit_res_ != None: result1 = fit_res_[l_ind] - #print (result1.best_values) - - beta = result1.best_values['beta'] - baseline = result1.best_values['baseline'] - if function=='simple_exponential' or function=='simple': - rate = result1.best_values['relaxation_rate'] - alpha =1.0 - elif function=='stretched_exponential' or function=='stretched': - rate = result1.best_values['relaxation_rate'] - alpha = result1.best_values['alpha'] - elif function=='stretched_vibration': - rate = result1.best_values['relaxation_rate'] - alpha = result1.best_values['alpha'] - freq = result1.best_values['freq'] - elif function=='flow_vibration': - rate = result1.best_values['relaxation_rate'] - freq = result1.best_values['freq'] - if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': - rate = result1.best_values['relaxation_rate'] - flow = result1.best_values['flow_velocity'] - if function=='flow_para_function_explicitq' or function=='flow_para_qang': - diff = result1.best_values['diffusion'] + # print (result1.best_values) + + beta = result1.best_values["beta"] + baseline = result1.best_values["baseline"] + if function == "simple_exponential" or function == "simple": + rate = result1.best_values["relaxation_rate"] + alpha = 1.0 + elif function == "stretched_exponential" or function == "stretched": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + elif function == "stretched_vibration": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + freq = result1.best_values["freq"] + elif function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + freq = result1.best_values["freq"] + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + flow = result1.best_values["flow_velocity"] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + diff = result1.best_values["diffusion"] qrr = short_ulabel[s_ind] - #print(qrr) + # print(qrr) rate = diff * qrr**2 - flow = result1.best_values['flow_velocity'] + flow = result1.best_values["flow_velocity"] if qval_dict_ == None: print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") else: pass - if rate!=0: - txts = r'$\tau_0$' + r'$ = %.3f$'%(1/rate) + r'$ s$' + if rate != 0: + txts = r"$\tau_0$" + r"$ = %.3f$" % (1 / rate) + r"$ s$" else: - txts = r'$\tau_0$' + r'$ = inf$' + r'$ s$' - x=0.25 - y0=0.9 + txts = r"$\tau_0$" + r"$ = inf$" + r"$ s$" + x = 0.25 + y0 = 0.9 fontsize = 12 - ax.text(x =x, y= y0, s=txts, fontsize=fontsize, transform=ax.transAxes) - #print(function) - dt=0 - if function!='flow_para_function' and function!='flow_para' and function!='flow_vibration' and function!='flow_para_qang': - txts = r'$\alpha$' + r'$ = %.3f$'%(alpha) - dt +=0.1 - #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' - ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - - txts = r'$baseline$' + r'$ = %.3f$'%( baseline) - dt +=0.1 - ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - - if function=='flow_para_function' or function=='flow_para' or 
function=='flow_vibration' or function=='flow_para_qang': - txts = r'$flow_v$' + r'$ = %.3f$'%( flow) + ax.text(x=x, y=y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + # print(function) + dt = 0 + if ( + function != "flow_para_function" + and function != "flow_para" + and function != "flow_vibration" + and function != "flow_para_qang" + ): + txts = r"$\alpha$" + r"$ = %.3f$" % (alpha) dt += 0.1 - ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - if function=='stretched_vibration' or function=='flow_vibration': - txts = r'$vibration$' + r'$ = %.1f Hz$'%( freq) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$baseline$" + r"$ = %.3f$" % (baseline) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if ( + function == "flow_para_function" + or function == "flow_para" + or function == "flow_vibration" + or function == "flow_para_qang" + ): + txts = r"$flow_v$" + r"$ = %.3f$" % (flow) dt += 0.1 - ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - - txts = r'$\beta$' + r'$ = %.3f$'%( beta ) - dt +=0.1 - ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function == "stretched_vibration" or function == "flow_vibration": + txts = r"$vibration$" + r"$ = %.1f Hz$" % (freq) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + txts = r"$\beta$" + r"$ = %.3f$" % (beta) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) - if 'ylim' in kwargs: - ax.set_ylim( kwargs['ylim']) - elif 'vlim' in kwargs: - vmin, vmax =kwargs['vlim'] + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] try: - ax.set_ylim([ymin*vmin, ymax*vmax ]) + ax.set_ylim([ymin * vmin, ymax * vmax]) except: pass else: pass - if 'xlim' in kwargs: - ax.set_xlim( kwargs['xlim']) - if num_short == 1: + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if num_short == 1: fp = path + filename else: - fp = path + filename + '_%s_%s'%(mastp, s_ind) + fp = path + filename + "_%s_%s" % (mastp, s_ind) - if append_name != '': + if append_name != "": fp = fp + append_name - fps.append( fp + '.png' ) - #if num_long_i <= 16: + fps.append(fp + ".png") + # if num_long_i <= 16: if num_long_i <= max_plotnum_fig: fig.set_tight_layout(True) - #fig.tight_layout() - #print(fig) + # fig.tight_layout() + # print(fig) try: - plt.savefig( fp + '.png', dpi=fig.dpi) + plt.savefig(fp + ".png", dpi=fig.dpi) except: - print('Can not save figure here.') + print("Can not save figure here.") else: - fps=[] + fps = [] for fn, f in enumerate(fig): f.set_tight_layout(True) - fp = path + filename + '_q_%s_%s'%(fn*16, (fn+1)*16) - if append_name != '': + fp = path + filename + "_q_%s_%s" % (fn * 16, (fn + 1) * 16) + if append_name != "": fp = fp + append_name - fps.append( fp + '.png' ) - f.savefig( fp + '.png', dpi=f.dpi) - #plt.savefig( fp + '.png', dpi=fig.dpi) - #combine each saved images together - - if (num_short !=1) or (num_long_i > 16): - outputfile = path + filename + '.png' - if append_name != '': - outputfile = path + filename + append_name + '__joint.png' + fps.append(fp + ".png") + f.savefig(fp + ".png", dpi=f.dpi) + # plt.savefig( fp + '.png', dpi=fig.dpi) + # combine each saved images together + + if (num_short != 1) or 
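
# For the explicit-q flow model, the annotated relaxation rate is rebuilt
# from the fitted diffusion coefficient as rate = D * qr**2, and the shown
# relaxation time is tau0 = 1/rate (numbers below are hypothetical):
D = 1.2e5                      # fitted "diffusion", A^2/s
qr = 0.015                     # ring q value, 1/A
rate = D * qr**2               # 27 s^-1
print("tau0 = %.4g s" % (1.0 / rate))
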
(num_long_i > 16):
+        outputfile = path + filename + ".png"
+        if append_name != "":
+            outputfile = path + filename + append_name + "__joint.png"
         else:
-            outputfile = path + filename + '__joint.png'
-        combine_images( fps, outputfile, outsize= outsize )
+            outputfile = path + filename + "__joint.png"
+        combine_images(fps, outputfile, outsize=outsize)
     if return_fig:
         return fig


-
 def power_func(x, D0, power=2):
     return D0 * x**power


-def get_q_rate_fit_general( qval_dict, rate, geometry ='saxs', weights=None, *argv,**kwargs):
-    '''
+def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv, **kwargs):
+    """
     Dec 26,2016, Y.G.@CHX
     Fit q~rate by a power law function and fit curve pass (0,0)
@@ -5538,57 +6025,78 @@ def get_q_rate_fit_general( qval_dict, rate, geometry ='saxs', weights=None, *ar
     Return:
     D0
     qrate_fit_res
-    '''
+    """

-    power_variable=False
+    power_variable = False

-    if 'fit_range' in kwargs.keys():
-        fit_range = kwargs['fit_range']
+    if "fit_range" in kwargs.keys():
+        fit_range = kwargs["fit_range"]
     else:
-        fit_range= None
+        fit_range = None

-    mod = Model( power_func )
-    #mod.set_param_hint( 'power', min=0.5, max= 10 )
-    #mod.set_param_hint( 'D0', min=0 )
-    pars = mod.make_params( power = 2, D0=1*10^(-5) )
+    mod = Model(power_func)
+    # mod.set_param_hint( 'power', min=0.5, max= 10 )
+    # mod.set_param_hint( 'D0', min=0 )
+    pars = mod.make_params(power=2, D0=1e-5)
     if power_variable:
-        pars['power'].vary = True
+        pars["power"].vary = True
     else:
-        pars['power'].vary = False
-
-    (qr_label, qz_label, num_qz, num_qr, num_short,
-    num_long, short_label, long_label,short_ulabel,
-    long_ulabel,ind_long, master_plot,
-    mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)
+        pars["power"].vary = False
+
+    (
+        qr_label,
+        qz_label,
+        num_qz,
+        num_qr,
+        num_short,
+        num_long,
+        short_label,
+        long_label,
+        short_ulabel,
+        long_ulabel,
+        ind_long,
+        master_plot,
+        mastp,
+    ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)

     Nqr = num_long
     Nqz = num_short
-    D0= np.zeros( Nqz )
-    power= 2 #np.zeros( Nqz )
-    qrate_fit_res=[]
-    #print(Nqz)
-    for i in range(Nqz):
-        ind_long_i = ind_long[ i ]
-        y = np.array( rate )[ind_long_i]
-        x = long_label[ind_long_i]
-        #print(y,x)
+    D0 = np.zeros(Nqz)
+    power = 2  # np.zeros( Nqz )
+    qrate_fit_res = []
+    # print(Nqz)
+    for i in range(Nqz):
+        ind_long_i = ind_long[i]
+        y = np.array(rate)[ind_long_i]
+        x = long_label[ind_long_i]
+        # print(y,x)
         if fit_range != None:
-            y=y[fit_range[0]:fit_range[1]]
-            x=x[fit_range[0]:fit_range[1]]
-        #print (i, y,x)
-        _result = mod.fit(y, pars, x = x ,weights=weights )
-        qrate_fit_res.append( _result )
-        D0[i] = _result.best_values['D0']
-        #power[i] = _result.best_values['power']
-        print ('The fitted diffusion coefficient D0 is: %.3e A^2S-1'%D0[i])
+            y = y[fit_range[0] : fit_range[1]]
+            x = x[fit_range[0] : fit_range[1]]
+        # print (i, y,x)
+        _result = mod.fit(y, pars, x=x, weights=weights)
+        qrate_fit_res.append(_result)
+        D0[i] = _result.best_values["D0"]
+        # power[i] = _result.best_values['power']
+        print("The fitted diffusion coefficient D0 is: %.3e A^2 s^-1" % D0[i])
     return D0, qrate_fit_res


-def plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry ='saxs', ylim = None,
-                            plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False,
-                            show_fit=True,
-                            *argv,**kwargs):
-    '''
+def plot_q_rate_fit_general(
+    qval_dict,
+    rate,
+    qrate_fit_res,
+    geometry="saxs",
+    ylim=None,
+    plot_all_range=True,
+    plot_index_range=None,
+    show_text=True,
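
# A compact, self-contained version of the q^2 ~ rate fit implemented by
# get_q_rate_fit_general above, run on synthetic data (the D0 used to
# generate the data is hypothetical):
import numpy as np
from lmfit import Model

def power_func(x, D0, power=2):
    return D0 * x**power

q = np.linspace(0.005, 0.05, 10)   # 1/A
rate = 1.3e5 * q**2                # synthetic Gamma = D0 * q^2, in 1/s

mod = Model(power_func)
pars = mod.make_params(power=2, D0=1e-5)
pars["power"].vary = False         # force the fit curve through Gamma ~ q^2
res = mod.fit(rate, pars, x=q)
print("fitted D0 = %.3e A^2/s" % res.best_values["D0"])
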
+ return_fig=False, + show_fit=True, + *argv, + **kwargs, +): + """ Dec 26,2016, Y.G.@CHX plot q~rate fitted by a power law function and fit curve pass (0,0) @@ -5606,119 +6114,133 @@ def plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry ='saxs', Otherwise, power is variable. show_fit:, bool, if False, not show the fit - ''' + """ - if 'uid' in kwargs.keys(): - uid = kwargs['uid'] + if "uid" in kwargs.keys(): + uid = kwargs["uid"] else: - uid = 'uid' - if 'path' in kwargs.keys(): - path = kwargs['path'] + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] else: - path = '' - (qr_label, qz_label, num_qz, num_qr, num_short, - num_long, short_label, long_label,short_ulabel, - long_ulabel,ind_long, master_plot, - mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) power = 2 - fig,ax = plt.subplots() - plt.title(r'$Q^%s$'%(power) + '-Rate-%s_Fit'%(uid),fontsize=20, y =1.06) + fig, ax = plt.subplots() + plt.title(r"$Q^%s$" % (power) + "-Rate-%s_Fit" % (uid), fontsize=20, y=1.06) Nqz = num_short - if Nqz!=1: - ls = '--' + if Nqz != 1: + ls = "--" else: - ls='' - for i in range(Nqz): - ind_long_i = ind_long[ i ] - y = np.array( rate )[ind_long_i] - x = long_label[ind_long_i] - D0 = qrate_fit_res[i].best_values['D0'] - #print(i, x, y, D0 ) - if Nqz!=1: - label=r'$q_z=%.5f$'%short_ulabel[i] + ls = "" + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values["D0"] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] else: - label='' - ax.plot(x**power, y, marker = 'o', ls =ls, label=label) + label = "" + ax.plot(x**power, y, marker="o", ls=ls, label=label) yfit = qrate_fit_res[i].best_fit if show_fit: if plot_all_range: - ax.plot(x**power, x**power*D0, '-r') + ax.plot(x**power, x**power * D0, "-r") else: - ax.plot( (x**power)[:len(yfit) ], yfit, '-r') + ax.plot((x**power)[: len(yfit)], yfit, "-r") if show_text: - txts = r'$D0: %.3e$'%D0 + r' $A^2$' + r'$s^{-1}$' - dy=0.1 - ax.text(x =0.15, y=.65 -dy *i, s=txts, fontsize=14, transform=ax.transAxes) - if Nqz!=1:legend = ax.legend(loc='best') + txts = r"$D0: %.3e$" % D0 + r" $A^2$" + r"$s^{-1}$" + dy = 0.1 + ax.text(x=0.15, y=0.65 - dy * i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz != 1: + legend = ax.legend(loc="best") if plot_index_range != None: - d1,d2 = plot_index_range - d2 = min( len(x)-1, d2 ) - ax.set_xlim( (x**power)[d1], (x**power)[d2] ) - ax.set_ylim( y[d1],y[d2]) + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) if ylim != None: - ax.set_ylim( ylim ) + ax.set_ylim(ylim) - ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$)") - ax.set_xlabel("$q^%s$"r'($\AA^{-2}$)'%power) - fp = path + '%s_Q_Rate'%(uid) + '_fit.png' - fig.savefig( fp, dpi=fig.dpi) + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") + ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) + fp = path + "%s_Q_Rate" % (uid) + "_fit.png" + fig.savefig(fp, dpi=fig.dpi) fig.tight_layout() if return_fig: - return fig,ax + return fig, ax -def save_g2_fit_para_tocsv( fit_res, filename, path): - '''Y.G. 
Dec 29, 2016, +def save_g2_fit_para_tocsv(fit_res, filename, path): + """Y.G. Dec 29, 2016, save g2 fitted parameter to csv file - ''' - col = list( fit_res[0].best_values.keys() ) - m,n = len( fit_res ), len( col ) - data = np.zeros( [m,n] ) - for i in range( m ): - data[i] = list( fit_res[i].best_values.values() ) - df = DataFrame( data ) + """ + col = list(fit_res[0].best_values.keys()) + m, n = len(fit_res), len(col) + data = np.zeros([m, n]) + for i in range(m): + data[i] = list(fit_res[i].best_values.values()) + df = DataFrame(data) df.columns = col - filename1 = os.path.join(path, filename) # + '.csv') + filename1 = os.path.join(path, filename) # + '.csv') df.to_csv(filename1) - print( "The g2 fitting parameters are saved in %s"%filename1) + print("The g2 fitting parameters are saved in %s" % filename1) return df - -def R_2(ydata,fit_data): - ''' Calculates R squared for a particular fit - by L.W. +def R_2(ydata, fit_data): + """Calculates R squared for a particular fit - by L.W. usage R_2(ydata,fit_data) returns R2 by L.W. Feb. 2019 - ''' - y_ave=np.average(ydata) - SS_tot=np.sum((np.array(ydata)-y_ave)**2) - #print('SS_tot: %s'%SS_tot) - SS_res=np.sum((np.array(ydata)-np.array(fit_data))**2) - #print('SS_res: %s'%SS_res) - return 1-SS_res/SS_tot - -def is_outlier(points,thresh=3.5,verbose=False): - """MAD test - """ + """ + y_ave = np.average(ydata) + SS_tot = np.sum((np.array(ydata) - y_ave) ** 2) + # print('SS_tot: %s'%SS_tot) + SS_res = np.sum((np.array(ydata) - np.array(fit_data)) ** 2) + # print('SS_res: %s'%SS_res) + return 1 - SS_res / SS_tot + + +def is_outlier(points, thresh=3.5, verbose=False): + """MAD test""" points.tolist() - if len(points) ==1: - points=points[:,None] + if len(points) == 1: + points = points[:, None] if verbose: - print('input to is_outlier is a single point...') - median = np.median(points)*np.ones(np.shape(points))#, axis=0) - - diff = (points-median)**2 - diff=np.sqrt(diff) - med_abs_deviation= np.median(diff) - modified_z_score = .6745*diff/med_abs_deviation + print("input to is_outlier is a single point...") + median = np.median(points) * np.ones(np.shape(points)) # , axis=0) + + diff = (points - median) ** 2 + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation return modified_z_score > thresh -def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + +def outlier_mask( + avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, plot=False +): """ outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) avg_img: average image data (2D) @@ -5736,67 +6258,104 @@ def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_f by LW 06/21/2023 """ hhmask = np.ones(np.shape(roi_mask)) - pc=1 - - for rn in np.arange(1,np.max(roi_mask)+1,1): - rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 - pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) - out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) - if np.nanmax(out_l)>0: # Did detect at least one outlier - ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) - if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + pc = 1 + + for rn in np.arange(1, np.max(roi_mask) + 1, 1): + rm = np.zeros(np.shape(roi_mask)) + rm = rm - 1 + rm[np.where(roi_mask == rn)] = 1 + pixel = roi.roi_pixel_values(avg_img * rm, 
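
# The modified z-score (MAD) test above in isolation:
# z = 0.6745 * |x - median| / MAD, flagged when z > thresh
# (the sample array is made up, with one obvious hot pixel):
import numpy as np

def mad_outlier(points, thresh=3.5):
    points = np.asarray(points, dtype=float)
    diff = np.abs(points - np.median(points))
    mad = np.median(diff)
    return 0.6745 * diff / mad > thresh

vals = np.array([1.0, 1.1, 0.9, 1.05, 0.95, 8.0])
print(mad_outlier(vals))  # [False False False False False  True]
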
roi_mask, [rn]) + out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) + if np.nanmax(out_l) > 0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) + if verbose: + print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) try: - upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) - if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) + if verbose: + print("upper outlier threshold: %s" % upper_outlier_threshold) except: upper_outlier_threshold = False - if verbose: print('no upper outlier threshold found') - ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + if verbose: + print("no upper outlier threshold found") + ind1 = (out_l * pixel[0][0]) > 0 + ind2 = (out_l * pixel[0][0]) < ave_roi_int try: - lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) except: lower_outlier_threshold = False - if verbose: print('no lower outlier threshold found') + if verbose: + print("no lower outlier threshold found") else: - if verbose: print('ROI #%s: no outliers detected'%rn) + if verbose: + print("ROI #%s: no outliers detected" % rn) - ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi - outlier_fraction = np.sum(out_l)/len(pixel[0][0]) - if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l) / len(pixel[0][0]) + if verbose: + print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) if outlier_fraction > maximum_outlier_fraction: - if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) - upper_outlier_threshold = False; lower_outlier_threshold = False + if verbose: + print( + "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" + % maximum_outlier_fraction + ) + upper_outlier_threshold = False + lower_outlier_threshold = False if upper_outlier_threshold: - hhmask[avg_img*rm > upper_outlier_threshold] = 0 + hhmask[avg_img * rm > upper_outlier_threshold] = 0 if lower_outlier_threshold: - hhmask[avg_img*rm < lower_outlier_threshold] = 0 + hhmask[avg_img * rm < lower_outlier_threshold] = 0 if plot: - if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) - plt.subplot(1,5,pc);pc+=1; - if pc>5: pc=1 - pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) - plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if pc == 1: + fig, ax = plt.subplots(1, 5, figsize=(24, 4)) + plt.subplot(1, 5, pc) + pc += 1 + if pc > 5: + pc = 1 + pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) + plt.plot(pixel[0][0], "bo", markersize=1.5) if upper_outlier_threshold or lower_outlier_threshold: - x=np.arange(len(out_l)) - plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + x = np.arange(len(out_l)) + plt.plot( + [x[0], x[-1]], + [ave_roi_int, ave_roi_int], + "g--", + label="ROI average: %s" % np.round(ave_roi_int, 4), + ) if upper_outlier_threshold: 
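
# A hypothetical call of outlier_mask as defined above; the arrays are
# synthetic stand-ins for the usual pipeline products (the call itself is
# left commented out because it needs skbeam's roi module):
import numpy as np

avg_img = np.random.poisson(5, (64, 64)).astype(float)
mask = np.ones_like(avg_img)
roi_mask = np.zeros_like(avg_img, dtype=int)
roi_mask[20:30, 20:30] = 1      # one square ROI, label 1
avg_img[25, 25] = 500.0         # plant a hot pixel inside the ROI
# hhmask = outlier_mask(avg_img, mask, roi_mask, outlier_threshold=7.5,
#                       maximum_outlier_fraction=0.1, verbose=True)
# hhmask should be 1 everywhere except at the planted hot pixel.
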
- ind=(out_l*pixel[0][0])> upper_outlier_threshold - plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') - plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + ind = (out_l * pixel[0][0]) > upper_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [upper_outlier_threshold, upper_outlier_threshold], + "r--", + label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) if lower_outlier_threshold: - ind=(out_l*pixel[0][0])< lower_outlier_threshold - plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') - plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) - plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) + ind = (out_l * pixel[0][0]) < lower_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [lower_outlier_threshold, lower_outlier_threshold], + "r--", + label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + plt.ylabel("Intensity") + plt.xlabel("pixel") + plt.title("ROI #: %s" % rn) + plt.legend(loc="best", fontsize=8) if plot: - fig,ax = plt.subplots() + fig, ax = plt.subplots() plt.imshow(hhmask) - hot_dark=np.nonzero(hhmask<1) - cmap = plt.cm.get_cmap('viridis') - plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) - plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier threshold: %s'%outlier_threshold) - - return hhmask \ No newline at end of file + hot_dark = np.nonzero(hhmask < 1) + cmap = plt.cm.get_cmap("viridis") + plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) + + return hhmask diff --git a/pyCHX/chx_libs.py b/pyCHX/chx_libs.py index 9f58d23..4440215 100644 --- a/pyCHX/chx_libs.py +++ b/pyCHX/chx_libs.py @@ -3,6 +3,7 @@ yuzhang@bnl.gov This module is for the necessary packages for the XPCS analysis """ + ## Import all the required packages for Data Analysis from databroker import Broker from databroker.assets.path_only_handlers import RawHandler diff --git a/pyCHX/chx_outlier_detection.py b/pyCHX/chx_outlier_detection.py index e211742..596393e 100644 --- a/pyCHX/chx_outlier_detection.py +++ b/pyCHX/chx_outlier_detection.py @@ -1,20 +1,22 @@ -def is_outlier(points,thresh=3.5,verbose=False): - """MAD test - """ +def is_outlier(points, thresh=3.5, verbose=False): + """MAD test""" points.tolist() - if len(points) ==1: - points=points[:,None] + if len(points) == 1: + points = points[:, None] if verbose: - print('input to is_outlier is a single point...') - median = np.median(points)*np.ones(np.shape(points))#, axis=0) - - diff = (points-median)**2 - diff=np.sqrt(diff) - med_abs_deviation= np.median(diff) - modified_z_score = .6745*diff/med_abs_deviation + print("input to is_outlier is a single point...") + median = np.median(points) * np.ones(np.shape(points)) # , axis=0) + + diff = (points - median) ** 2 + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation return modified_z_score > thresh -def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + +def outlier_mask( + avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, 
plot=False +): """ outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) avg_img: average image data (2D) @@ -32,67 +34,104 @@ def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_f by LW 06/21/2023 """ hhmask = np.ones(np.shape(roi_mask)) - pc=1 + pc = 1 - for rn in np.arange(1,np.max(roi_mask)+1,1): - rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 - pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) - out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) - if np.nanmax(out_l)>0: # Did detect at least one outlier - ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) - if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + for rn in np.arange(1, np.max(roi_mask) + 1, 1): + rm = np.zeros(np.shape(roi_mask)) + rm = rm - 1 + rm[np.where(roi_mask == rn)] = 1 + pixel = roi.roi_pixel_values(avg_img * rm, roi_mask, [rn]) + out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) + if np.nanmax(out_l) > 0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) + if verbose: + print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) try: - upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) - if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) + if verbose: + print("upper outlier threshold: %s" % upper_outlier_threshold) except: upper_outlier_threshold = False - if verbose: print('no upper outlier threshold found') - ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + if verbose: + print("no upper outlier threshold found") + ind1 = (out_l * pixel[0][0]) > 0 + ind2 = (out_l * pixel[0][0]) < ave_roi_int try: - lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) except: lower_outlier_threshold = False - if verbose: print('no lower outlier threshold found') + if verbose: + print("no lower outlier threshold found") else: - if verbose: print('ROI #%s: no outliers detected'%rn) + if verbose: + print("ROI #%s: no outliers detected" % rn) - ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi - outlier_fraction = np.sum(out_l)/len(pixel[0][0]) - if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l) / len(pixel[0][0]) + if verbose: + print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) if outlier_fraction > maximum_outlier_fraction: - if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) - upper_outlier_threshold = False; lower_outlier_threshold = False + if verbose: + print( + "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" + % maximum_outlier_fraction + ) + upper_outlier_threshold = False + lower_outlier_threshold = False if upper_outlier_threshold: - hhmask[avg_img*rm > upper_outlier_threshold] = 0 + hhmask[avg_img * rm > 
upper_outlier_threshold] = 0 if lower_outlier_threshold: - hhmask[avg_img*rm < lower_outlier_threshold] = 0 + hhmask[avg_img * rm < lower_outlier_threshold] = 0 if plot: - if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) - plt.subplot(1,5,pc);pc+=1; - if pc>5: pc=1 - pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) - plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if pc == 1: + fig, ax = plt.subplots(1, 5, figsize=(24, 4)) + plt.subplot(1, 5, pc) + pc += 1 + if pc > 5: + pc = 1 + pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) + plt.plot(pixel[0][0], "bo", markersize=1.5) if upper_outlier_threshold or lower_outlier_threshold: - x=np.arange(len(out_l)) - plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + x = np.arange(len(out_l)) + plt.plot( + [x[0], x[-1]], + [ave_roi_int, ave_roi_int], + "g--", + label="ROI average: %s" % np.round(ave_roi_int, 4), + ) if upper_outlier_threshold: - ind=(out_l*pixel[0][0])> upper_outlier_threshold - plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') - plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + ind = (out_l * pixel[0][0]) > upper_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [upper_outlier_threshold, upper_outlier_threshold], + "r--", + label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) if lower_outlier_threshold: - ind=(out_l*pixel[0][0])< lower_outlier_threshold - plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') - plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) - plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) + ind = (out_l * pixel[0][0]) < lower_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [lower_outlier_threshold, lower_outlier_threshold], + "r--", + label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + plt.ylabel("Intensity") + plt.xlabel("pixel") + plt.title("ROI #: %s" % rn) + plt.legend(loc="best", fontsize=8) if plot: - fig,ax = plt.subplots() + fig, ax = plt.subplots() plt.imshow(hhmask) - hot_dark=np.nonzero(hhmask<1) - cmap = plt.cm.get_cmap('viridis') - plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) - plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier threshold: %s'%outlier_threshold) + hot_dark = np.nonzero(hhmask < 1) + cmap = plt.cm.get_cmap("viridis") + plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) return hhmask diff --git a/pyCHX/chx_speckle.py b/pyCHX/chx_speckle.py index c0d78d9..a6eb8f3 100644 --- a/pyCHX/chx_speckle.py +++ b/pyCHX/chx_speckle.py @@ -681,9 +681,7 @@ def fit_xsvs1( # print ( rois ) if func == "bn": - result = mod.fit( - spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12 - ) + result = mod.fit(spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12) elif func == "gm": result = mod.fit( spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j, M=20 diff --git a/pyCHX/chx_specklecp.py b/pyCHX/chx_specklecp.py index 187cef4..d03ea3b 100644 --- a/pyCHX/chx_specklecp.py +++ b/pyCHX/chx_specklecp.py @@ -1778,9 
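
# The "bn" model fitted here is, presumably, the negative-binomial photon
# count distribution used in XSVS, P(k; K, M) with mean count K and M
# coherence modes; a standalone evaluation with hypothetical K and M:
import numpy as np
from scipy.special import gammaln

def negative_binomial(k, K, M):
    # ln P(k) = lnGamma(k+M) - lnGamma(k+1) - lnGamma(M)
    #           - k*ln(1 + M/K) - M*ln(1 + K/M)
    lnp = (gammaln(k + M) - gammaln(k + 1) - gammaln(M)
           - k * np.log(1 + M / K) - M * np.log(1 + K / M))
    return np.exp(lnp)

k = np.arange(0, 60)
p = negative_binomial(k, K=5.0, M=12.0)
print(p.sum())  # ~1, up to truncation of the tail
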
diff --git a/pyCHX/chx_speckle.py b/pyCHX/chx_speckle.py
index c0d78d9..a6eb8f3 100644
--- a/pyCHX/chx_speckle.py
+++ b/pyCHX/chx_speckle.py
@@ -681,9 +681,7 @@ def fit_xsvs1(
             #  print ( rois )
 
             if func == "bn":
-                result = mod.fit(
-                    spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12
-                )
+                result = mod.fit(spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12)
             elif func == "gm":
                 result = mod.fit(
                     spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j, M=20
diff --git a/pyCHX/chx_specklecp.py b/pyCHX/chx_specklecp.py
index 187cef4..d03ea3b 100644
--- a/pyCHX/chx_specklecp.py
+++ b/pyCHX/chx_specklecp.py
@@ -1778,9 +1778,7 @@ def fit_xsvs1(
             #  print ( rois )
 
             if func == "bn":
-                result = mod.fit(
-                    spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12
-                )
+                result = mod.fit(spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12)
             elif func == "gm":
                 result = mod.fit(
                     spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j, M=20
diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py
index 9755142..2c9b9e3 100644
--- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py
+++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py
@@ -1,16 +1,21 @@
-from pyCHX.chx_packages import *
-from pyCHX.chx_libs import markers, colors
-#from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict
-#RUN_GUI = False
-#from pyCHX.chx_libs import markers
-from IPython import get_ipython
 import pandas as pds
+
+# from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict
+# RUN_GUI = False
+# from pyCHX.chx_libs import markers
+from IPython import get_ipython
+
+from pyCHX.chx_libs import colors, markers
+from pyCHX.chx_packages import *
+
 ip = get_ipython()
-ip.run_line_magic("run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb")
+ip.run_line_magic(
+    "run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb"
+)
+
 
-def get_t_iqc_uids( uid_list, setup_pargs, slice_num= 10, slice_width= 1):
-    '''Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids
+def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1):
+    """Get Iq at different time edges (defined by slice_num and slice_width) for a list of uids
     Input:
        uid_list: list of string (uid)
       setup_pargs: dict, for calculation of Iq, the key of this dict should include
     Output:
        qs: dict, with uid as key, with value as q values
        iqsts:dict, with uid as key, with value as iq values
-       tstamp:dict, with uid as key, with value as time values
-
-    '''
+       tstamp:dict, with uid as key, with value as time values
+
+    """
     iqsts = {}
     tstamp = {}
     qs = {}
     label = []
     for uid in uid_list:
-        md = get_meta_data( uid )
-        luid = md['uid']
-        timeperframe = md['cam_acquire_period']
-        N = md['cam_num_images']
-        filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%luid
+        md = get_meta_data(uid)
+        luid = md["uid"]
+        timeperframe = md["cam_acquire_period"]
+        N = md["cam_num_images"]
+        filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % luid
         good_start = 5
-        FD = Multifile(filename, good_start, N )
-        Nimg = FD.end - FD.beg
-        time_edge = create_time_slice( Nimg, slice_num= slice_num, slice_width= slice_width, edges = None )
-        time_edge = np.array( time_edge ) + good_start
-        #print( time_edge )
-        tstamp[uid] = time_edge[:,0] * timeperframe
-        qpt, iqsts[uid], qt = get_t_iqc( FD, time_edge, None, pargs=setup_pargs, nx=1500 )
-        qs[uid] = qt
-
-    return qs, iqsts, tstamp
+        FD = Multifile(filename, good_start, N)
+        Nimg = FD.end - FD.beg
+        time_edge = create_time_slice(Nimg, slice_num=slice_num, slice_width=slice_width, edges=None)
+        time_edge = np.array(time_edge) + good_start
+        # print( time_edge )
+        tstamp[uid] = time_edge[:, 0] * timeperframe
+        qpt, iqsts[uid], qt = get_t_iqc(FD, time_edge, None, pargs=setup_pargs, nx=1500)
+        qs[uid] = qt
+    return qs, iqsts, tstamp
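
# A short sketch of calling get_t_iqc_uids() above, assuming uid_list and
# setup_pargs are already defined in the analysis session:
qs, iqsts, tstamp = get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1)
for uid in qs:
    # one I(q) curve per time slice, with the slice start times in tstamp[uid]
    print(uid, iqsts[uid].shape, tstamp[uid])
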
 
-def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf='' ):
-    '''plot q2~Iq at differnt time'''
+def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf=""):
+    """plot q2~Iq at different times"""
     if ax is None:
         fig, ax = plt.subplots()
     q = qt
     for i in range(iqst.shape[0]):
         yi = iqst[i] * q**2
-        time_labeli = perf+'time_%s s'%( round( tstamp[i], 3) )
-        plot1D( x = q, y = yi, legend= time_labeli, xlabel='Q (A-1)', ylabel='I(q)*Q^2', title='I(q)*Q^2 ~ time',
-               m=markers[i], c = colors[i], ax=ax, ylim=[ -0.001, 0.005]) #, xlim=[0.007,0.1] )
+        time_labeli = perf + "time_%s s" % (round(tstamp[i], 3))
+        plot1D(
+            x=q,
+            y=yi,
+            legend=time_labeli,
+            xlabel="Q (A-1)",
+            ylabel="I(q)*Q^2",
+            title="I(q)*Q^2 ~ time",
+            m=markers[i],
+            c=colors[i],
+            ax=ax,
+            ylim=[-0.001, 0.005],
+        )  # , xlim=[0.007,0.1] )
 
 
-def plot_t_iqc_uids( qs, iqsts, tstamps ):
-    '''plot q2~Iq at differnt time for a uid list
-    '''
+def plot_t_iqc_uids(qs, iqsts, tstamps):
+    """plot q2~Iq at different times for a uid list"""
     keys = list(qs.keys())
     fig, ax = plt.subplots()
     for uid in keys:
         qt = qs[uid]
         iqst = iqsts[uid]
-        tstamp = tstamps[uid]
-        plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + '_' )
+        tstamp = tstamps[uid]
+        plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + "_")
 
 
-def plot_entries_from_csvlist( csv_list, uid_list, inDir, key = 'g2', qth = 1, legend_size=8,
-                              yshift= 0.01, ymulti=1, xlim=None, ylim=None,uid_length=None,
-                              legend=None, fp_fulluid=True ):
-
-    '''
+def plot_entries_from_csvlist(
+    csv_list,
+    uid_list,
+    inDir,
+    key="g2",
+    qth=1,
+    legend_size=8,
+    yshift=0.01,
+    ymulti=1,
+    xlim=None,
+    ylim=None,
+    uid_length=None,
+    legend=None,
+    fp_fulluid=True,
+):
+    """
     YG Feb2, 2018, make yshift be also a list
-
+
     YG June 9, 2017@CHX
     YG Sep 29, 2017@CHX.
     plot entries for a list of csvs
     Input:
         csv_list: list, a list of csv filenames (string)
         inDir: string, imported folder for saved analysis results
-        key: string, plot entry, surport
-             'g2' for one-time,
+        key: string, plot entry, supports
+             'g2' for one-time,
              'iq' for q~iq
-             'mean_int_sets' for mean intensity of each roi as a function of frame
+             'mean_int_sets' for mean intensity of each roi as a function of frame
             TODOLIST:#also can plot the following
-            dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst',
-            'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids',
-            'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus',
-            'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md'])
+            dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst',
+            'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids',
+            'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus',
+            'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md'])
         qth: integer, the q index of interest
         yshift: float, values of shift in y direction
         xlim: [x1,x2], for plot x limit
@@ -105,90 +128,143 @@ def plot_entries_from_csvlist( csv_list, uid_list, inDir, key = 'g2',  qth = 1,
     Example:
     uid_list = ['5492b9', '54c5e0']
     plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2])
-    '''
-
+    """
+
     uid_dict = {}
-    fig, ax =plt.subplots()
+    fig, ax = plt.subplots()
     for uid in uid_list:
         if uid_length is not None:
             uid_ = uid[:uid_length]
         else:
-            uid_=uid
-        #print(uid_)
-        uid_dict[uid_] = get_meta_data( uid )['uid']
-    #for i, u in enumerate( list( uid_dict.keys() )):
-
-    for i,fp in enumerate( list(csv_list)):
-        u = uid_list[i]  #print(u)
+            uid_ = uid
+        # print(uid_)
+        uid_dict[uid_] = get_meta_data(uid)["uid"]
+    # for i, u in enumerate( list( uid_dict.keys() )):
+
+    for i, fp in enumerate(list(csv_list)):
+        u = uid_list[i]  # 
print(u) + inDiru = inDir + u + "/" if fp_fulluid: - inDiru = inDir + uid_dict[u] + '/' + inDiru = inDir + uid_dict[u] + "/" else: - inDiru = inDir + u + '/' - d = pds.read_csv( inDiru + fp ) - #print(d) - - if key == 'g2': - taus = d['tau'][1:] - col = d.columns[qth +1] - #print( qth+1, col ) - y= d[col][1:] + inDiru = inDir + u + "/" + d = pds.read_csv(inDiru + fp) + # print(d) + + if key == "g2": + taus = d["tau"][1:] + col = d.columns[qth + 1] + # print( qth+1, col ) + y = d[col][1:] if legend is None: - leg=u + leg = u else: - leg='uid=%s-->'%u+legend[i] - if isinstance(yshift,list): + leg = "uid=%s-->" % u + legend[i] + if isinstance(yshift, list): yshift_ = yshift[i] ii = i + 1 else: yshift_ = yshift ii = i - plot1D( x = taus, y=y + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, - xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) - title='Q = %s'%(col) + plot1D( + x=taus, + y=y + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=True, + legend=leg, + xlabel="t (sec)", + ylabel="g2", + legend_size=legend_size, + ) + title = "Q = %s" % (col) ax.set_title(title) - elif key=='imgsum': - y = total_res[key] - plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, - xlabel='Frame', ylabel='imgsum',) - - elif key == 'iq': - x= total_res['q_saxs'] - y= total_res['iq_saxs'] - plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, - legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) + elif key == "imgsum": + y = total_res[key] + plot1D( + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="Frame", + ylabel="imgsum", + ) + + elif key == "iq": + x = total_res["q_saxs"] + y = total_res["iq_saxs"] + plot1D( + x=x, + y=y * ymulti[i] + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + logy=True, + legend=u, + xlabel="Q " r"($\AA^{-1}$)", + ylabel="I(q)", + ) else: - d = total_res[key][:,qth] - plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, - xlabel= 'xx', ylabel=key ) - if key=='mean_int_sets':ax.set_xlabel( 'frame ') - if xlim is not None:ax.set_xlim(xlim) - if ylim is not None:ax.set_ylim(ylim) - return fig,ax - - -def plot_entries_from_uids( uid_list, inDir, key= 'g2', qth = 1, legend_size=8, - yshift= 0.01, ymulti=1, xlim=None, ylim=None,legend=None, uid_length = None, filename_list=None, fp_fulluid=False, fp_append = None ):#,title='' ): - - ''' + d = total_res[key][:, qth] + plot1D( + x=np.arange(len(d)), + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="xx", + ylabel=key, + ) + if key == "mean_int_sets": + ax.set_xlabel("frame ") + if xlim is not None: + ax.set_xlim(xlim) + if ylim is not None: + ax.set_ylim(ylim) + return fig, ax + + +def plot_entries_from_uids( + uid_list, + inDir, + key="g2", + qth=1, + legend_size=8, + yshift=0.01, + ymulti=1, + xlim=None, + ylim=None, + legend=None, + uid_length=None, + filename_list=None, + fp_fulluid=False, + fp_append=None, +): # ,title='' ): + """ YG Feb2, 2018, make yshift be also a list - + YG June 9, 2017@CHX YG Sep 29, 2017@CHX. 
plot enteries for a list uids Input: uid_list: list, a list of uid (string) inDir: string, imported folder for saved analysis results - key: string, plot entry, surport - 'g2' for one-time, + key: string, plot entry, surport + 'g2' for one-time, 'iq' for q~iq - 'mean_int_sets' for mean intensity of each roi as a function of frame + 'mean_int_sets' for mean intensity of each roi as a function of frame TODOLIST:#also can plot the following - dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', - 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', - 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', - 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) qth: integer, the intesrest q number yshift: float, values of shift in y direction xlim: [x1,x2], for plot x limit @@ -198,78 +274,112 @@ def plot_entries_from_uids( uid_list, inDir, key= 'g2', qth = 1, legend_size=8 Example: uid_list = ['5492b9', '54c5e0'] plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) - ''' - + """ + uid_dict = {} - fig, ax =plt.subplots() + fig, ax = plt.subplots() for uid in uid_list: if uid_length is not None: uid_ = uid[:uid_length] else: - uid_=uid - #print(uid_) - uid_dict[uid_] = get_meta_data( uid )['uid'] - #for i, u in enumerate( list( uid_dict.keys() )): - for i,u in enumerate( list(uid_list)): - #print(u) - if isinstance(yshift,list): + uid_ = uid + # print(uid_) + uid_dict[uid_] = get_meta_data(uid)["uid"] + # for i, u in enumerate( list( uid_dict.keys() )): + for i, u in enumerate(list(uid_list)): + # print(u) + if isinstance(yshift, list): yshift_ = yshift[i] ii = i + 1 else: yshift_ = yshift - ii = i + ii = i if uid_length is not None: - u = u[:uid_length] - inDiru = inDir + u + '/' + u = u[:uid_length] + inDiru = inDir + u + "/" if fp_fulluid: - inDiru = inDir + uid_dict[u] + '/' + inDiru = inDir + uid_dict[u] + "/" else: - inDiru = inDir + u + '/' + inDiru = inDir + u + "/" if filename_list is None: - if fp_append is not None: - filename = 'uid=%s%s_Res.h5'%(uid_dict[u],fp_append ) - else: - filename = 'uid=%s_Res.h5'%uid_dict[u] + if fp_append is not None: + filename = "uid=%s%s_Res.h5" % (uid_dict[u], fp_append) + else: + filename = "uid=%s_Res.h5" % uid_dict[u] else: - filename = filename_list[i] - total_res = extract_xpcs_results_from_h5( filename = filename, - import_dir = inDiru, exclude_keys = ['g12b'] ) - if key=='g2': - d = total_res[key][1:,qth] - taus = total_res['taus'][1:] + filename = filename_list[i] + total_res = extract_xpcs_results_from_h5(filename=filename, import_dir=inDiru, exclude_keys=["g12b"]) + if key == "g2": + d = total_res[key][1:, qth] + taus = total_res["taus"][1:] if legend is None: - leg=u + leg = u else: - leg='uid=%s-->'%u+legend[i] - plot1D( x = taus, y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, - xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) - title='Q = %s'%(total_res['qval_dict'][qth]) + leg = "uid=%s-->" % u + legend[i] + plot1D( + x=taus, + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=True, + legend=leg, + xlabel="t (sec)", + ylabel="g2", + legend_size=legend_size, + ) + title = "Q = %s" % (total_res["qval_dict"][qth]) ax.set_title(title) - elif key=='imgsum': - d = 
total_res[key] - plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, - xlabel='Frame', ylabel='imgsum',) - - elif key == 'iq': - - x= total_res['q_saxs'] - y= total_res['iq_saxs'] - plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, - legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) + elif key == "imgsum": + d = total_res[key] + plot1D( + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="Frame", + ylabel="imgsum", + ) + + elif key == "iq": + + x = total_res["q_saxs"] + y = total_res["iq_saxs"] + plot1D( + x=x, + y=y * ymulti[i] + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + logy=True, + legend=u, + xlabel="Q " r"($\AA^{-1}$)", + ylabel="I(q)", + ) else: - d = total_res[key][:,qth] - plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, - xlabel= 'xx', ylabel=key ) - if key=='mean_int_sets':ax.set_xlabel( 'frame ') - if xlim is not None:ax.set_xlim(xlim) - if ylim is not None:ax.set_ylim(ylim) - return fig,ax - - - - - + d = total_res[key][:, qth] + plot1D( + x=np.arange(len(d)), + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="xx", + ylabel=key, + ) + if key == "mean_int_sets": + ax.set_xlabel("frame ") + if xlim is not None: + ax.set_xlim(xlim) + if ylim is not None: + ax.set_ylim(ylim) + return fig, ax #################################################################################################### @@ -277,11 +387,8 @@ def plot_entries_from_uids( uid_list, inDir, key= 'g2', qth = 1, legend_size=8 ################################################################################################# - - - -def get_iq_from_uids( uids, mask, setup_pargs ): - ''' Y.G. developed July 17, 2017 @CHX +def get_iq_from_uids(uids, mask, setup_pargs): + """Y.G. 
developed July 17, 2017 @CHX Get q-Iq of a uids dict, each uid could corrrespond one frame or a time seriers uids: dict, val: meaningful decription, key: a list of uids mask: bool-type 2D array @@ -293,367 +400,415 @@ def get_iq_from_uids( uids, mask, setup_pargs ): 'exposuretime': 0.99998999, 'lambda_': 1.2845441, 'path': '/XF11ID/analysis/2017_2/yuzhang/Results/Yang_Pressure/', - - ''' - Nuid = len( np.concatenate( np.array( list(uids.values()) ) ) ) - label = np.zeros( [ Nuid+1], dtype=object) - img_data = {} #np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]]) - - n = 0 + + """ + Nuid = len(np.concatenate(np.array(list(uids.values())))) + label = np.zeros([Nuid + 1], dtype=object) + img_data = {} # np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]]) + + n = 0 for k in list(uids.keys()): for uid in uids[k]: - - uidstr = 'uid=%s'%uid + + uidstr = "uid=%s" % uid sud = get_sid_filenames(db[uid]) - #print(sud) - md = get_meta_data( uid ) - imgs = load_data( uid, md['detector'], reverse= True ) - md.update( imgs.md ); - Nimg = len(imgs); - if Nimg !=1: - filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%sud[1] - mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, - force_compress= False, para_compress= True, bad_pixel_threshold = 1e14, - bins=1, num_sub= 100, num_max_para_process= 500, with_pickle=True ) + # print(sud) + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + md.update(imgs.md) + Nimg = len(imgs) + if Nimg != 1: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % sud[1] + mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md, + filename, + force_compress=False, + para_compress=True, + bad_pixel_threshold=1e14, + bins=1, + num_sub=100, + num_max_para_process=500, + with_pickle=True, + ) else: avg_img = imgs[0] - show_img( avg_img, vmin=0.00001, vmax= 1e1, logs=True, aspect=1, #save_format='tif', - image_name= uidstr + '_img_avg', save=True, - path=setup_pargs['path'], cmap = cmap_albula ) - - setup_pargs['uid'] = uidstr - - qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img, mask, - pargs= setup_pargs, save=True ) - if n ==0: - iqs = np.zeros( [ len(q_saxs), Nuid+1]) - iqs[:,0] = q_saxs - label[0] = 'q' - img_data[ k + '_'+ uid ] = avg_img - iqs[:,n+1] = iq_saxs - label[n+1] = k + '_'+ uid - n +=1 - plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, - xlim=[q_saxs.min(), q_saxs.max()*0.9], ylim = [iq_saxs.min(), iq_saxs.max()] ) - if 'filename' in list(setup_pargs.keys()): - filename = setup_pargs['filename'] + show_img( + avg_img, + vmin=0.00001, + vmax=1e1, + logs=True, + aspect=1, # save_format='tif', + image_name=uidstr + "_img_avg", + save=True, + path=setup_pargs["path"], + cmap=cmap_albula, + ) + + setup_pargs["uid"] = uidstr + + qp_saxs, iq_saxs, q_saxs = get_circular_average(avg_img, mask, pargs=setup_pargs, save=True) + if n == 0: + iqs = np.zeros([len(q_saxs), Nuid + 1]) + iqs[:, 0] = q_saxs + label[0] = "q" + img_data[k + "_" + uid] = avg_img + iqs[:, n + 1] = iq_saxs + label[n + 1] = k + "_" + uid + n += 1 + plot_circular_average( + qp_saxs, + iq_saxs, + q_saxs, + pargs=setup_pargs, + xlim=[q_saxs.min(), q_saxs.max() * 0.9], + ylim=[iq_saxs.min(), iq_saxs.max()], + ) + if "filename" in list(setup_pargs.keys()): + filename = setup_pargs["filename"] else: - filename = 'qIq.csv' - pd = save_arrays( iqs, label=label, dtype='array', filename= filename, - path= setup_pargs['path'], return_res=True) + filename = "qIq.csv" + pd = 
save_arrays(iqs, label=label, dtype="array", filename=filename, path=setup_pargs["path"], return_res=True) return pd, img_data - - - -def wait_func( wait_time = 2 ): - print( 'Waiting %s secdons for upcoming data...'%wait_time) - time.sleep( wait_time) - #print( 'Starting to do something here...') - -def wait_data_acquistion_finish( uid, wait_time = 2, max_try_num = 3 ): - '''check the completion of a data uid acquistion - Parameter: - uid: - wait_time: the waiting step in unit of second - check_func: the function to check the completion - max_try_num: the maximum number for waiting - Return: - True: completion - False: not completion (include waiting time exceeds the max_wait_time) - - ''' + + +def wait_func(wait_time=2): + print("Waiting %s secdons for upcoming data..." % wait_time) + time.sleep(wait_time) + # print( 'Starting to do something here...') + + +def wait_data_acquistion_finish(uid, wait_time=2, max_try_num=3): + """check the completion of a data uid acquistion + Parameter: + uid: + wait_time: the waiting step in unit of second + check_func: the function to check the completion + max_try_num: the maximum number for waiting + Return: + True: completion + False: not completion (include waiting time exceeds the max_wait_time) + + """ FINISH = False Fake_FINISH = True - w = 0 - sleep_time = 0 - while( not FINISH): + w = 0 + sleep_time = 0 + while not FINISH: try: - get_meta_data( uid ) + get_meta_data(uid) FINISH = True - print( 'The data acquistion finished.') - print( 'Starting to do something here...') - except: - wait_func( wait_time = wait_time ) + print("The data acquistion finished.") + print("Starting to do something here...") + except: + wait_func(wait_time=wait_time) w += 1 - print('Try number: %s'%w) - if w> max_try_num: - print( 'There could be something going wrong with data acquistion.') - print( 'Force to terminate after %s tries.'%w) + print("Try number: %s" % w) + if w > max_try_num: + print("There could be something going wrong with data acquistion.") + print("Force to terminate after %s tries." % w) FINISH = True Fake_FINISH = False - sleep_time += wait_time - return FINISH * Fake_FINISH #, sleep_time - -def get_uids_by_range( start_uidth=-1, end_uidth = 0 ): - '''Y.G. Dec 22, 2016 - A wrap funciton to find uids by giving start and end uid number, i.e. -10, -1 - Return: - uids: list, uid with 8 character length - fuids: list, uid with full length - - ''' - hdrs = list([ db[n] for n in range(start_uidth, end_uidth)] ) - if len(hdrs)!=0: - print ('Totally %s uids are found.'%(len(hdrs))) - - uids=[] #short uid - fuids=[] #full uid - for hdr in hdrs: - fuid = hdr['start']['uid'] - uids.append( fuid[:8] ) - fuids.append( fuid ) - uids=uids[::-1] - fuids=fuids[::-1] - return np.array(uids), np.array(fuids) - - -def get_uids_in_time_period( start_time, stop_time ): - '''Y.G. Dec 22, 2016 - A wrap funciton to find uids by giving start and end time - Return: - uids: list, uid with 8 character length - fuids: list, uid with full length - - ''' - hdrs = list( db(start_time= start_time, stop_time = stop_time) ) - if len(hdrs)!=0: - print ('Totally %s uids are found.'%(len(hdrs))) - - uids=[] #short uid - fuids=[] #full uid - for hdr in hdrs: - fuid = hdr['start']['uid'] - uids.append( fuid[:8] ) - fuids.append( fuid ) - uids=uids[::-1] - fuids=fuids[::-1] - return np.array(uids), np.array(fuids) - -def do_compress_on_line( start_time, stop_time, mask_dict=None, mask=None, - wait_time = 2, max_try_num = 3 ): - '''Y.G. 
Mar 10, 2017 - Do on-line compress by giving start time and stop time - Parameters: - mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} - wait_time: search interval time - max_try_num: for each found uid, will try max_try_num*wait_time seconds - Return: - running time - ''' - - t0 = time.time() - uids, fuids = get_uids_in_time_period(start_time, stop_time) - print( fuids ) + sleep_time += wait_time + return FINISH * Fake_FINISH # , sleep_time + + +def get_uids_by_range(start_uidth=-1, end_uidth=0): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end uid number, i.e. -10, -1 + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = list([db[n] for n in range(start_uidth, end_uidth)]) + if len(hdrs) != 0: + print("Totally %s uids are found." % (len(hdrs))) + + uids = [] # short uid + fuids = [] # full uid + for hdr in hdrs: + fuid = hdr["start"]["uid"] + uids.append(fuid[:8]) + fuids.append(fuid) + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(uids), np.array(fuids) + + +def get_uids_in_time_period(start_time, stop_time): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = list(db(start_time=start_time, stop_time=stop_time)) + if len(hdrs) != 0: + print("Totally %s uids are found." % (len(hdrs))) + + uids = [] # short uid + fuids = [] # full uid + for hdr in hdrs: + fuid = hdr["start"]["uid"] + uids.append(fuid[:8]) + fuids.append(fuid) + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(uids), np.array(fuids) + + +def do_compress_on_line(start_time, stop_time, mask_dict=None, mask=None, wait_time=2, max_try_num=3): + """Y.G. Mar 10, 2017 + Do on-line compress by giving start time and stop time + Parameters: + mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + Return: + running time + """ + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + print(fuids) if len(fuids): for uid in fuids: - print('*'*50) - print('Do compress for %s now...'%uid) - if db[uid]['start']['plan_name'] == 'count': - finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) - if finish: + print("*" * 50) + print("Do compress for %s now..." % uid) + if db[uid]["start"]["plan_name"] == "count": + finish = wait_data_acquistion_finish(uid, wait_time, max_try_num) + if finish: try: - md = get_meta_data( uid ) - compress_multi_uids( [ uid ], mask=mask, mask_dict = mask_dict, - force_compress=False, para_compress= True, bin_frame_number=1 ) - - update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + md = get_meta_data(uid) + compress_multi_uids( + [uid], + mask=mask, + mask_dict=mask_dict, + force_compress=False, + para_compress=True, + bin_frame_number=1, + ) + + update_olog_uid(uid=md["uid"], text="Data are on-line sparsified!", attachments=None) except: - print('There are something wrong with this data: %s...'%uid) - print('*'*50) + print("There are something wrong with this data: %s..." % uid) + print("*" * 50) return time.time() - t0 +def realtime_xpcs_analysis( + start_time, stop_time, run_pargs, md_update=None, wait_time=2, max_try_num=3, emulation=False, clear_plot=False +): + """Y.G. 
Mar 10, 2017 + Do on-line xpcs by giving start time and stop time + Parameters: + run_pargs: all the run control parameters, including giving roi_mask + md_update: if not None, a dict, will update all the found uid metadata by this md_update + e.g, + md['beam_center_x'] = 1012 + md['beam_center_y']= 1020 + md['det_distance']= 16718.0 + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + emulation: if True, it will only check dataset and not do real analysis + Return: + running time + """ -def realtime_xpcs_analysis( start_time, stop_time, run_pargs, md_update=None, - wait_time = 2, max_try_num = 3, emulation=False,clear_plot=False ): - '''Y.G. Mar 10, 2017 - Do on-line xpcs by giving start time and stop time - Parameters: - run_pargs: all the run control parameters, including giving roi_mask - md_update: if not None, a dict, will update all the found uid metadata by this md_update - e.g, - md['beam_center_x'] = 1012 - md['beam_center_y']= 1020 - md['det_distance']= 16718.0 - wait_time: search interval time - max_try_num: for each found uid, will try max_try_num*wait_time seconds - emulation: if True, it will only check dataset and not do real analysis - Return: - running time - ''' - - t0 = time.time() - uids, fuids = get_uids_in_time_period(start_time, stop_time) - #print( fuids ) + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + # print( fuids ) if len(fuids): for uid in fuids: - print('*'*50) - #print('Do compress for %s now...'%uid) - print('Starting analysis for %s now...'%uid) - if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': - #if db[uid]['start']['dtype'] =='xpcs': - finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) - if finish: + print("*" * 50) + # print('Do compress for %s now...'%uid) + print("Starting analysis for %s now..." % uid) + if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": + # if db[uid]['start']['dtype'] =='xpcs': + finish = wait_data_acquistion_finish(uid, wait_time, max_try_num) + if finish: try: - md = get_meta_data( uid ) + md = get_meta_data(uid) ##corect some metadata if md_update is not None: - md.update( md_update ) - #if 'username' in list(md.keys()): - #try: + md.update(md_update) + # if 'username' in list(md.keys()): + # try: # md_cor['username'] = md_update['username'] - #except: + # except: # md_cor = None - #uid = uid[:8] - #print(md_cor) + # uid = uid[:8] + # print(md_cor) if not emulation: - #suid=uid[:6] - run_xpcs_xsvs_single( uid, run_pargs= run_pargs, md_cor = None, - return_res= False, clear_plot=clear_plot ) - #update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + # suid=uid[:6] + run_xpcs_xsvs_single( + uid, run_pargs=run_pargs, md_cor=None, return_res=False, clear_plot=clear_plot + ) + # update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) except: - print('There are something wrong with this data: %s...'%uid) + print("There are something wrong with this data: %s..." % uid) else: - print('\nThis is not a XPCS series. We will simiply ignore it.') - print('*'*50) - - #print( 'Sleep 10 sec here!!!') - #time.sleep(10) - - return time.time() - t0 - - - - - - - - + print("\nThis is not a XPCS series. 
We will simiply ignore it.") + print("*" * 50) + # print( 'Sleep 10 sec here!!!') + # time.sleep(10) + return time.time() - t0 #################################################################################################### ##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## ################################################################################################# -def compress_multi_uids( uids, mask, mask_dict = None, force_compress=False, para_compress= True, bin_frame_number=1, - reverse=True, rot90=False,use_local_disk=True): - ''' Compress time series data for a set of uids +def compress_multi_uids( + uids, + mask, + mask_dict=None, + force_compress=False, + para_compress=True, + bin_frame_number=1, + reverse=True, + rot90=False, + use_local_disk=True, +): + """Compress time series data for a set of uids Parameters: uids: list, a list of uid mask: bool array, mask array force_compress: default is False, just load the compresssed data; if True, will compress it to overwrite the old compressed data para_compress: apply the parallel compress algorithm - bin_frame_number: + bin_frame_number: Return: None, save the compressed data in, by default, /XF11ID/analysis/Compressed_Data with filename as '/uid_%s.cmp' uid is the full uid string - + e.g., compress_multi_uids( uids, mask, force_compress= False, bin_frame_number=1 ) - - ''' - for uid in uids: - print('UID: %s is in processing...'%uid) - if validate_uid( uid ): - md = get_meta_data( uid ) - imgs = load_data( uid, md['detector'], reverse= reverse, rot90=rot90 ) + + """ + for uid in uids: + print("UID: %s is in processing..." % uid) + if validate_uid(uid): + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=reverse, rot90=rot90) sud = get_sid_filenames(db[uid]) for pa in sud[2]: - if 'master.h5' in pa: - data_fullpath = pa - print( imgs, data_fullpath ) + if "master.h5" in pa: + data_fullpath = pa + print(imgs, data_fullpath) if mask_dict is not None: - mask = mask_dict[md['detector']] - print('The detecotr is: %s'% md['detector']) - md.update( imgs.md ) + mask = mask_dict[md["detector"]] + print("The detecotr is: %s" % md["detector"]) + md.update(imgs.md) if not use_local_disk: - cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data' + cmp_path = "/nsls2/xf11id1/analysis/Compressed_Data" else: - cmp_path = '/tmp_data/compressed' - cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data' - if bin_frame_number==1: - cmp_file = '/uid_%s.cmp'%md['uid'] + cmp_path = "/tmp_data/compressed" + cmp_path = "/nsls2/xf11id1/analysis/Compressed_Data" + if bin_frame_number == 1: + cmp_file = "/uid_%s.cmp" % md["uid"] else: - cmp_file = '/uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) - filename = cmp_path + cmp_file - mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, - force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold = 1e14, - reverse=reverse, rot90=rot90, - bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True, - direct_load_data =use_local_disk, data_path = data_fullpath, ) + cmp_file = "/uid_%s_bined--%s.cmp" % (md["uid"], bin_frame_number) + filename = cmp_path + cmp_file + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md, + filename, + force_compress=force_compress, + para_compress=para_compress, + bad_pixel_threshold=1e14, + reverse=reverse, + rot90=rot90, + bins=bin_frame_number, + num_sub=100, + num_max_para_process=500, + 
with_pickle=True, + direct_load_data=use_local_disk, + data_path=data_fullpath, + ) + + print("Done!") - print('Done!') - #################################################################################################### ##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## ################################################################################################# -def get_two_time_mulit_uids( uids, roi_mask, norm= None, bin_frame_number=1, path=None, force_generate=False, - md=None, imgs=None,direct_load_data=False,compress_path=None ): - - ''' Calculate two time correlation by using auto_two_Arrayc func for a set of uids, + +def get_two_time_mulit_uids( + uids, + roi_mask, + norm=None, + bin_frame_number=1, + path=None, + force_generate=False, + md=None, + imgs=None, + direct_load_data=False, + compress_path=None, +): + """Calculate two time correlation by using auto_two_Arrayc func for a set of uids, if the two-time resutls are already created, by default (force_generate=False), just pass Parameters: uids: list, a list of uid roi_mask: bool array, roi mask array - norm: the normalization array - path: string, where to save the two time + norm: the normalization array + path: string, where to save the two time force_generate: default, False, if the two-time resutls are already created, just pass if True, will force to calculate two-time no matter exist or not - + Return: - None, save the two-time in as path + uid + 'uid=%s_g12b'%uid - + None, save the two-time in as path + uid + 'uid=%s_g12b'%uid + e.g., - get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, + get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, path= data_dir,force_generate=False ) - - ''' - + + """ + qind, pixelist = roi.extract_label_indices(roi_mask) for uid in uids: - print('UID: %s is in processing...'%uid) + print("UID: %s is in processing..." % uid) if not direct_load_data: - md = get_meta_data( uid ) - imgs = load_data( uid, md['detector'], reverse= True ) + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) else: pass N = len(imgs) - #print( N ) + # print( N ) if compress_path is None: - compress_path = '/XF11ID/analysis/Compressed_Data/' - if bin_frame_number==1: - filename = '%s'%compress_path +'uid_%s.cmp'%md['uid'] + compress_path = "/XF11ID/analysis/Compressed_Data/" + if bin_frame_number == 1: + filename = "%s" % compress_path + "uid_%s.cmp" % md["uid"] else: - filename = '%s'%compress_path +'uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) - - FD = Multifile(filename, 0, N//bin_frame_number) - #print( FD.beg, FD.end) - uid_ = md['uid'] - os.makedirs(path + uid_ + '/', exist_ok=True) - filename = path + uid_ + '/' + 'uid=%s_g12b'%uid + filename = "%s" % compress_path + "uid_%s_bined--%s.cmp" % (md["uid"], bin_frame_number) + + FD = Multifile(filename, 0, N // bin_frame_number) + # print( FD.beg, FD.end) + uid_ = md["uid"] + os.makedirs(path + uid_ + "/", exist_ok=True) + filename = path + uid_ + "/" + "uid=%s_g12b" % uid doit = True if not force_generate: - if os.path.exists( filename + '.npy'): - doit=False - print('The two time correlation function for uid=%s is already calculated. 
Just pass...'%uid)
-        if doit:
-            data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm= norm ).get_data()
-            g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None )
-            np.save( filename, g12b)
+        if os.path.exists(filename + ".npy"):
+            doit = False
+            print("The two time correlation function for uid=%s is already calculated. Just pass..." % uid)
+        if doit:
+            data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data()
+            g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None)
+            np.save(filename, g12b)
             del g12b
-            print( 'The two time correlation function for uid={} is saved as {}.'.format(uid, filename ))
-
-
+            print("The two time correlation function for uid={} is saved as {}.".format(uid, filename))
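
# A sketch of chaining the helpers, assuming guids, roi_mask, norm and data_dir
# already exist in the session: cache the two-time functions first, then cut
# dose-resolved one-time g2's from one of them with get_series_g2_from_g12(),
# defined just below.
get_two_time_mulit_uids(guids, roi_mask, norm=norm, bin_frame_number=1, path=data_dir, force_generate=False)
full_uid = get_meta_data(guids[0])["uid"]  # results are saved under the full uid
g12b = np.load(data_dir + full_uid + "/" + "uid=%s_g12b.npy" % guids[0])
lag_steps, g2 = get_series_g2_from_g12(g12b, fra_num_by_dose=[100, 500, 1000], time_step=0.00134)  # time_step: frame period (s), hypothetical
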
 
-def get_series_g2_from_g12( g12b, fra_num_by_dose = None, dose_label = None,
-                           good_start=0, log_taus = True, num_bufs=8, time_step=1 ):
-    '''
+def get_series_g2_from_g12(
+    g12b, fra_num_by_dose=None, dose_label=None, good_start=0, log_taus=True, num_bufs=8, time_step=1
+):
+    """
     Get a series of one-time function from two-time by giving noframes
     Parameters:
     g12b: a two time function
@@ -661,75 +816,86 @@ def get_series_g2_from_g12( g12b, fra_num_by_dose = None, dose_label = None,
     fra_num_by_dose: a list, correlation number starting from index 0,
             if this number is larger than g12b length, will give a warning message, and
             will use g12b length to replace this number
-            by default is None, will = [ g12b.shape[0] ]
+            by default is None, will = [ g12b.shape[0] ]
     dose_label: the label of each dose, also is the keys of returned g2, lag
-    log_taus: if true, will only return a g2 with the correponding tau values
+    log_taus: if true, will only return a g2 with the corresponding tau values
         as calculated by multi-tau defined taus
     Return:
-
+
     g2_series, a dict, with keys as dose_label (corrected on if warning message is given)
     lag_steps, the corresponding lags
-
-    '''
-    g2={}
+
+    """
+    g2 = {}
     lag_steps = {}
-    L,L,qs= g12b.shape
+    L, L, qs = g12b.shape
     if fra_num_by_dose is None:
-        fra_num_by_dose = [L]
+        fra_num_by_dose = [L]
     if dose_label is None:
-        dose_label = fra_num_by_dose
-    fra_num_by_dose = sorted( fra_num_by_dose )
-    dose_label = sorted( dose_label )
+        dose_label = fra_num_by_dose
+    fra_num_by_dose = sorted(fra_num_by_dose)
+    dose_label = sorted(dose_label)
     for i, good_end in enumerate(fra_num_by_dose):
-        key = round(dose_label[i] ,3)
-        #print( good_end )
-        if good_end>L:
-            warnings.warn("Warning: the dose value is too large, and please check the maxium dose in this data set and give a smaller dose value. We will use the maxium dose of the data.")
-            good_end = L
-        if not log_taus:
-            g2[ key ] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] )
-        else:
-            #print( good_end, num_bufs )
-            lag_step = get_multi_tau_lag_steps(good_end, num_bufs)
-            lag_step = lag_step[ lag_step < good_end - good_start]
-            #print( len(lag_steps ) )
+        key = round(dose_label[i], 3)
+        # print( good_end )
+        if good_end > L:
+            warnings.warn(
+                "Warning: the dose value is too large, and please check the maximum dose in this data set and give a smaller dose value. We will use the maximum dose of the data."
+            )
+            good_end = L
+        if not log_taus:
+            g2[key] = get_one_time_from_two_time(g12b[good_start:good_end, good_start:good_end, :])
+        else:
+            # print( good_end, num_bufs )
+            lag_step = get_multi_tau_lag_steps(good_end, num_bufs)
+            lag_step = lag_step[lag_step < good_end - good_start]
+            # print( len(lag_steps ) )
             lag_steps[key] = lag_step * time_step
-            g2[key] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] )[lag_step]
-
+            g2[key] = get_one_time_from_two_time(g12b[good_start:good_end, good_start:good_end, :])[lag_step]
+
     return lag_steps, g2
 
 
-def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ):
-    '''
+def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2):
+    """
     Calculate the frame number to be correlated by giving a X-ray exposure dose
-
+
     Parameters:
         exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation)
         exp_time: float, the exposure time for a xpcs time series
        dead_time: dead time for the fast shutter response time, CHX = 2ms
     Return:
-        noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time )
+        noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time )
     e.g.,
-
+
     no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ],
                                       exp_time = 1.34, dead_time = 2)
-
-    --> no_dose_fra will be array([ 20,  50, 100, 502, 504])
-    '''
-    return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att )
-
-
-
-def get_series_one_time_mulit_uids( uids, qval_dict, trans = None, good_start=0, path=None,
-                                   exposure_dose = None, dead_time = 0,
-                                   num_bufs =8, save_g2=True,
-                                   md = None, imgs=None, direct_load_data= False ):
-    ''' Calculate a dose depedent series of one time correlations from two time
+
+    --> no_dose_fra will be array([ 20,  50, 100, 502, 504])
+    """
+    return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att)
+
+
+def get_series_one_time_mulit_uids(
+    uids,
+    qval_dict,
+    trans=None,
+    good_start=0,
+    path=None,
+    exposure_dose=None,
+    dead_time=0,
+    num_bufs=8,
+    save_g2=True,
+    md=None,
+    imgs=None,
+    direct_load_data=False,
+):
+    """Calculate a dose dependent series of one time correlations from two time
     Parameters:
         uids: list, a list of uid
         trans: list, same length as uids, the transmission list
         exposure_dose: list, a list of x-ray exposure doses;
-            by default is None, namely,  = [ max_frame_number ],
+            by default is None, namely,  = [ max_frame_number ],
            can be [3.34  334, 3340] in unit of ms, in unit of exp_time(ms)*N(fram num)*att( attenuation)
         path: string, where to load the two time, if None, ask for it
              the real g12 path is two_time_path + uid + '/'
@@ -737,150 +903,197 @@ def get_series_one_time_mulit_uids( uids,  qval_dict,  trans = None, good_start=
     Return:
        taus_uids, with keys as uid, and taus_uids[uid] is also a dict, with keys as dose_frame
-        g2_uids, with keys as uid, and
+        g2_uids, with keys as uid, and
            g2_uids[uid] is also a dict, with keys as  dose_frame
        will also save g2 results to the 'path'
-    '''
-
+    """
+
     if path is None:
-        print( 'Please calculate two time function first by using get_two_time_mulit_uids function.')
+        print("Please calculate two time function first by using get_two_time_mulit_uids function.")
     else:
         taus_uids = {}
         g2_uids = {}
         for i, uid in enumerate(uids):
-            print('UID: %s is in processing...'%uid)
+            print("UID: %s is in processing..." 
% uid) if not direct_load_data: - md = get_meta_data( uid ) - imgs = load_data( uid, md['detector'], reverse= True ) - #print(md) - detectors = md['detector'] - if isinstance( detectors,list): - if len(detectors)>1: - if '_image' in md['detector']: - pref = md['detector'][:-5] + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + # print(md) + detectors = md["detector"] + if isinstance(detectors, list): + if len(detectors) > 1: + if "_image" in md["detector"]: + pref = md["detector"][:-5] else: - pref=md['detector'] - for k in [ 'beam_center_x', 'beam_center_y','cam_acquire_time','cam_acquire_period','cam_num_images', - 'wavelength', 'det_distance', 'photon_energy']: - md[k] = md[ pref + '%s'%k] - + pref = md["detector"] + for k in [ + "beam_center_x", + "beam_center_y", + "cam_acquire_time", + "cam_acquire_period", + "cam_num_images", + "wavelength", + "det_distance", + "photon_energy", + ]: + md[k] = md[pref + "%s" % k] + else: pass N = len(imgs) if exposure_dose is None: exposure_dose = [N] try: - g2_path = path + uid + '/' - g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) + g2_path = path + uid + "/" + g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) except: - g2_path = path + md['uid'] + '/' - g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) + g2_path = path + md["uid"] + "/" + g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) try: - exp_time = float( md['cam_acquire_time']) #*1000 #from second to ms - except: - exp_time = float( md['exposure time']) #* 1000 #from second to ms - if trans is None: + exp_time = float(md["cam_acquire_time"]) # *1000 #from second to ms + except: + exp_time = float(md["exposure time"]) # * 1000 #from second to ms + if trans is None: try: - transi = md['transmission'] + transi = md["transmission"] except: - transi = [1] + transi = [1] else: transi = trans[i] - fra_num_by_dose = get_fra_num_by_dose( exp_dose = exposure_dose, - exp_time =exp_time, dead_time = dead_time, att = transi ) - - print( 'uid: %s--> fra_num_by_dose: %s'%(uid, fra_num_by_dose ) ) - - taus_uid, g2_uid = get_series_g2_from_g12( g12b, fra_num_by_dose=fra_num_by_dose, - dose_label = exposure_dose, - good_start=good_start, num_bufs=num_bufs, - time_step = exp_time)#md['cam_acquire_period'] ) - g2_uids['uid_%03d=%s'%(i,uid)] = g2_uid - taus_uids['uid_%03d=%s'%(i,uid)] = taus_uid + fra_num_by_dose = get_fra_num_by_dose( + exp_dose=exposure_dose, exp_time=exp_time, dead_time=dead_time, att=transi + ) + + print("uid: %s--> fra_num_by_dose: %s" % (uid, fra_num_by_dose)) + + taus_uid, g2_uid = get_series_g2_from_g12( + g12b, + fra_num_by_dose=fra_num_by_dose, + dose_label=exposure_dose, + good_start=good_start, + num_bufs=num_bufs, + time_step=exp_time, + ) # md['cam_acquire_period'] ) + g2_uids["uid_%03d=%s" % (i, uid)] = g2_uid + taus_uids["uid_%03d=%s" % (i, uid)] = taus_uid if save_g2: - for k in list( g2_uid.keys()): - #print(k) - uid_ = uid + '_fra_%s_%s'%(good_start, k ) - save_g2_general( g2_uid[k], taus=taus_uid[k],qr=np.array( list( qval_dict.values() ) )[:,0], - uid=uid_+'_g2.csv', path= g2_path, return_res=False ) + for k in list(g2_uid.keys()): + # print(k) + uid_ = uid + "_fra_%s_%s" % (good_start, k) + save_g2_general( + g2_uid[k], + taus=taus_uid[k], + qr=np.array(list(qval_dict.values()))[:, 0], + uid=uid_ + "_g2.csv", + path=g2_path, + return_res=False, + ) return taus_uids, g2_uids - - - - -def plot_dose_g2( taus_uids, g2_uids, qval_dict, qth_interest = None, ylim=[0.95, 1.05], vshift=0.1, - fit_res= None, geometry= 'saxs',filename= 'dose'+'_g2', 
legend_size=None, - path= None, function= None, g2_labels=None, ylabel= 'g2_dose', append_name= '_dose', - return_fig=False): - '''Plot a does-dependent g2 + + +def plot_dose_g2( + taus_uids, + g2_uids, + qval_dict, + qth_interest=None, + ylim=[0.95, 1.05], + vshift=0.1, + fit_res=None, + geometry="saxs", + filename="dose" + "_g2", + legend_size=None, + path=None, + function=None, + g2_labels=None, + ylabel="g2_dose", + append_name="_dose", + return_fig=False, +): + """Plot a does-dependent g2 taus_uids, dict, with format as {uid1: { dose1: tau_1, dose2: tau_2...}, uid2: ...} g2_uids, dict, with format as {uid1: { dose1: g2_1, dose2: g2_2...}, uid2: ...} qval_dict: a dict of qvals vshift: float, vertical shift value of different dose of g2 - - ''' - - uids = sorted( list( taus_uids.keys() ) ) - #print( uids ) - dose = sorted( list( taus_uids[ uids[0] ].keys() ) ) - if qth_interest is None: - g2_dict= {} + + """ + + uids = sorted(list(taus_uids.keys())) + # print( uids ) + dose = sorted(list(taus_uids[uids[0]].keys())) + if qth_interest is None: + g2_dict = {} taus_dict = {} if g2_labels is None: - g2_labels = [] - for i in range( len( dose )): + g2_labels = [] + for i in range(len(dose)): g2_dict[i + 1] = [] - taus_dict[i +1 ] = [] - #print ( i ) - for j in range( len( uids )): - #print( uids[i] , dose[j]) - g2_dict[i +1 ].append( g2_uids[ uids[j] ][ dose[i] ] + vshift*i ) - taus_dict[i +1 ].append( taus_uids[ uids[j] ][ dose[i] ] ) - if j ==0: - g2_labels.append( 'Dose_%s'%dose[i] ) - - plot_g2_general( g2_dict, taus_dict, - ylim=[ylim[0], ylim[1] + vshift * len(dose)], - qval_dict = qval_dict, fit_res= None, geometry= geometry,filename= filename, - path= path, function= function, ylabel= ylabel, g2_labels=g2_labels, append_name= append_name ) - + taus_dict[i + 1] = [] + # print ( i ) + for j in range(len(uids)): + # print( uids[i] , dose[j]) + g2_dict[i + 1].append(g2_uids[uids[j]][dose[i]] + vshift * i) + taus_dict[i + 1].append(taus_uids[uids[j]][dose[i]]) + if j == 0: + g2_labels.append("Dose_%s" % dose[i]) + + plot_g2_general( + g2_dict, + taus_dict, + ylim=[ylim[0], ylim[1] + vshift * len(dose)], + qval_dict=qval_dict, + fit_res=None, + geometry=geometry, + filename=filename, + path=path, + function=function, + ylabel=ylabel, + g2_labels=g2_labels, + append_name=append_name, + ) + else: - fig,ax= plt.subplots() - q = qval_dict[qth_interest-1][0] - j = 0 + fig, ax = plt.subplots() + q = qval_dict[qth_interest - 1][0] + j = 0 for uid in uids: - #uid = uids[0] - #print( uid ) - dose_list = sorted( list(taus_uids['%s'%uid].keys()) ) - #print( dose_list ) + # uid = uids[0] + # print( uid ) + dose_list = sorted(list(taus_uids["%s" % uid].keys())) + # print( dose_list ) for i, dose in enumerate(dose_list): dose = float(dose) - if j ==0: - legend= 'dose_%s'%round(dose,2) + if j == 0: + legend = "dose_%s" % round(dose, 2) else: - legend = '' - - #print( markers[i], colors[i] ) - - plot1D(x= taus_uids['%s'%uid][dose_list[i]], - y =g2_uids['%s'%uid][dose_list[i]][:,qth_interest] + i*vshift, - logx=True, ax=ax, legend= legend, m = markers[i], c= colors[i], - lw=3, title='%s_Q=%s'%(uid, q) + r'$\AA^{-1}$', legend_size=legend_size ) - ylabel='g2--Dose (trans*exptime_sec)' - j +=1 - - ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) - ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) - ax.set_ylim ( ylim ) + legend = "" + + # print( markers[i], colors[i] ) + + plot1D( + x=taus_uids["%s" % uid][dose_list[i]], + y=g2_uids["%s" % uid][dose_list[i]][:, qth_interest] + i * vshift, + logx=True, + 
ax=ax, + legend=legend, + m=markers[i], + c=colors[i], + lw=3, + title="%s_Q=%s" % (uid, q) + r"$\AA^{-1}$", + legend_size=legend_size, + ) + ylabel = "g2--Dose (trans*exptime_sec)" + j += 1 + + ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + ax.set_ylim(ylim) if return_fig: return fig, ax - #return taus_dict, g2_dict - - + # return taus_dict, g2_dict -def run_xpcs_xsvs_single( uid, run_pargs, md_cor=None, return_res=False,reverse=True, clear_plot=False ): - '''Y.G. Dec 22, 2016 +def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse=True, clear_plot=False): + """Y.G. Dec 22, 2016 Run XPCS XSVS analysis for a single uid Parameters: uid: unique id @@ -890,10 +1103,10 @@ def run_xpcs_xsvs_single( uid, run_pargs, md_cor=None, return_res=False,reverse= save analysis result to csv/png/h5 files return_res: if true, return a dict, containing g2,g4,g12,contrast et.al. depending on the run type An example for the run_pargs: - - run_pargs= dict( + + run_pargs= dict( scat_geometry = 'gi_saxs' #suport 'saxs', 'gi_saxs', 'ang_saxs' (for anisotropics saxs or flow-xpcs) - force_compress = True,#False, + force_compress = True,#False, para_compress = True, run_fit_form = False, run_waterfall = True,#False, @@ -907,794 +1120,1344 @@ def run_xpcs_xsvs_single( uid, run_pargs, md_cor=None, return_res=False,reverse= att_pdf_report = True, show_plot = False, - CYCLE = '2016_3', + CYCLE = '2016_3', mask_path = '/XF11ID/analysis/2016_3/masks/', - mask_name = 'Nov28_4M_SAXS_mask.npy', - good_start = 5, + mask_name = 'Nov28_4M_SAXS_mask.npy', + good_start = 5, uniformq = True, inner_radius= 0.005, #0.005 for 50 nm, 0.006, #for 10nm/coralpor - outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor + outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor num_rings = 12, - gap_ring_number = 6, - number_rings= 1, - #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 - #width = 0.0002 - qth_interest = 1, #the intested single qth + gap_ring_number = 6, + number_rings= 1, + #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + #width = 0.0002 + qth_interest = 1, #the intested single qth use_sqnorm = False, use_imgsum_norm = True, - pdf_version = '_1' #for pdf report name + pdf_version = '_1' #for pdf report name ) - + md_cor: if not None, will update the metadata with md_cor - - ''' - - scat_geometry = run_pargs['scat_geometry'] - force_compress = run_pargs['force_compress'] - para_compress = run_pargs['para_compress'] - run_fit_form = run_pargs['run_fit_form'] - run_waterfall = run_pargs['run_waterfall'] - run_t_ROI_Inten = run_pargs['run_t_ROI_Inten'] - - #run_fit_g2 = run_pargs['run_fit_g2'], - fit_g2_func = run_pargs['fit_g2_func'] - run_one_time = run_pargs['run_one_time'] - run_two_time = run_pargs['run_two_time'] - run_four_time = run_pargs['run_four_time'] - run_xsvs=run_pargs['run_xsvs'] + + """ + + scat_geometry = run_pargs["scat_geometry"] + force_compress = run_pargs["force_compress"] + para_compress = run_pargs["para_compress"] + run_fit_form = run_pargs["run_fit_form"] + run_waterfall = run_pargs["run_waterfall"] + run_t_ROI_Inten = run_pargs["run_t_ROI_Inten"] + + # run_fit_g2 = run_pargs['run_fit_g2'], + fit_g2_func = run_pargs["fit_g2_func"] + run_one_time = run_pargs["run_one_time"] + run_two_time = run_pargs["run_two_time"] + run_four_time = run_pargs["run_four_time"] + run_xsvs = run_pargs["run_xsvs"] try: - run_dose = run_pargs['run_dose'] + run_dose = 
run_pargs["run_dose"] except: - run_dose= False + run_dose = False ############################################################### - if scat_geometry =='gi_saxs': #to be done for other types - run_xsvs = False; - ############################################################### - + if scat_geometry == "gi_saxs": # to be done for other types + run_xsvs = False + ############################################################### + ############################################################### - if scat_geometry == 'ang_saxs': - run_xsvs= False;run_waterfall=False;run_two_time=False;run_four_time=False;run_t_ROI_Inten=False; - ############################################################### - if 'bin_frame' in list( run_pargs.keys() ): - bin_frame = run_pargs['bin_frame'] - bin_frame_number= run_pargs['bin_frame_number'] + if scat_geometry == "ang_saxs": + run_xsvs = False + run_waterfall = False + run_two_time = False + run_four_time = False + run_t_ROI_Inten = False + ############################################################### + if "bin_frame" in list(run_pargs.keys()): + bin_frame = run_pargs["bin_frame"] + bin_frame_number = run_pargs["bin_frame_number"] else: - bin_frame = False + bin_frame = False if not bin_frame: - bin_frame_number = 1 - - att_pdf_report = run_pargs['att_pdf_report'] - show_plot = run_pargs['show_plot'] - CYCLE = run_pargs['CYCLE'] - mask_path = run_pargs['mask_path'] - mask_name = run_pargs['mask_name'] - good_start = run_pargs['good_start'] - use_imgsum_norm = run_pargs['use_imgsum_norm'] + bin_frame_number = 1 + + att_pdf_report = run_pargs["att_pdf_report"] + show_plot = run_pargs["show_plot"] + CYCLE = run_pargs["CYCLE"] + mask_path = run_pargs["mask_path"] + mask_name = run_pargs["mask_name"] + good_start = run_pargs["good_start"] + use_imgsum_norm = run_pargs["use_imgsum_norm"] try: - use_sqnorm = run_pargs['use_sqnorm'] + use_sqnorm = run_pargs["use_sqnorm"] except: use_sqnorm = False try: - inc_x0 = run_pargs['inc_x0'] - inc_y0 = run_pargs['inc_y0'] + inc_x0 = run_pargs["inc_x0"] + inc_y0 = run_pargs["inc_y0"] except: inc_x0 = None - inc_y0= None - - #for different scattering geogmetry, we only need to change roi_mask - #and qval_dict - qval_dict = run_pargs['qval_dict'] - if scat_geometry != 'ang_saxs': - roi_mask = run_pargs['roi_mask'] - qind, pixelist = roi.extract_label_indices( roi_mask ) + inc_y0 = None + + # for different scattering geogmetry, we only need to change roi_mask + # and qval_dict + qval_dict = run_pargs["qval_dict"] + if scat_geometry != "ang_saxs": + roi_mask = run_pargs["roi_mask"] + qind, pixelist = roi.extract_label_indices(roi_mask) noqs = len(np.unique(qind)) - nopr = np.bincount(qind, minlength=(noqs+1))[1:] - - else: - roi_mask_p = run_pargs['roi_mask_p'] - qval_dict_p = run_pargs['qval_dict_p'] - roi_mask_v = run_pargs['roi_mask_v'] - qval_dict_v = run_pargs['qval_dict_v'] - - if scat_geometry == 'gi_saxs': - refl_x0 = run_pargs['refl_x0'] - refl_y0 = run_pargs['refl_y0'] - Qr, Qz, qr_map, qz_map = run_pargs['Qr'], run_pargs['Qz'], run_pargs['qr_map'], run_pargs['qz_map'] - - - taus=None;g2=None;tausb=None;g2b=None;g12b=None;taus4=None;g4=None;times_xsv=None;contrast_factorL=None; - qth_interest = run_pargs['qth_interest'] - pdf_version = run_pargs['pdf_version'] - - + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + + else: + roi_mask_p = run_pargs["roi_mask_p"] + qval_dict_p = run_pargs["qval_dict_p"] + roi_mask_v = run_pargs["roi_mask_v"] + qval_dict_v = run_pargs["qval_dict_v"] + + if scat_geometry == "gi_saxs": + refl_x0 
= run_pargs["refl_x0"] + refl_y0 = run_pargs["refl_y0"] + Qr, Qz, qr_map, qz_map = run_pargs["Qr"], run_pargs["Qz"], run_pargs["qr_map"], run_pargs["qz_map"] + + taus = None + g2 = None + tausb = None + g2b = None + g12b = None + taus4 = None + g4 = None + times_xsv = None + contrast_factorL = None + qth_interest = run_pargs["qth_interest"] + pdf_version = run_pargs["pdf_version"] + try: - username = run_pargs['username'] + username = run_pargs["username"] except: username = getpass.getuser() - - data_dir0 = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') + + data_dir0 = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") os.makedirs(data_dir0, exist_ok=True) - print('Results from this analysis will be stashed in the directory %s' % data_dir0) - #uid = (sys.argv)[1] - print ('*'*40) - print ( '*'*5 + 'The processing uid is: %s'%uid + '*'*5) - print ('*'*40) - suid = uid #[:6] - data_dir = os.path.join(data_dir0, '%s/'%suid) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + # uid = (sys.argv)[1] + print("*" * 40) + print("*" * 5 + "The processing uid is: %s" % uid + "*" * 5) + print("*" * 40) + suid = uid # [:6] + data_dir = os.path.join(data_dir0, "%s/" % suid) os.makedirs(data_dir, exist_ok=True) - print('Results from this analysis will be stashed in the directory %s' % data_dir) - md = get_meta_data( uid ) - uidstr = 'uid=%s'%uid[:6] - imgs = load_data( uid, md['detector'], reverse= reverse ) - md.update( imgs.md ) + print("Results from this analysis will be stashed in the directory %s" % data_dir) + md = get_meta_data(uid) + uidstr = "uid=%s" % uid[:6] + imgs = load_data(uid, md["detector"], reverse=reverse) + md.update(imgs.md) Nimg = len(imgs) if md_cor is not None: - md.update( md_cor ) - - + md.update(md_cor) + if inc_x0 is not None: - md['beam_center_x']= inc_x0 + md["beam_center_x"] = inc_x0 if inc_y0 is not None: - md['beam_center_y']= inc_y0 - - #print( run_pargs ) - #print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) - #print( inc_x0, inc_y0 ) - - if md['detector'] =='eiger1m_single_image': - Chip_Mask=np.load( '/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy') - elif md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': - Chip_Mask= np.array(np.load( '/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy'), dtype=bool) - BadPix = np.load('/XF11ID/analysis/2018_1/BadPix_4M.npy' ) + md["beam_center_y"] = inc_y0 + + # print( run_pargs ) + # print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) + # print( inc_x0, inc_y0 ) + + if md["detector"] == "eiger1m_single_image": + Chip_Mask = np.load("/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy") + elif md["detector"] == "eiger4m_single_image" or md["detector"] == "image": + Chip_Mask = np.array(np.load("/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy"), dtype=bool) + BadPix = np.load("/XF11ID/analysis/2018_1/BadPix_4M.npy") Chip_Mask.ravel()[BadPix] = 0 - elif md['detector'] =='eiger500K_single_image': - Chip_Mask= 1 #to be defined the chip mask + elif md["detector"] == "eiger500K_single_image": + Chip_Mask = 1 # to be defined the chip mask else: Chip_Mask = 1 - #show_img(Chip_Mask) - - center = [ int(md['beam_center_y']),int( md['beam_center_x'] ) ] #beam center [y,x] for python image - - - pixel_mask = 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool) ) - print( 'The data are: %s' %imgs ) - + # show_img(Chip_Mask) + + center = [int(md["beam_center_y"]), int(md["beam_center_x"])] # beam center [y,x] for python image + + pixel_mask = 1 - 
np.int_(np.array(imgs.md["pixel_mask"], dtype=bool))
+    print("The data are: %s" % imgs)
+
     if False:
-        print_dict( md, ['suid', 'number of images', 'uid', 'scan_id', 'start_time', 'stop_time', 'sample', 'Measurement',
-                'acquire period', 'exposure time',
-         'det_distanc', 'beam_center_x', 'beam_center_y', ] )
-    ## Overwrite Some Metadata if Wrong Input
-    dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata(
-        md, Nimg, inc_x0 = inc_x0, inc_y0= inc_y0, pixelsize = 7.5*10*(-5) )
-
-    print( 'The beam center is: %s'%center )
-
-    timeperframe *= bin_frame_number
-
-    setup_pargs=dict(uid=uidstr, dpix= dpix, Ldet=Ldet, lambda_= lambda_, exposuretime=exposuretime,
-            timeperframe=timeperframe, center=center, path= data_dir)
-    #print_dict( setup_pargs )
-
-    mask = load_mask(mask_path, mask_name, plot_ = False, image_name = uidstr + '_mask', reverse=reverse )
+        print_dict(
+            md,
+            [
+                "suid",
+                "number of images",
+                "uid",
+                "scan_id",
+                "start_time",
+                "stop_time",
+                "sample",
+                "Measurement",
+                "acquire period",
+                "exposure time",
+                "det_distanc",
+                "beam_center_x",
+                "beam_center_y",
+            ],
+        )
+    ## Overwrite Some Metadata if Wrong Input
+    dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata(
+        md, Nimg, inc_x0=inc_x0, inc_y0=inc_y0, pixelsize=7.5 * 10 ** (-5)  # 75 um pixel size, in m
+    )
+
+    print("The beam center is: %s" % center)
+
+    timeperframe *= bin_frame_number
+
+    setup_pargs = dict(
+        uid=uidstr,
+        dpix=dpix,
+        Ldet=Ldet,
+        lambda_=lambda_,
+        exposuretime=exposuretime,
+        timeperframe=timeperframe,
+        center=center,
+        path=data_dir,
+    )
+    # print_dict( setup_pargs )
+
+    mask = load_mask(mask_path, mask_name, plot_=False, image_name=uidstr + "_mask", reverse=reverse)
     mask *= pixel_mask
-    if md['detector'] =='eiger4m_single_image':
-        mask[:,2069] =0 # False #Concluded from the previous results
-    show_img(mask,image_name = uidstr + '_mask', save=True, path=data_dir)
-    mask_load=mask.copy()
-    imgsa = apply_mask( imgs, mask )
-
+    if md["detector"] == "eiger4m_single_image":
+        mask[:, 2069] = 0  # set this column to False; concluded from previous results
+    show_img(mask, image_name=uidstr + "_mask", save=True, path=data_dir)
+    mask_load = mask.copy()
+    imgsa = apply_mask(imgs, mask)
     img_choice_N = 2
-    img_samp_index = random.sample( range(len(imgs)), img_choice_N)
-    avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uidstr)
-
+    img_samp_index = random.sample(range(len(imgs)), img_choice_N)
+    avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uidstr)
+
     if avg_img.max() == 0:
-        print('There are no photons recorded for this uid: %s'%uid)
-        print('The data analysis should be terminated! 
Please try another uid.')
-
-    else:
-        if scat_geometry !='saxs':
-            show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), logs=True,
-                image_name= uidstr + '_%s_frames_avg'%img_choice_N, save=True, path=data_dir)
-        else:
-            show_saxs_qmap( avg_img, setup_pargs, width=400, show_pixel = False,
-                vmin=.1, vmax= np.max(avg_img), logs=True, image_name= uidstr + '_%s_frames_avg'%img_choice_N )
-
-        compress=True
-        photon_occ = len( np.where(avg_img)[0] ) / ( imgsa[0].size)
-        #compress = photon_occ < .4 #if the photon ocupation < 0.5, do compress
-        print ("The non-zeros photon occupation is %s."%( photon_occ))
-        print("Will " + 'Always ' + ['NOT', 'DO'][compress] + " apply compress process.")
-        #good_start = 5 #make the good_start at least 0
-        t0= time.time()
-        filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid']
-        mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename,
-            force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold= 1e14,
-            bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True )
-        min_inten = 10
-        good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] )
-        print ('The good_start frame number is: %s '%good_start)
+        print("There are no photons recorded for this uid: %s" % uid)
+        print("The data analysis should be terminated! Please try another uid.")
+
+    else:
+        if scat_geometry != "saxs":
+            show_img(
+                avg_img,
+                vmin=0.1,
+                vmax=np.max(avg_img * 0.1),
+                logs=True,
+                image_name=uidstr + "_%s_frames_avg" % img_choice_N,
+                save=True,
+                path=data_dir,
+            )
+        else:
+            show_saxs_qmap(
+                avg_img,
+                setup_pargs,
+                width=400,
+                show_pixel=False,
+                vmin=0.1,
+                vmax=np.max(avg_img),
+                logs=True,
+                image_name=uidstr + "_%s_frames_avg" % img_choice_N,
+            )
+
+        compress = True
+        photon_occ = len(np.where(avg_img)[0]) / (imgsa[0].size)
+        # compress = photon_occ < 0.4  # if the photon occupation < 0.4, do compress
+        print("The non-zero photon occupation is %s." % (photon_occ))
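+        # photon_occ is the fraction of pixels with nonzero counts in the
+        # sampled average image; compression is applied unconditionally
+        # below, so the occupancy is reported for reference only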
+        print("Will " + "Always " + ["NOT", "DO"][compress] + " apply the compression process.")
+        # good_start = 5  # make the good_start at least 0
+        t0 = time.time()
+        filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"]
+        mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(
+            imgs,
+            mask,
+            md,
+            filename,
+            force_compress=force_compress,
+            para_compress=para_compress,
+            bad_pixel_threshold=1e14,
+            bins=bin_frame_number,
+            num_sub=100,
+            num_max_para_process=500,
+            with_pickle=True,
+        )
+        min_inten = 10
+        good_start = max(good_start, np.where(np.array(imgsum) > min_inten)[0][0])
+        print("The good_start frame number is: %s " % good_start)
         FD = Multifile(filename, good_start, len(imgs))
-        #FD = Multifile(filename, good_start, 100)
-        uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
-        print( uid_ )
-        plot1D( y = imgsum[ np.array( [i for i in np.arange(good_start, len(imgsum)) if i not in bad_frame_list])],
-            title =uidstr + '_imgsum', xlabel='Frame', ylabel='Total_Intensity', legend='imgsum' )
+        # FD = Multifile(filename, good_start, 100)
+        uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end)
+        print(uid_)
+        plot1D(
+            y=imgsum[np.array([i for i in np.arange(good_start, len(imgsum)) if i not in bad_frame_list])],
+            title=uidstr + "_imgsum",
+            xlabel="Frame",
+            ylabel="Total_Intensity",
+            legend="imgsum",
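+            # frames flagged in bad_frame_list are dropped from the y-array
+            # above, so only the good-frame total intensity is plotted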
+ save=True, + path=data_dir, + ) + ############for SAXS and ANG_SAXS (Flow_SAXS) - if scat_geometry =='saxs' or scat_geometry =='ang_saxs': - - #show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, - # image_name= uidstr + '_img_avg', save=True) - #np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) + if scat_geometry == "saxs" or scat_geometry == "ang_saxs": - #try: + # show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + # image_name= uidstr + '_img_avg', save=True) + # np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) + + # try: # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) - #except: + # except: # hmask=1 - hmask=1 - qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True ) - - plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, - xlim=[q_saxs.min(), q_saxs.max()], ylim = [iq_saxs.min(), iq_saxs.max()] ) - - #pd = trans_data_to_pd( np.where( hmask !=1), + hmask = 1 + qp_saxs, iq_saxs, q_saxs = get_circular_average( + avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True + ) + + plot_circular_average( + qp_saxs, + iq_saxs, + q_saxs, + pargs=setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()], + ylim=[iq_saxs.min(), iq_saxs.max()], + ) + + # pd = trans_data_to_pd( np.where( hmask !=1), # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') - - #pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) - - #mask =np.array( mask * hmask, dtype=bool) - #show_img( mask ) - + + # pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) + + # mask =np.array( mask * hmask, dtype=bool) + # show_img( mask ) + if run_fit_form: - form_res = fit_form_factor( q_saxs,iq_saxs, guess_values={'radius': 2500, 'sigma':0.05, - 'delta_rho':1E-10 }, fit_range=[0.0001, 0.015], fit_variables={'radius': T, 'sigma':T, - 'delta_rho':T}, res_pargs=setup_pargs, xlim=[0.0001, 0.015]) - - show_ROI_on_image( avg_img, roi_mask, center, label_on = False, rwidth =700, alpha=.9, - save=True, path=data_dir, uid=uidstr, vmin= np.min(avg_img), vmax= np.max(avg_img) ) - - qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) - plot_qIq_with_ROI( q_saxs, iq_saxs, qr, logs=True, uid=uidstr, xlim=[q_saxs.min(), q_saxs.max()], - ylim = [iq_saxs.min(), iq_saxs.max()], save=True, path=data_dir) - - if scat_geometry != 'ang_saxs': - Nimg = FD.end - FD.beg - time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) - time_edge = np.array( time_edge ) + good_start - #print( time_edge ) - qpt, iqst, qt = get_t_iqc( FD, time_edge, mask* Chip_Mask, pargs=setup_pargs, nx=1500 ) - plot_t_iqc( qt, iqst, time_edge, pargs=setup_pargs, xlim=[qt.min(), qt.max()], - ylim = [iqst.min(), iqst.max()], save=True ) - - elif scat_geometry == 'gi_waxs': - #roi_mask[badpixel] = 0 - qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) - show_ROI_on_image( avg_img, roi_mask, label_on = True, alpha=.5,save=True, path= data_dir, uid=uidstr)#, vmin=1, vmax=15) - - elif scat_geometry == 'gi_saxs': - show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), - logs=True, image_name= uidstr + '_img_avg', save=True, path=data_dir) - ticks_ = get_qzr_map( qr_map, qz_map, inc_x0, Nzline=10, Nrline=10 ) - ticks = ticks_[:4] - plot_qzr_map( qr_map, qz_map, inc_x0, ticks = ticks_, data= avg_img, uid= uidstr, path = data_dir 
) - show_qzr_roi( avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, uid=uidstr ) - qr_1d_pds = cal_1d_qr( avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs ) - plot_qr_1d_with_ROI( qr_1d_pds, qr_center=np.unique( np.array(list( qval_dict.values() ) )[:,0] ), - loglog=False, save=True, uid=uidstr, path = data_dir) - - Nimg = FD.end - FD.beg - time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) - time_edge = np.array( time_edge ) + good_start - qrt_pds = get_t_qrc( FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid = uidstr ) - plot_qrt_pds( qrt_pds, time_edge, qz_index = 0, uid = uidstr, path = data_dir ) - - + form_res = fit_form_factor( + q_saxs, + iq_saxs, + guess_values={"radius": 2500, "sigma": 0.05, "delta_rho": 1e-10}, + fit_range=[0.0001, 0.015], + fit_variables={"radius": T, "sigma": T, "delta_rho": T}, + res_pargs=setup_pargs, + xlim=[0.0001, 0.015], + ) + + show_ROI_on_image( + avg_img, + roi_mask, + center, + label_on=False, + rwidth=700, + alpha=0.9, + save=True, + path=data_dir, + uid=uidstr, + vmin=np.min(avg_img), + vmax=np.max(avg_img), + ) + + qr = np.array([qval_dict[k][0] for k in list(qval_dict.keys())]) + plot_qIq_with_ROI( + q_saxs, + iq_saxs, + qr, + logs=True, + uid=uidstr, + xlim=[q_saxs.min(), q_saxs.max()], + ylim=[iq_saxs.min(), iq_saxs.max()], + save=True, + path=data_dir, + ) + + if scat_geometry != "ang_saxs": + Nimg = FD.end - FD.beg + time_edge = create_time_slice(N=Nimg, slice_num=3, slice_width=1, edges=None) + time_edge = np.array(time_edge) + good_start + # print( time_edge ) + qpt, iqst, qt = get_t_iqc(FD, time_edge, mask * Chip_Mask, pargs=setup_pargs, nx=1500) + plot_t_iqc( + qt, + iqst, + time_edge, + pargs=setup_pargs, + xlim=[qt.min(), qt.max()], + ylim=[iqst.min(), iqst.max()], + save=True, + ) + + elif scat_geometry == "gi_waxs": + # roi_mask[badpixel] = 0 + qr = np.array([qval_dict[k][0] for k in list(qval_dict.keys())]) + show_ROI_on_image( + avg_img, roi_mask, label_on=True, alpha=0.5, save=True, path=data_dir, uid=uidstr + ) # , vmin=1, vmax=15) + + elif scat_geometry == "gi_saxs": + show_img( + avg_img, + vmin=0.1, + vmax=np.max(avg_img * 0.1), + logs=True, + image_name=uidstr + "_img_avg", + save=True, + path=data_dir, + ) + ticks_ = get_qzr_map(qr_map, qz_map, inc_x0, Nzline=10, Nrline=10) + ticks = ticks_[:4] + plot_qzr_map(qr_map, qz_map, inc_x0, ticks=ticks_, data=avg_img, uid=uidstr, path=data_dir) + show_qzr_roi(avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, uid=uidstr) + qr_1d_pds = cal_1d_qr(avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs) + plot_qr_1d_with_ROI( + qr_1d_pds, + qr_center=np.unique(np.array(list(qval_dict.values()))[:, 0]), + loglog=False, + save=True, + uid=uidstr, + path=data_dir, + ) + + Nimg = FD.end - FD.beg + time_edge = create_time_slice(N=Nimg, slice_num=3, slice_width=1, edges=None) + time_edge = np.array(time_edge) + good_start + qrt_pds = get_t_qrc(FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid=uidstr) + plot_qrt_pds(qrt_pds, time_edge, qz_index=0, uid=uidstr, path=data_dir) ############################## - ##the below works for all the geometries + ##the below works for all the geometries ######################################## - if scat_geometry !='ang_saxs': - roi_inten = check_ROI_intensity( avg_img, roi_mask, ring_number= qth_interest, uid =uidstr, save=True, path=data_dir ) - if scat_geometry =='saxs' or scat_geometry =='gi_saxs' or scat_geometry =='gi_waxs': + if 
scat_geometry != "ang_saxs": + roi_inten = check_ROI_intensity( + avg_img, roi_mask, ring_number=qth_interest, uid=uidstr, save=True, path=data_dir + ) + if scat_geometry == "saxs" or scat_geometry == "gi_saxs" or scat_geometry == "gi_waxs": if run_waterfall: - wat = cal_waterfallc( FD, roi_mask, - qindex= qth_interest, save = True, path=data_dir,uid=uidstr) - if run_waterfall: - plot_waterfallc( wat, qindex=qth_interest, aspect=None, - vmax= np.max(wat), uid=uidstr, save =True, - path=data_dir, beg= FD.beg) - ring_avg = None - + wat = cal_waterfallc(FD, roi_mask, qindex=qth_interest, save=True, path=data_dir, uid=uidstr) + if run_waterfall: + plot_waterfallc( + wat, + qindex=qth_interest, + aspect=None, + vmax=np.max(wat), + uid=uidstr, + save=True, + path=data_dir, + beg=FD.beg, + ) + ring_avg = None + if run_t_ROI_Inten: - times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, multi_cor=True ) - plot_each_ring_mean_intensityc( times_roi, mean_int_sets, uid = uidstr, save=True, path=data_dir ) - roi_avg = np.average( mean_int_sets, axis=0) + times_roi, mean_int_sets = cal_each_ring_mean_intensityc( + FD, roi_mask, timeperframe=None, multi_cor=True + ) + plot_each_ring_mean_intensityc(times_roi, mean_int_sets, uid=uidstr, save=True, path=data_dir) + roi_avg = np.average(mean_int_sets, axis=0) - uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) lag_steps = None - + if use_sqnorm: - norm = get_pixelist_interp_iq( qp_saxs, iq_saxs, roi_mask, center) + norm = get_pixelist_interp_iq(qp_saxs, iq_saxs, roi_mask, center) else: - norm=None - + norm = None + define_good_series = False if define_good_series: - FD = Multifile(filename, beg = good_start, end = Nimg) - uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) - print( uid_ ) - - if 'g2_fit_variables' in list( run_pargs.keys() ): - g2_fit_variables = run_pargs['g2_fit_variables'] + FD = Multifile(filename, beg=good_start, end=Nimg) + uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) + print(uid_) + + if "g2_fit_variables" in list(run_pargs.keys()): + g2_fit_variables = run_pargs["g2_fit_variables"] else: - g2_fit_variables = {'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True} + g2_fit_variables = {"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True} - if 'g2_guess_values' in list( run_pargs.keys() ): - g2_guess_values = run_pargs['g2_guess_values'] + if "g2_guess_values" in list(run_pargs.keys()): + g2_guess_values = run_pargs["g2_guess_values"] else: - g2_guess_values= {'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,} - - if 'g2_guess_limits' in list( run_pargs.keys()): - g2_guess_limits = run_pargs['g2_guess_limits'] + g2_guess_values = { + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + } + + if "g2_guess_limits" in list(run_pargs.keys()): + g2_guess_limits = run_pargs["g2_guess_limits"] else: - g2_guess_limits = dict( baseline =[1, 2], alpha=[0, 2], beta = [0, 1], relaxation_rate= [0.001, 5000]) - - if run_one_time: + g2_guess_limits = dict(baseline=[1, 2], alpha=[0, 2], beta=[0, 1], relaxation_rate=[0.001, 5000]) + + if run_one_time: if use_imgsum_norm: imgsum_ = imgsum else: - imgsum_ = None - if scat_geometry !='ang_saxs': + imgsum_ = None + if scat_geometry != "ang_saxs": t0 = time.time() - g2, lag_steps = cal_g2p( FD, roi_mask, bad_frame_list,good_start, num_buf = 8, num_lev= None, - imgsum= imgsum_, norm=norm ) + g2, lag_steps = cal_g2p( + FD, roi_mask, bad_frame_list, 
good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm + ) run_time(t0) - taus = lag_steps * timeperframe - g2_pds = save_g2_general( g2, taus=taus,qr=np.array( list( qval_dict.values() ) )[:,0], - uid=uid_+'_g2.csv', path= data_dir, return_res=True ) - g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, - function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, - fit_variables= g2_fit_variables, - guess_values= g2_guess_values, - guess_limits = g2_guess_limits) - - g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) - - #if run_one_time: - #plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, - # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') - - plot_g2_general( g2_dict={1:g2, 2:g2_fit}, taus_dict={1:taus, 2:taus_fit},vlim=[0.95, 1.05], - qval_dict = qval_dict, fit_res= g2_fit_result, geometry=scat_geometry,filename=uid_ + '_g2', - path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_fit') - - D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], geometry= scat_geometry ) - plot_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], qrate_fit_res, - geometry= scat_geometry,uid=uid_ , path= data_dir ) - - + taus = lag_steps * timeperframe + g2_pds = save_g2_general( + g2, + taus=taus, + qr=np.array(list(qval_dict.values()))[:, 0], + uid=uid_ + "_g2.csv", + path=data_dir, + return_res=True, + ) + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function=fit_g2_func, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables=g2_fit_variables, + guess_values=g2_guess_values, + guess_limits=g2_guess_limits, + ) + + g2_fit_paras = save_g2_fit_para_tocsv( + g2_fit_result, filename=uid_ + "_g2_fit_paras.csv", path=data_dir + ) + + # if run_one_time: + # plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + + plot_g2_general( + g2_dict={1: g2, 2: g2_fit}, + taus_dict={1: taus, 2: taus_fit}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=g2_fit_result, + geometry=scat_geometry, + filename=uid_ + "_g2", + path=data_dir, + function=fit_g2_func, + ylabel="g2", + append_name="_fit", + ) + + D0, qrate_fit_res = get_q_rate_fit_general( + qval_dict, g2_fit_paras["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict, + g2_fit_paras["relaxation_rate"], + qrate_fit_res, + geometry=scat_geometry, + uid=uid_, + path=data_dir, + ) + else: - t0 = time.time() - g2_v, lag_steps_v = cal_g2p( FD, roi_mask_v, bad_frame_list,good_start, num_buf = 8, num_lev= None, - imgsum= imgsum_, norm=norm ) - g2_p, lag_steps_p = cal_g2p( FD, roi_mask_p, bad_frame_list,good_start, num_buf = 8, num_lev= None, - imgsum= imgsum_, norm=norm ) - run_time(t0) - - taus_v = lag_steps_v * timeperframe - g2_pds_v = save_g2_general( g2_v, taus=taus_v,qr=np.array( list( qval_dict_v.values() ) )[:,0], - uid=uid_+'_g2v.csv', path= data_dir, return_res=True ) - - taus_p = lag_steps_p * timeperframe - g2_pds_p = save_g2_general( g2_p, taus=taus_p,qr=np.array( list( qval_dict_p.values() ) )[:,0], - uid=uid_+'_g2p.csv', path= data_dir, return_res=True ) - - fit_g2_func_v = 'stretched' #for vertical - g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( g2_v, taus_v, - function = fit_g2_func_v, vlim=[0.95, 1.05], fit_range= None, - 
fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True}, - guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) - g2_fit_paras_v = save_g2_fit_para_tocsv(g2_fit_result_v, filename= uid_ +'_g2_fit_paras_v.csv', path=data_dir ) - - fit_g2_func_p ='flow_para' #for parallel - g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( g2_p, taus_p, - function = fit_g2_func_p, vlim=[0.95, 1.05], fit_range= None, - fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True,'flow_velocity':True}, - guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,'flow_velocity':1}) - g2_fit_paras_p = save_g2_fit_para_tocsv(g2_fit_result_p, filename= uid_ +'_g2_fit_paras_p.csv', path=data_dir ) - - - - plot_g2_general( g2_dict={1:g2_v, 2:g2_fit_v}, taus_dict={1:taus_v, 2:taus_fit_v},vlim=[0.95, 1.05], - qval_dict = qval_dict_v, fit_res= g2_fit_result_v, geometry=scat_geometry,filename= uid_+'_g2_v', - path= data_dir, function= fit_g2_func_v, ylabel='g2_v', append_name= '_fit') - - plot_g2_general( g2_dict={1:g2_p, 2:g2_fit_p}, taus_dict={1:taus_p, 2:taus_fit_p},vlim=[0.95, 1.05], - qval_dict = qval_dict_p, fit_res= g2_fit_result_p, geometry=scat_geometry,filename= uid_+'_g2_p', - path= data_dir, function= fit_g2_func_p, ylabel='g2_p', append_name= '_fit') - - combine_images( [data_dir + uid_+'_g2_v_fit.png', data_dir + uid_+'_g2_p_fit.png'], data_dir + uid_+'_g2_fit.png', outsize=(2000, 2400) ) - - - D0_v, qrate_fit_res_v = get_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], geometry= scat_geometry ) - plot_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], qrate_fit_res_v, - geometry= scat_geometry,uid=uid_ +'_vert' , path= data_dir ) - - D0_p, qrate_fit_res_p = get_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], geometry= scat_geometry ) - plot_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], qrate_fit_res_p, - geometry= scat_geometry,uid=uid_ +'_para' , path= data_dir ) - - - combine_images( [data_dir + uid_+ '_vert_Q_Rate_fit.png', data_dir + uid_+ '_para_Q_Rate_fit.png'], data_dir + uid_+'_Q_Rate_fit.png', outsize=(2000, 2400) ) + t0 = time.time() + g2_v, lag_steps_v = cal_g2p( + FD, roi_mask_v, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm + ) + g2_p, lag_steps_p = cal_g2p( + FD, roi_mask_p, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm + ) + run_time(t0) + taus_v = lag_steps_v * timeperframe + g2_pds_v = save_g2_general( + g2_v, + taus=taus_v, + qr=np.array(list(qval_dict_v.values()))[:, 0], + uid=uid_ + "_g2v.csv", + path=data_dir, + return_res=True, + ) + + taus_p = lag_steps_p * timeperframe + g2_pds_p = save_g2_general( + g2_p, + taus=taus_p, + qr=np.array(list(qval_dict_p.values()))[:, 0], + uid=uid_ + "_g2p.csv", + path=data_dir, + return_res=True, + ) + + fit_g2_func_v = "stretched" # for vertical + g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( + g2_v, + taus_v, + function=fit_g2_func_v, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + }, + ) + g2_fit_paras_v = save_g2_fit_para_tocsv( + g2_fit_result_v, filename=uid_ + "_g2_fit_paras_v.csv", path=data_dir + ) + + fit_g2_func_p = "flow_para" # for parallel + g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( + 
g2_p, + taus_p, + function=fit_g2_func_p, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables={ + "baseline": True, + "beta": True, + "alpha": False, + "relaxation_rate": True, + "flow_velocity": True, + }, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + "flow_velocity": 1, + }, + ) + g2_fit_paras_p = save_g2_fit_para_tocsv( + g2_fit_result_p, filename=uid_ + "_g2_fit_paras_p.csv", path=data_dir + ) + + plot_g2_general( + g2_dict={1: g2_v, 2: g2_fit_v}, + taus_dict={1: taus_v, 2: taus_fit_v}, + vlim=[0.95, 1.05], + qval_dict=qval_dict_v, + fit_res=g2_fit_result_v, + geometry=scat_geometry, + filename=uid_ + "_g2_v", + path=data_dir, + function=fit_g2_func_v, + ylabel="g2_v", + append_name="_fit", + ) + + plot_g2_general( + g2_dict={1: g2_p, 2: g2_fit_p}, + taus_dict={1: taus_p, 2: taus_fit_p}, + vlim=[0.95, 1.05], + qval_dict=qval_dict_p, + fit_res=g2_fit_result_p, + geometry=scat_geometry, + filename=uid_ + "_g2_p", + path=data_dir, + function=fit_g2_func_p, + ylabel="g2_p", + append_name="_fit", + ) + + combine_images( + [data_dir + uid_ + "_g2_v_fit.png", data_dir + uid_ + "_g2_p_fit.png"], + data_dir + uid_ + "_g2_fit.png", + outsize=(2000, 2400), + ) + + D0_v, qrate_fit_res_v = get_q_rate_fit_general( + qval_dict_v, g2_fit_paras_v["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict_v, + g2_fit_paras_v["relaxation_rate"], + qrate_fit_res_v, + geometry=scat_geometry, + uid=uid_ + "_vert", + path=data_dir, + ) + + D0_p, qrate_fit_res_p = get_q_rate_fit_general( + qval_dict_p, g2_fit_paras_p["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict_p, + g2_fit_paras_p["relaxation_rate"], + qrate_fit_res_p, + geometry=scat_geometry, + uid=uid_ + "_para", + path=data_dir, + ) + + combine_images( + [data_dir + uid_ + "_vert_Q_Rate_fit.png", data_dir + uid_ + "_para_Q_Rate_fit.png"], + data_dir + uid_ + "_Q_Rate_fit.png", + outsize=(2000, 2400), + ) # For two-time data_pixel = None - if run_two_time: - - data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() - t0=time.time() - g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None ) + if run_two_time: + + data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() + t0 = time.time() + g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None) if run_dose: - np.save( data_dir + 'uid=%s_g12b'%uid, g12b) - - + np.save(data_dir + "uid=%s_g12b" % uid, g12b) + if lag_steps is None: - num_bufs=8 + num_bufs = 8 noframes = FD.end - FD.beg - num_levels = int(np.log( noframes/(num_bufs-1))/np.log(2) +1) +1 + num_levels = int(np.log(noframes / (num_bufs - 1)) / np.log(2) + 1) + 1 tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) - max_taus= lag_steps.max() - lag_steps = lag_steps[ lag_steps < Nimg - good_start ] - - run_time( t0 ) - - show_C12(g12b, q_ind= qth_interest, N1= FD.beg, N2=min( FD.end,5000), vmin= 0.99, vmax=1.3, - timeperframe=timeperframe,save=True, cmap=cmap_albula, - path= data_dir, uid = uid_ ) - - #print('here') - #show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, - # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) - max_taus = Nimg - t0=time.time() - #g2b = get_one_time_from_two_time(g12b)[:max_taus] + max_taus = lag_steps.max() + lag_steps = lag_steps[lag_steps < Nimg - good_start] + + run_time(t0) + + show_C12( + g12b, + q_ind=qth_interest, + N1=FD.beg, + N2=min(FD.end, 5000), + vmin=0.99, + vmax=1.3, + 
timeperframe=timeperframe, + save=True, + cmap=cmap_albula, + path=data_dir, + uid=uid_, + ) + + # print('here') + # show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, + # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) + max_taus = Nimg + t0 = time.time() + # g2b = get_one_time_from_two_time(g12b)[:max_taus] g2b = get_one_time_from_two_time(g12b)[lag_steps] - - tausb = lag_steps *timeperframe - run_time(t0) - - - #tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe - g2b_pds = save_g2_general( g2b, taus=tausb, qr= np.array( list( qval_dict.values() ) )[:,0], - qz=None, uid=uid_ +'_g2b.csv', path= data_dir, return_res=True ) - - - g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( g2b, tausb, - function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, - fit_variables=g2_fit_variables, guess_values=g2_guess_values, guess_limits =g2_guess_limits) - - g2b_fit_paras = save_g2_fit_para_tocsv(g2_fit_resultb, - filename= uid_ + '_g2b_fit_paras.csv', path=data_dir ) - - D0b, qrate_fit_resb = get_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], - fit_range=None, geometry= scat_geometry ) - - - #print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) - plot_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb, - geometry= scat_geometry,uid=uid_ +'_two_time' , path= data_dir ) - - - - plot_g2_general( g2_dict={1:g2b, 2:g2_fitb}, taus_dict={1:tausb, 2:taus_fitb},vlim=[0.95, 1.05], - qval_dict=qval_dict, fit_res= g2_fit_resultb, geometry=scat_geometry,filename=uid_+'_g2', - path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_b_fit') - + + tausb = lag_steps * timeperframe + run_time(t0) + + # tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe + g2b_pds = save_g2_general( + g2b, + taus=tausb, + qr=np.array(list(qval_dict.values()))[:, 0], + qz=None, + uid=uid_ + "_g2b.csv", + path=data_dir, + return_res=True, + ) + + g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( + g2b, + tausb, + function=fit_g2_func, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables=g2_fit_variables, + guess_values=g2_guess_values, + guess_limits=g2_guess_limits, + ) + + g2b_fit_paras = save_g2_fit_para_tocsv( + g2_fit_resultb, filename=uid_ + "_g2b_fit_paras.csv", path=data_dir + ) + + D0b, qrate_fit_resb = get_q_rate_fit_general( + qval_dict, g2b_fit_paras["relaxation_rate"], fit_range=None, geometry=scat_geometry + ) + + # print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) + plot_q_rate_fit_general( + qval_dict, + g2b_fit_paras["relaxation_rate"], + qrate_fit_resb, + geometry=scat_geometry, + uid=uid_ + "_two_time", + path=data_dir, + ) + + plot_g2_general( + g2_dict={1: g2b, 2: g2_fitb}, + taus_dict={1: tausb, 2: taus_fitb}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=g2_fit_resultb, + geometry=scat_geometry, + filename=uid_ + "_g2", + path=data_dir, + function=fit_g2_func, + ylabel="g2", + append_name="_b_fit", + ) + if run_two_time and run_one_time: - plot_g2_general( g2_dict={1:g2, 2:g2b}, taus_dict={1:taus, 2:tausb},vlim=[0.95, 1.05], - qval_dict=qval_dict, g2_labels=['from_one_time', 'from_two_time'], - geometry=scat_geometry,filename=uid_+'_g2_two_g2', path= data_dir, ylabel='g2', ) + plot_g2_general( + g2_dict={1: g2, 2: g2b}, + taus_dict={1: taus, 2: tausb}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + g2_labels=["from_one_time", "from_two_time"], + geometry=scat_geometry, + filename=uid_ + "_g2_two_g2", + path=data_dir, + 
ylabel="g2", + ) - - # Four Time Correlation - if run_four_time: #have to run one and two first - t0=time.time() + if run_four_time: # have to run one and two first + t0 = time.time() g4 = get_four_time_from_two_time(g12b, g2=g2b)[:max_taus] run_time(t0) - taus4 = np.arange( g4.shape[0])*timeperframe - g4_pds = save_g2_general( g4, taus=taus4, qr=np.array( list( qval_dict.values() ) )[:,0], - qz=None, uid=uid_ +'_g4.csv', path= data_dir, return_res=True ) - plot_g2_general( g2_dict={1:g4}, taus_dict={1:taus4},vlim=[0.95, 1.05], qval_dict=qval_dict, fit_res= None, - geometry=scat_geometry,filename=uid_+'_g4',path= data_dir, ylabel='g4') + taus4 = np.arange(g4.shape[0]) * timeperframe + g4_pds = save_g2_general( + g4, + taus=taus4, + qr=np.array(list(qval_dict.values()))[:, 0], + qz=None, + uid=uid_ + "_g4.csv", + path=data_dir, + return_res=True, + ) + plot_g2_general( + g2_dict={1: g4}, + taus_dict={1: taus4}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=None, + geometry=scat_geometry, + filename=uid_ + "_g4", + path=data_dir, + ylabel="g4", + ) if run_dose: - get_two_time_mulit_uids( [uid], roi_mask, norm= norm, bin_frame_number=bin_frame_number, - path= data_dir0, force_generate=False ) + get_two_time_mulit_uids( + [uid], roi_mask, norm=norm, bin_frame_number=bin_frame_number, path=data_dir0, force_generate=False + ) N = len(imgs) try: - tr = md['transmission'] + tr = md["transmission"] except: tr = 1 - if 'dose_frame' in list(run_pargs.keys()): - dose_frame = run_pargs['dose_frame'] + if "dose_frame" in list(run_pargs.keys()): + dose_frame = run_pargs["dose_frame"] else: - dose_frame = np.int_([ N/8, N/4 ,N/2, 3*N/4, N*0.99 ] ) - #N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 + dose_frame = np.int_([N / 8, N / 4, N / 2, 3 * N / 4, N * 0.99]) + # N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 exposure_dose = tr * exposuretime * dose_frame - taus_uids, g2_uids = get_series_one_time_mulit_uids( [ uid ], qval_dict, good_start=good_start, - path= data_dir0, exposure_dose = exposure_dose, num_bufs =8, save_g2= False, - dead_time = 0, trans = [ tr ] ) - - plot_dose_g2( taus_uids, g2_uids, ylim=[0.95, 1.2], vshift= 0.00, - qval_dict = qval_dict, fit_res= None, geometry= scat_geometry, - filename= '%s_dose_analysis'%uid_, - path= data_dir, function= None, ylabel='g2_Dose', g2_labels= None, append_name= '' ) - + taus_uids, g2_uids = get_series_one_time_mulit_uids( + [uid], + qval_dict, + good_start=good_start, + path=data_dir0, + exposure_dose=exposure_dose, + num_bufs=8, + save_g2=False, + dead_time=0, + trans=[tr], + ) + + plot_dose_g2( + taus_uids, + g2_uids, + ylim=[0.95, 1.2], + vshift=0.00, + qval_dict=qval_dict, + fit_res=None, + geometry=scat_geometry, + filename="%s_dose_analysis" % uid_, + path=data_dir, + function=None, + ylabel="g2_Dose", + g2_labels=None, + append_name="", + ) + # Speckel Visiblity - if run_xsvs: - max_cts = get_max_countc(FD, roi_mask ) - qind, pixelist = roi.extract_label_indices( roi_mask ) - noqs = len( np.unique(qind) ) - nopr = np.bincount(qind, minlength=(noqs+1))[1:] - #time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) - time_steps = [0,1] #only run the first two levels - num_times = len(time_steps) - times_xsvs = exposuretime + (2**( np.arange( len(time_steps) ) ) -1 ) *timeperframe - print( 'The max counts are: %s'%max_cts ) - - ### Do historam - if roi_avg is None: - times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, ) - roi_avg = np.average( mean_int_sets, axis=0) - - t0=time.time() - spec_bins, 
spec_his, spec_std = xsvsp( FD, np.int_(roi_mask), norm=None,
-                    max_cts=int(max_cts+2), bad_images=bad_frame_list, only_two_levels=True )
-        spec_kmean = np.array( [roi_avg * 2**j for j in range( spec_his.shape[0] )] )
+    if run_xsvs:
+        max_cts = get_max_countc(FD, roi_mask)
+        qind, pixelist = roi.extract_label_indices(roi_mask)
+        noqs = len(np.unique(qind))
+        nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
+        # time_steps = np.array( utils.geometric_series(2, len(imgs) ) )
+        time_steps = [0, 1]  # only run the first two levels
+        num_times = len(time_steps)
+        times_xsvs = exposuretime + (2 ** (np.arange(len(time_steps))) - 1) * timeperframe
+        # with time_steps = [0, 1], this gives [exposuretime, exposuretime + timeperframe]
+        print("The max counts are: %s" % max_cts)
+
+        ### Do histogram
+        if roi_avg is None:
+            times_roi, mean_int_sets = cal_each_ring_mean_intensityc(
+                FD,
+                roi_mask,
+                timeperframe=None,
+            )
+            roi_avg = np.average(mean_int_sets, axis=0)
+
+        t0 = time.time()
+        spec_bins, spec_his, spec_std = xsvsp(
+            FD,
+            np.int_(roi_mask),
+            norm=None,
+            max_cts=int(max_cts + 2),
+            bad_images=bad_frame_list,
+            only_two_levels=True,
+        )
+        spec_kmean = np.array([roi_avg * 2**j for j in range(spec_his.shape[0])])
         run_time(t0)
-
+
         run_xsvs_all_lags = False
         if run_xsvs_all_lags:
-            times_xsvs = exposuretime + lag_steps * acquisition_period
+            times_xsvs = exposuretime + lag_steps * acquisition_period
             if data_pixel is None:
-                data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data()
-            t0=time.time()
-            spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std(data_pixel, np.int_(ro_mask), lag_steps )
-            run_time(t0)
-        spec_pds = save_bin_his_std( spec_bins, spec_his, spec_std, filename=uid_+'_spec_res.csv', path=data_dir )
-
-        ML_val, KL_val,K_ = get_xsvs_fit( spec_his, spec_kmean, spec_std, max_bins=2,varyK= False, )
-
-        #print( 'The observed average photon counts are: %s'%np.round(K_mean,4))
-        #print( 'The fitted average photon counts are: %s'%np.round(K_,4))
-        print( 'The difference sum of average photon counts between fit and data are: %s'%np.round(
-            abs(np.sum( spec_kmean[0,:] - K_ )),4))
-        print( '#'*30)
-        qth= 10
-        print( 'The fitted M for Qth= %s are: %s'%(qth, ML_val[qth]) )
-        print( K_[qth])
-        print( '#'*30)
-
-
-        plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std=spec_std,
-                  xlim = [0,10], vlim =[.9, 1.1],
-        uid=uid_, qth= qth_interest, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir)
+                data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data()
+            t0 = time.time()
+            spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std(
+                data_pixel, np.int_(roi_mask), lag_steps
+            )
+            run_time(t0)
+        spec_pds = save_bin_his_std(
+            spec_bins, spec_his, spec_std, filename=uid_ + "_spec_res.csv", path=data_dir
+        )
+
+        ML_val, KL_val, K_ = get_xsvs_fit(
+            spec_his,
+            spec_kmean,
+            spec_std,
+            max_bins=2,
+            varyK=False,
+        )
+
+        # print( 'The observed average photon counts are: %s'%np.round(K_mean,4))
+        # print( 'The fitted average photon counts are: %s'%np.round(K_,4))
+        print(
+            "The summed difference of average photon counts between fit and data is: %s"
+            % np.round(abs(np.sum(spec_kmean[0, :] - K_)), 4)
+        )
+        print("#" * 30)
+        qth = 10
+        print("The fitted M for Qth= %s are: %s" % (qth, ML_val[qth]))
+        print(K_[qth])
+        print("#" * 30)
+
+        plot_xsvs_fit(
+            spec_his,
+            ML_val,
+            KL_val,
+            K_mean=spec_kmean,
+            
spec_std=spec_std, + xlim=[0, 10], + vlim=[0.9, 1.1], + uid=uid_, + qth=qth_interest, + logy=True, + times=times_xsvs, + q_ring_center=qr, + path=data_dir, + ) + + plot_xsvs_fit( + spec_his, + ML_val, + KL_val, + K_mean=spec_kmean, + spec_std=spec_std, + xlim=[0, 15], + vlim=[0.9, 1.1], + uid=uid_, + qth=None, + logy=True, + times=times_xsvs, + q_ring_center=qr, + path=data_dir, + ) ### Get contrast - contrast_factorL = get_contrast( ML_val) - spec_km_pds = save_KM( spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_ , path = data_dir ) - #print( spec_km_pds ) - - plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, - vlim=[0.8,1.2], qth = qth_interest, uid=uid_,path = data_dir, legend_size=14) - - plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, - vlim=[0.8,1.2], qth = None, uid=uid_,path = data_dir, legend_size=4) - - - - - - md['mask_file']= mask_path + mask_name - md['mask'] = mask - md['NOTEBOOK_FULL_PATH'] = None - md['good_start'] = good_start - md['bad_frame_list'] = bad_frame_list - md['avg_img'] = avg_img - md['roi_mask'] = roi_mask - - if scat_geometry == 'gi_saxs': - md['Qr'] = Qr - md['Qz'] = Qz - md['qval_dict'] = qval_dict - md['beam_center_x'] = inc_x0 - md['beam_center_y']= inc_y0 - md['beam_refl_center_x'] = refl_x0 - md['beam_refl_center_y'] = refl_y0 - - elif scat_geometry == 'saxs' or 'gi_waxs': - md['qr']= qr - #md['qr_edge'] = qr_edge - md['qval_dict'] = qval_dict - md['beam_center_x'] = center[1] - md['beam_center_y']= center[0] - - elif scat_geometry == 'ang_saxs': - md['qval_dict_v'] = qval_dict_v - md['qval_dict_p'] = qval_dict_p - md['beam_center_x'] = center[1] - md['beam_center_y']= center[0] - - - md['beg'] = FD.beg - md['end'] = FD.end - md['metadata_file'] = data_dir + 'md.csv-&-md.pkl' - psave_obj( md, data_dir + 'uid=%s_md'%uid[:6] ) #save the setup parameters - #psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters - save_dict_csv( md, data_dir + 'uid=%s_md.csv'%uid, 'w') - - Exdt = {} - if scat_geometry == 'gi_saxs': - for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], - [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): - Exdt[ k ] = v - elif scat_geometry == 'saxs': - for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], - [md, q_saxs, iq_saxs, iqst, qt,roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): - Exdt[ k ] = v - elif scat_geometry == 'gi_waxs': - for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], - [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): - Exdt[ k ] = v - elif scat_geometry == 'ang_saxs': - for k,v in zip( ['md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', - 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], - [md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, - qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): - Exdt[ k ] = v - - if run_waterfall:Exdt['wat'] = wat - if run_t_ROI_Inten:Exdt['times_roi'] = times_roi;Exdt['mean_int_sets']=mean_int_sets + contrast_factorL = get_contrast(ML_val) + spec_km_pds = save_KM( + spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_, path=data_dir + ) + # print( spec_km_pds ) + + plot_g2_contrast( + contrast_factorL, + g2, + times_xsvs, + taus, + qr, + vlim=[0.8, 1.2], + 
qth=qth_interest,
+            uid=uid_,
+            path=data_dir,
+            legend_size=14,
+        )
+
+        plot_g2_contrast(
+            contrast_factorL,
+            g2,
+            times_xsvs,
+            taus,
+            qr,
+            vlim=[0.8, 1.2],
+            qth=None,
+            uid=uid_,
+            path=data_dir,
+            legend_size=4,
+        )
+
+    md["mask_file"] = mask_path + mask_name
+    md["mask"] = mask
+    md["NOTEBOOK_FULL_PATH"] = None
+    md["good_start"] = good_start
+    md["bad_frame_list"] = bad_frame_list
+    md["avg_img"] = avg_img
+    md["roi_mask"] = roi_mask
+
+    if scat_geometry == "gi_saxs":
+        md["Qr"] = Qr
+        md["Qz"] = Qz
+        md["qval_dict"] = qval_dict
+        md["beam_center_x"] = inc_x0
+        md["beam_center_y"] = inc_y0
+        md["beam_refl_center_x"] = refl_x0
+        md["beam_refl_center_y"] = refl_y0
+
+    elif scat_geometry == "saxs" or scat_geometry == "gi_waxs":
+        md["qr"] = qr
+        # md['qr_edge'] = qr_edge
+        md["qval_dict"] = qval_dict
+        md["beam_center_x"] = center[1]
+        md["beam_center_y"] = center[0]
+
+    elif scat_geometry == "ang_saxs":
+        md["qval_dict_v"] = qval_dict_v
+        md["qval_dict_p"] = qval_dict_p
+        md["beam_center_x"] = center[1]
+        md["beam_center_y"] = center[0]
+
+    md["beg"] = FD.beg
+    md["end"] = FD.end
+    md["metadata_file"] = data_dir + "md.csv-&-md.pkl"
+    psave_obj(md, data_dir + "uid=%s_md" % uid[:6])  # save the setup parameters
+    # psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters
+    save_dict_csv(md, data_dir + "uid=%s_md.csv" % uid, "w")
+
+    Exdt = {}
+    if scat_geometry == "gi_saxs":
+        for k, v in zip(
+            [
+                "md",
+                "roi_mask",
+                "qval_dict",
+                "avg_img",
+                "mask",
+                "pixel_mask",
+                "imgsum",
+                "bad_frame_list",
+                "qr_1d_pds",
+            ],
+            [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list, qr_1d_pds],
+        ):
+            Exdt[k] = v
+    elif scat_geometry == "saxs":
+        for k, v in zip(
+            [
+                "md",
+                "q_saxs",
+                "iq_saxs",
+                "iqst",
+                "qt",
+                "roi_mask",
+                "qval_dict",
+                "avg_img",
+                "mask",
+                "pixel_mask",
+                "imgsum",
+                "bad_frame_list",
+            ],
+            [
+                md,
+                q_saxs,
+                iq_saxs,
+                iqst,
+                qt,
+                roi_mask,
+                qval_dict,
+                avg_img,
+                mask,
+                pixel_mask,
+                imgsum,
+                bad_frame_list,
+            ],
+        ):
+            Exdt[k] = v
+    elif scat_geometry == "gi_waxs":
+        for k, v in zip(
+            ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "bad_frame_list"],
+            [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list],
+        ):
+            Exdt[k] = v
+    elif scat_geometry == "ang_saxs":
+        for k, v in zip(
+            [
+                "md",
+                "q_saxs",
+                "iq_saxs",
+                "roi_mask_v",
+                "roi_mask_p",
+                "qval_dict_v",
+                "qval_dict_p",
+                "avg_img",
+                "mask",
+                "pixel_mask",
+                "imgsum",
+                "bad_frame_list",
+            ],
+            [
+                md,
+                q_saxs,
+                iq_saxs,
+                roi_mask_v,
+                roi_mask_p,
+                qval_dict_v,
+                qval_dict_p,
+                avg_img,
+                mask,
+                pixel_mask,
+                imgsum,
+                bad_frame_list,
+            ],
+        ):
+            Exdt[k] = v
+
+    if run_waterfall:
+        Exdt["wat"] = wat
+    if run_t_ROI_Inten:
+        Exdt["times_roi"] = times_roi
+        Exdt["mean_int_sets"] = mean_int_sets
     if run_one_time:
-        if scat_geometry != 'ang_saxs':
-            for k,v in zip( ['taus','g2','g2_fit_paras'], [taus,g2,g2_fit_paras] ):Exdt[ k ] = v
+        if scat_geometry != "ang_saxs":
+            for k, v in zip(["taus", "g2", "g2_fit_paras"], [taus, g2, g2_fit_paras]):
+                Exdt[k] = v
         else:
-            for k,v in zip( ['taus_v','g2_v','g2_fit_paras_v'], [taus_v,g2_v,g2_fit_paras_v] ):Exdt[ k ] = v
-            for k,v in zip( ['taus_p','g2_p','g2_fit_paras_p'], [taus_p,g2_p,g2_fit_paras_p] ):Exdt[ k ] = v
+            for k, v in zip(["taus_v", "g2_v", "g2_fit_paras_v"], [taus_v, g2_v, g2_fit_paras_v]):
+                Exdt[k] = v
+            for k, v in zip(["taus_p", "g2_p", "g2_fit_paras_p"], [taus_p, g2_p, g2_fit_paras_p]):
+                Exdt[k] = v
     if run_two_time:
-        for k,v in zip( 
['tausb','g2b','g2b_fit_paras', 'g12b'], [tausb,g2b,g2b_fit_paras,g12b] ):Exdt[ k ] = v + for k, v in zip(["tausb", "g2b", "g2b_fit_paras", "g12b"], [tausb, g2b, g2b_fit_paras, g12b]): + Exdt[k] = v if run_four_time: - for k,v in zip( ['taus4','g4'], [taus4,g4] ):Exdt[ k ] = v + for k, v in zip(["taus4", "g4"], [taus4, g4]): + Exdt[k] = v if run_xsvs: - for k,v in zip( ['spec_kmean','spec_pds','times_xsvs','spec_km_pds','contrast_factorL'], - [ spec_kmean,spec_pds,times_xsvs,spec_km_pds,contrast_factorL] ):Exdt[ k ] = v - - - export_xpcs_results_to_h5( 'uid=%s_Res.h5'%md['uid'], data_dir, export_dict = Exdt ) - #extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) + for k, v in zip( + ["spec_kmean", "spec_pds", "times_xsvs", "spec_km_pds", "contrast_factorL"], + [spec_kmean, spec_pds, times_xsvs, spec_km_pds, contrast_factorL], + ): + Exdt[k] = v + + export_xpcs_results_to_h5("uid=%s_Res.h5" % md["uid"], data_dir, export_dict=Exdt) + # extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) # Creat PDF Report - pdf_out_dir = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') - pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) + pdf_out_dir = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") + pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf" % (uid, pdf_version) if run_xsvs: - pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) - #pdf_filename - - print( data_dir, uid[:6], pdf_out_dir, pdf_filename, username ) - - make_pdf_report( data_dir, uid[:6], pdf_out_dir, pdf_filename, username, - run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=run_dose, - report_type= scat_geometry - ) - ## Attach the PDF report to Olog + pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf" % (uid, pdf_version) + # pdf_filename + + print(data_dir, uid[:6], pdf_out_dir, pdf_filename, username) + + make_pdf_report( + data_dir, + uid[:6], + pdf_out_dir, + pdf_filename, + username, + run_fit_form, + run_one_time, + run_two_time, + run_four_time, + run_xsvs, + run_dose=run_dose, + report_type=scat_geometry, + ) + ## Attach the PDF report to Olog if att_pdf_report: - os.environ['HTTPS_PROXY'] = 'https://proxy:8888' - os.environ['no_proxy'] = 'cs.nsls2.local,localhost,127.0.0.1' - pname = pdf_out_dir + pdf_filename - atch=[ Attachment(open(pname, 'rb')) ] + os.environ["HTTPS_PROXY"] = "https://proxy:8888" + os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" + pname = pdf_out_dir + pdf_filename + atch = [Attachment(open(pname, "rb"))] try: - update_olog_uid( uid= md['uid'], text='Add XPCS Analysis PDF Report', attachments= atch ) + update_olog_uid(uid=md["uid"], text="Add XPCS Analysis PDF Report", attachments=atch) except: - print("I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file."%pname) + print( + "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." 
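+                # update_olog_uid is assumed to raise when an attachment with
+                # the same filename already exists on the Olog entry; note that
+                # this broad except swallows other errors as well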
+ % pname + ) if show_plot: - plt.show() - #else: + plt.show() + # else: # plt.close('all') if clear_plot: - plt.close('all') + plt.close("all") if return_res: res = {} - if scat_geometry == 'saxs': - for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','avg_img','mask', 'imgsum','bad_frame_list','roi_mask', 'qval_dict'], - [ md, q_saxs, iq_saxs, iqst, qt, avg_img,mask,imgsum,bad_frame_list,roi_mask, qval_dict ] ): - res[ k ] = v - - elif scat_geometry == 'ang_saxs': - for k,v in zip( [ 'md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', - 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], - [ md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, - qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): - res[ k ] = v - - elif scat_geometry == 'gi_saxs': - for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], - [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): - res[ k ] = v - - elif scat_geometry == 'gi_waxs': - for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], - [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): - res[ k ] = v - + if scat_geometry == "saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "iqst", + "qt", + "avg_img", + "mask", + "imgsum", + "bad_frame_list", + "roi_mask", + "qval_dict", + ], + [md, q_saxs, iq_saxs, iqst, qt, avg_img, mask, imgsum, bad_frame_list, roi_mask, qval_dict], + ): + res[k] = v + + elif scat_geometry == "ang_saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "roi_mask_v", + "roi_mask_p", + "qval_dict_v", + "qval_dict_p", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + ], + [ + md, + q_saxs, + iq_saxs, + roi_mask_v, + roi_mask_p, + qval_dict_v, + qval_dict_p, + avg_img, + mask, + pixel_mask, + imgsum, + bad_frame_list, + ], + ): + res[k] = v + + elif scat_geometry == "gi_saxs": + for k, v in zip( + [ + "md", + "roi_mask", + "qval_dict", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + "qr_1d_pds", + ], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list, qr_1d_pds], + ): + res[k] = v + + elif scat_geometry == "gi_waxs": + for k, v in zip( + ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "bad_frame_list"], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list], + ): + res[k] = v + if run_waterfall: - res['wat'] = wat + res["wat"] = wat if run_t_ROI_Inten: - res['times_roi'] = times_roi; - res['mean_int_sets']=mean_int_sets + res["times_roi"] = times_roi + res["mean_int_sets"] = mean_int_sets if run_one_time: - if scat_geometry != 'ang_saxs': - res['g2'] = g2 - res['taus']=taus + if scat_geometry != "ang_saxs": + res["g2"] = g2 + res["taus"] = taus else: - res['g2_p'] = g2_p - res['taus_p']=taus_p - res['g2_v'] = g2_v - res['taus_v']=taus_v - + res["g2_p"] = g2_p + res["taus_p"] = taus_p + res["g2_v"] = g2_v + res["taus_v"] = taus_v + if run_two_time: - res['tausb'] = tausb - res['g12b'] = g12b - res['g2b'] = g2b + res["tausb"] = tausb + res["g12b"] = g12b + res["g2b"] = g2b if run_four_time: - res['g4']= g4 - res['taus4']=taus4 + res["g4"] = g4 + res["taus4"] = taus4 if run_xsvs: - res['spec_kmean']=spec_kmean - res['spec_pds']= spec_pds - res['contrast_factorL'] = contrast_factorL - res['times_xsvs']= times_xsvs + res["spec_kmean"] = spec_kmean + res["spec_pds"] = 
spec_pds + res["contrast_factorL"] = contrast_factorL + res["times_xsvs"] = times_xsvs return res - -#uid = '3ff4ee' -#run_xpcs_xsvs_single( uid, run_pargs ) - - - +# uid = '3ff4ee' +# run_xpcs_xsvs_single( uid, run_pargs ) diff --git a/pyCHX/xpcs_timepixel.py b/pyCHX/xpcs_timepixel.py index 286141e..85080c5 100644 --- a/pyCHX/xpcs_timepixel.py +++ b/pyCHX/xpcs_timepixel.py @@ -1,830 +1,907 @@ -from numpy import pi,sin,arctan,sqrt,mgrid,where,shape,exp,linspace,std,arange -from numpy import power,log,log10,array,zeros,ones,reshape,mean,histogram,round,int_ -from numpy import indices,hypot,digitize,ma,histogramdd,apply_over_axes,sum -from numpy import around,intersect1d, ravel, unique,hstack,vstack,zeros_like -from numpy import save, load, dot -from numpy.linalg import lstsq -from numpy import polyfit,poly1d; -import sys,os +import os import pickle as pkl +import struct +import sys -import matplotlib.pyplot as plt -#from Init_for_Timepix import * # the setup file +# from Init_for_Timepix import * # the setup file import time - -import struct -import numpy as np -from tqdm import tqdm -import pandas as pds -from pyCHX.chx_libs import multi_tau_lags -from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD +import matplotlib.pyplot as plt +import numpy as np +import pandas as pds +from numpy import ( + apply_over_axes, + arange, + arctan, + around, + array, + digitize, + dot, + exp, + histogram, + histogramdd, + hstack, + hypot, + indices, + int_, + intersect1d, + linspace, + load, + log, + log10, + ma, + mean, + mgrid, + ones, + pi, + poly1d, + polyfit, + power, + ravel, + reshape, + round, + save, + shape, + sin, + sqrt, + std, + sum, + unique, + vstack, + where, + zeros, + zeros_like, +) +from numpy.linalg import lstsq +from tqdm import tqdm +from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD +from pyCHX.chx_libs import multi_tau_lags -def get_timepixel_data( data_dir, filename, time_unit= 1 ): - '''give a csv file of a timepixel data, return x,y,t +def get_timepixel_data(data_dir, filename, time_unit=1): + """give a csv file of a timepixel data, return x,y,t x, pos_x in pixel y, pos_y in pixel t, arrival time - time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 - return x,y,t (in second, starting from zero) - - ''' - data = pds.read_csv( data_dir + filename ) + time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 + return x,y,t (in second, starting from zero) + + """ + data = pds.read_csv(data_dir + filename) #'#Col', ' #Row', ' #ToA', - #return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps - if time_unit !=1: + # return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps + if time_unit != 1: try: - x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) * time_unit + x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) * time_unit except: - x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) * time_unit + x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) * time_unit else: try: - x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) + x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) except: - x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) - return x,y, t-t.min() #* 25/4096. 
#in ns
-
-
-def get_pvlist_from_post( p, t, binstep=100, detx=256, dety=256 ):
-    '''YG.DEV@CHX Nov, 2017 to get a pos, val list of phonton hitting detector by giving
-    p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin
-    The most important function for timepix
-    Input:
-        p: array, int64, coordinate-x * det_x + coordinate-y
-        t: list, int64, photon hit time
-        binstep: int,  binstep (in t unit) period
-        detx,dety: int/int, the detector size in x and y
-    Output:
-        positions: int array, (x*detx +y)
-        vals: int array, counts of that positions
-        counts: int array, counts of that positions in each binstep
-    '''
-    v = ( t - t[0])//binstep
-    L= np.max( v ) + 1
-    arr = np.ravel_multi_index( [ p, v ], [detx * dety,L ] )
-    uval, ind, count = np.unique( arr, return_counts=True, return_index=True)
-    ind2 = np.lexsort( ( p[ind], v[ind] ) )
+    x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"])
+    return x, y, t - t.min()  # * 25/4096. #in ns
+
+
+def get_pvlist_from_post(p, t, binstep=100, detx=256, dety=256):
+    """YG.DEV@CHX Nov, 2017: get a (pos, val) list of photons hitting the detector, given
+    p (photon hit position, pos_x * detx + pos_y), t (photon hit time), and the time bin.
+    The most important function for timepix
+    Input:
+        p: array, int64, coordinate-x * det_x + coordinate-y
+        t: list, int64, photon hit time
+        binstep: int, binstep (in t unit) period
+        detx,dety: int/int, the detector size in x and y
+    Output:
+        positions: int array, (x*detx + y)
+        vals: int array, photon counts at those positions
+        counts: int array, number of occupied positions in each binstep
+    """
+    v = (t - t[0]) // binstep
+    L = np.max(v) + 1
+    arr = np.ravel_multi_index([p, v], [detx * dety, L])
+    uval, ind, count = np.unique(arr, return_counts=True, return_index=True)
+    ind2 = np.lexsort((p[ind], v[ind]))
     ps = (p[ind])[ind2]
     vs = count[ind2]
     cs = np.bincount(v[ind])
-    return ps,vs,cs
-
-
-
-def histogram_pt( p, t, binstep=100, detx=256, dety=256 ):
-    '''YG.DEV@CHX Nov, 2017 to get a histogram of phonton counts by giving
-    p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin
-    The most important function for timepix
-    Input:
-        p: coordinate-x * det_x + coordinate-y
-        t: photon hit time
-        bin t in binstep (in t unit) period
-        detx,dety: the detector size in x and y
-    Output:
-        the hitorgram of photons with bins as binstep (in time unit)
-    '''
-    L= np.max( (t-t[0])//binstep ) + 1
-    #print(L,x,y, (t-t[0])//binstep)
-    arr = np.ravel_multi_index( [ p, (t-t[0])//binstep ], [detx * dety,L ] )
-    M,N = arr.max(),arr.min()
-    da = np.zeros( [detx * dety, L ] )
-    da.flat[np.arange(N, M ) ] = np.bincount( arr- N )
-    return da
-
-def histogram_xyt( x, y, t, binstep=100, detx=256, dety=256 ):
-    '''YG.DEV@CHX Mar, 2017 to get a histogram of phonton counts by giving
-    x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin
-    The most important function for timepix
-    Input:
-        x: coordinate-x
-        y: coordinate-y
-        t: photon hit time
-        bin t in binstep (in t unit) period
-        detx,dety: the detector size in x and y
-    Output:
-        the hitorgram of photons with bins as binstep (in time unit)
-
-
-    '''
-    L= np.max( (t-t[0])//binstep ) + 1
-    #print(L,x,y, (t-t[0])//binstep)
-    arr = np.ravel_multi_index( [x, y, (t-t[0])//binstep ], [detx, dety,L ] )
-    M,N = arr.max(),arr.min()
-    da = np.zeros( [detx, dety, L ] )
-    da.flat[np.arange(N, M ) ] = np.bincount( arr- N )
+    return ps, vs, cs
+
+
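A minimal worked example of what get_pvlist_from_post returns may help here — a sketch on a toy 4 x 4 detector with four synthetic hits (the values are illustrative only; the import assumes the patched module):

    import numpy as np

    from pyCHX.xpcs_timepixel import get_pvlist_from_post

    # four photon hits: pixel index p = x*detx + y, arrival times t in clock ticks
    p = np.array([5, 5, 9, 5], dtype=np.int64)
    t = np.array([0, 40, 120, 130], dtype=np.int64)

    # bin the hits into 100-tick frames on a 4 x 4 detector
    ps, vs, cs = get_pvlist_from_post(p, t, binstep=100, detx=4, dety=4)
    # ps -> [5, 5, 9]: occupied pixels, frame by frame
    # vs -> [2, 1, 1]: photon counts in those pixels
    # cs -> [1, 2]:    number of occupied pixels in frame 0 and in frame 1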
+def histogram_pt(p, t, binstep=100, detx=256, dety=256):
+    """YG.DEV@CHX Nov, 2017: get a histogram of photon counts, given
+    p (photon hit position, pos_x * detx + pos_y), t (photon hit time), and the time bin.
+    The most important function for timepix
+    Input:
+        p: coordinate-x * det_x + coordinate-y
+        t: photon hit time
+        bin t in binstep (in t unit) period
+        detx,dety: the detector size in x and y
+    Output:
+        the histogram of photons with bins of binstep (in time units)
+    """
+    L = np.max((t - t[0]) // binstep) + 1
+    # print(L,x,y, (t-t[0])//binstep)
+    arr = np.ravel_multi_index([p, (t - t[0]) // binstep], [detx * dety, L])
+    M, N = arr.max(), arr.min()
+    da = np.zeros([detx * dety, L])
+    da.flat[np.arange(N, M)] = np.bincount(arr - N)
     return da
 
 
+def histogram_xyt(x, y, t, binstep=100, detx=256, dety=256):
+    """YG.DEV@CHX Mar, 2017: get a histogram of photon counts, given
+    x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin.
+    The most important function for timepix
+    Input:
+        x: coordinate-x
+        y: coordinate-y
+        t: photon hit time
+        bin t in binstep (in t unit) period
+        detx,dety: the detector size in x and y
+    Output:
+        the histogram of photons with bins of binstep (in time units)
+
+
+    """
+    L = np.max((t - t[0]) // binstep) + 1
+    # print(L,x,y, (t-t[0])//binstep)
+    arr = np.ravel_multi_index([x, y, (t - t[0]) // binstep], [detx, dety, L])
+    M, N = arr.max(), arr.min()
+    da = np.zeros([detx, dety, L])
+    da.flat[np.arange(N, M)] = np.bincount(arr - N)
+    return da
+
 
 def get_FD_end_num(FD, maxend=1e10):
     N = maxend
-    for i in range(0,int(maxend)):
+    for i in range(0, int(maxend)):
         try:
             FD.seekimg(i)
         except:
-            N = i
+            N = i
             break
     FD.seekimg(0)
     return N
 
-def compress_timepix_data( pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2,
-                         with_pickle=True ):
-
-    ''' YG.Dev@CHX Nov 20, 2017
-    Compress the timepixeldata, in a format of x, y, t
-    x: pos_x in pixel
-    y: pos_y in pixel
-    timepix3 det size 256, 256
-    TODOLIST: mask is not working now
-    Input:
-        pos: 256 * y + x
-        t: arrival time in sec
-        filename: the output filename
-        md: a dict to describle the data info
-        force_compress: if False,
-                            if already compressed, just it
-                            else: compress
-                        if True, compress and, if exist, overwrite the already-coompress data
-    Return:
-        avg_img, imgsum, N (frame number)
-
-    '''
+
+def compress_timepix_data(
+    pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2, with_pickle=True
+):
+    """YG.Dev@CHX Nov 20, 2017
+    Compress the timepixel data, in a format of x, y, t
+    x: pos_x in pixel
+    y: pos_y in pixel
+    timepix3 det size 256, 256
+    TODOLIST: mask is not working now
+    Input:
+        pos: 256 * y + x
+        t: arrival time in sec
+        filename: the output filename
+        md: a dict to describe the data info
+        force_compress: if False,
+                            if already compressed, just use the existing file
+                            else: compress
+                        if True, compress and, if a file exists, overwrite the already-compressed data
+    Return:
+        avg_img, imgsum, N (frame number)
+
+    """
     if filename is None:
-        filename= '/XF11ID/analysis/Compressed_Data' +'/timpix_uid_%s.cmp'%md['uid']
-
+        filename = "/XF11ID/analysis/Compressed_Data" + "/timpix_uid_%s.cmp" % md["uid"]
+
     if force_compress:
-        print ("Create a new compress file with filename as :%s."%filename)
-        return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes,
-                                          with_pickle=with_pickle )
+        print("Create a new compress file with filename as :%s." % filename)
+        return init_compress_timepix_data(
+            pos, t, tbins, filename=filename, md=md, nobytes=nobytes, with_pickle=with_pickle
+        )
     else:
-        if not os.path.exists( filename ):
-            print ("Create a new compress file with filename as :%s."%filename)
-            return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes,
-                                              with_pickle=with_pickle )
-        else:
-            print ("Using already created compressed file with filename as :%s."%filename)
-            return pkl.load( open(filename + '.pkl', 'rb' ) )
-
-    #FD = Multifile(filename, 0, int(1e25) )
-    #return get_FD_end_num(FD)
-
-
-
-
-
-def create_timepix_compress_header( md, filename, nobytes=2, bins=1 ):
-    '''
+        if not os.path.exists(filename):
+            print("Create a new compress file with filename as :%s." % filename)
+            return init_compress_timepix_data(
+                pos, t, tbins, filename=filename, md=md, nobytes=nobytes, with_pickle=with_pickle
+            )
+        else:
+            print("Using already created compressed file with filename as :%s." % filename)
+            return pkl.load(open(filename + ".pkl", "rb"))
+
+    # FD = Multifile(filename, 0, int(1e25) )
+    # return get_FD_end_num(FD)
+
+
+def create_timepix_compress_header(md, filename, nobytes=2, bins=1):
+    """
     Create the header for compressed Eiger data; this function is for parallel compression
-    '''
-    fp = open( filename,'wb' )
-    #Make Header 1024 bytes
-    #md = images.md
-    if bins!=1:
-        nobytes=8
-    Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1',
-                md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
-                md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
-
-                nobytes, md['sy'], md['sx'],
-                0,256,
-                0,256
-               )
-    fp.write( Header)
-    fp.close()
-
-
-def init_compress_timepix_data( pos, t, binstep, filename, mask=None,
-                               md = None, nobytes=2,with_pickle=True ):
-    ''' YG.Dev@CHX Nov 19, 2017 with optimal algorithm by using complex index techniques
-
-    Compress the timepixeldata, in a format of x, y, t
-    x: pos_x in pixel
-    y: pos_y in pixel
-    timepix3 det size 256, 256
-    TODOLIST: mask is not working now
-    Input:
-        pos: 256 * x + y #can't be 256*x + y
-        t: arrival time in sec
-        binstep: int, binstep (in t unit) period
-        filename: the output filename
-        md: a dict to describle the data info
-    Return:
-        N (frame number)
-
-    '''
-    fp = open( filename,'wb' )
+    """
+    fp = open(filename, "wb")
+    # Make Header 1024 bytes
+    # md = images.md
+    if bins != 1:
+        nobytes = 8
+    Header = struct.pack(
+        "@16s8d7I916x",
+        b"Version-COMPtpx1",
+        md["beam_center_x"],
+        md["beam_center_y"],
+        md["count_time"],
+        md["detector_distance"],
+        md["frame_time"],
+        md["incident_wavelength"],
+        md["x_pixel_size"],
+        md["y_pixel_size"],
+        nobytes,
+        md["sy"],
+        md["sx"],
+        0,
+        256,
+        0,
+        256,
+    )
+    fp.write(Header)
+    fp.close()
+
+
+def init_compress_timepix_data(pos, t, binstep, filename, mask=None, md=None, nobytes=2, with_pickle=True):
+    """YG.Dev@CHX Nov 19, 2017, with an optimal algorithm using complex-index techniques
+
+    Compress the timepixel data, in a format of x, y, t
+    x: pos_x in pixel
+    y: pos_y in pixel
+    timepix3 det size 256, 256
+    TODOLIST: mask is not working now
+    Input:
+        pos: 256 * y + x  # can't be 256*x + y
+        t: arrival time in sec
+        binstep: int, binstep (in t unit) period
+        filename: the output filename
+        md: a dict to describe the data info
+    Return:
+        N (frame number)
+
+    """
+    fp = open(filename, "wb")
     if md is None:
-        md={}
-        md['beam_center_x'] = 0
-        md['beam_center_y'] = 0
-        md['count_time'] = 0
-        md['detector_distance'] = 0
-        md['frame_time'] = 0
-        md['incident_wavelength'] =0
-        md['x_pixel_size'] = 45
-        md['y_pixel_size'] = 45
-        #nobytes = 2
-        md['sx'] = 256
-        md['sy'] = 256
-
-
-    #TODList: for different detector using different md structure, March 2, 2017,
-
-    #8d include,
+        md = {}
+        md["beam_center_x"] = 0
+        md["beam_center_y"] = 0
+        md["count_time"] = 0
+        md["detector_distance"] = 0
+        md["frame_time"] = 0
+        md["incident_wavelength"] = 0
+        md["x_pixel_size"] = 45
+        md["y_pixel_size"] = 45
+        # nobytes = 2
+        md["sx"] = 256
+        md["sy"] = 256
+
+    # TODList: for different detector using different md structure, March 2, 2017,
+
+    # 8d include,
     #'bytes', 'nrows', 'ncols', (detsize)
-    #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi)
-    Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1',
-                    md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
-                    md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
-
-                    nobytes, md['sy'], md['sx'],
-                    0,256,
-                    0,256
-                   )
-    fp.write( Header)
-
-    N_ = np.int( np.ceil( (t.max() -t.min()) / binstep ) )
-    print('There are %s frames to be compressed...'%(N_-1))
-
-    ps,vs,cs = get_pvlist_from_post( pos, t, binstep, detx= md['sx'], dety= md['sy'] )
-    N = len(cs) - 1 #the last one might don't have full number for bings, so kick off
+    #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi)
+    Header = struct.pack(
+        "@16s8d7I916x",
+        b"Version-COMPtpx1",
+        md["beam_center_x"],
+        md["beam_center_y"],
+        md["count_time"],
+        md["detector_distance"],
+        md["frame_time"],
+        md["incident_wavelength"],
+        md["x_pixel_size"],
+        md["y_pixel_size"],
+        nobytes,
+        md["sy"],
+        md["sx"],
+        0,
+        256,
+        0,
+        256,
+    )
+    fp.write(Header)
+
+    N_ = int(np.ceil((t.max() - t.min()) / binstep))  # builtin int: the np.int alias was removed in NumPy >= 1.24
+    print("There are %s frames to be compressed..." % (N_ - 1))
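For reference, the file written here is a 1024-byte header (the "@16s8d7I916x" struct above) followed, for each frame, by a uint32 count dlen, then dlen int32 ravelled pixel positions, then dlen int16 photon counts when nobytes=2. A minimal read-back sketch, with a hypothetical file name:

    import struct

    with open("/XF11ID/analysis/Compressed_Data/timpix_uid_test.cmp", "rb") as f:
        header = struct.unpack("@16s8d7I916x", f.read(1024))
        version, nobytes = header[0], header[9]  # b'Version-COMPtpx1', 2
        (dlen,) = struct.unpack("@I", f.read(4))  # number of pixels hit in frame 0
        pos = struct.unpack("@%di" % dlen, f.read(4 * dlen))  # ravelled positions
        cts = struct.unpack("@%dh" % dlen, f.read(2 * dlen))  # per-pixel photon counts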
+
+    ps, vs, cs = get_pvlist_from_post(pos, t, binstep, detx=md["sx"], dety=md["sy"])
+    N = len(cs) - 1  # the last one might not have a full number of bins, so kick it off
     css = np.cumsum(cs)
-    imgsum = np.zeros( N )
+    imgsum = np.zeros(N)
     good_count = 0
-    avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023
-
-    for i in tqdm( range(0,N) ):
-        if i ==0:
+    avg_img = np.zeros(
+        [md["sy"], md["sx"]], dtype=np.float64
+    )  # changed deprecated np.float to np.float64 LW @06/11/2023
+
+    for i in tqdm(range(0, N)):
+        if i == 0:
             ind1 = 0
             ind2 = css[i]
         else:
-            ind1 = css[i-1]
-            ind2 = css[i]
-        #print( ind1, ind2 )
-        good_count +=1
-        psi = ps[ ind1:ind2 ]
-        vsi = vs[ ind1:ind2 ]
-        dlen = cs[i]
-        imgsum[i] = vsi.sum()
-        np.ravel(avg_img )[psi] += vsi
-        #print(vs.sum())
-        fp.write( struct.pack( '@I', dlen ))
-        fp.write( struct.pack( '@{}i'.format( dlen), *psi))
-        fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vsi))
+            ind1 = css[i - 1]
+            ind2 = css[i]
+        # print( ind1, ind2 )
+        good_count += 1
+        psi = ps[ind1:ind2]
+        vsi = vs[ind1:ind2]
+        dlen = cs[i]
+        imgsum[i] = vsi.sum()
+        np.ravel(avg_img)[psi] += vsi
+        # print(vs.sum())
+        fp.write(struct.pack("@I", dlen))
+        fp.write(struct.pack("@{}i".format(dlen), *psi))
+        fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *vsi))
     fp.close()
     avg_img /= good_count
-    #return N -1
-    if with_pickle:
-        pkl.dump( [ avg_img, imgsum, N ], open(filename + '.pkl', 'wb' ) )
-    return avg_img, imgsum, N
-
-
-
-
-
-def init_compress_timepix_data_light_duty( pos, t, binstep, filename, mask=None,
-                                          md = None, nobytes=2,with_pickle=True ):
-    ''' YG.Dev@CHX Nov 19, 2017
-    Compress the timepixeldata, in a format of x, y, t
-    x: pos_x in pixel
-    y: pos_y in pixel
-    timepix3 det size 256, 256
-    TODOLIST: mask is not working now
-    Input:
-        pos: 256 * x + y #can't be 256*x + y
-        t: arrival time in sec
-        filename: the output filename
-        md: a dict to describle the data info
-    Return:
-        N (frame number)
-
-    '''
-    fp = open( filename,'wb' )
+    # return N -1
+    if with_pickle:
+        pkl.dump([avg_img, imgsum, N], open(filename + ".pkl", "wb"))
+    return avg_img, imgsum, N
+
+
+def init_compress_timepix_data_light_duty(
+    pos, t, binstep, filename, mask=None, md=None, nobytes=2, with_pickle=True
+):
+    """YG.Dev@CHX Nov 19, 2017
+    Compress the timepixel data, in a format of x, y, t
+    x: pos_x in pixel
+    y: pos_y in pixel
+    timepix3 det size 256, 256
+    TODOLIST: mask is not working now
+    Input:
+        pos: 256 * y + x  # can't be 256*x + y
+        t: arrival time in sec
+        filename: the output filename
+        md: a dict to describe the data info
+    Return:
+        N (frame number)
+
+    """
+    fp = open(filename, "wb")
     if md is None:
-        md={}
-        md['beam_center_x'] = 0
-        md['beam_center_y'] = 0
-        md['count_time'] = 0
-        md['detector_distance'] = 0
-        md['frame_time'] = 0
-        md['incident_wavelength'] =0
-        md['x_pixel_size'] = 45
-        md['y_pixel_size'] = 45
-        #nobytes = 2
-        md['sx'] = 256
-        md['sy'] = 256
-
-
-    #TODList: for different detector using different md structure, March 2, 2017,
-
-    #8d include,
+        md = {}
+        md["beam_center_x"] = 0
+        md["beam_center_y"] = 0
+        md["count_time"] = 0
+        md["detector_distance"] = 0
+        md["frame_time"] = 0
+        md["incident_wavelength"] = 0
+        md["x_pixel_size"] = 45
+        md["y_pixel_size"] = 45
+        # nobytes = 2
+        md["sx"] = 256
+        md["sy"] = 256
+
+    # TODList: for different detector using different md structure, March 2, 2017,
+
+    # 8d include,
     #'bytes', 'nrows', 'ncols', (detsize)
-    #'rows_begin',
'rows_end', 'cols_begin', 'cols_end' (roi) - Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', - md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], - md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], - - nobytes, md['sy'], md['sx'], - 0,256, - 0,256 - ) - fp.write( Header) - - tx = np.arange( t.min(), t.max(), binstep ) - N = len(tx) - imgsum = np.zeros( N-1 ) - print('There are %s frames to be compressed...'%(N-1)) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMPtpx1", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["sy"], + md["sx"], + 0, + 256, + 0, + 256, + ) + fp.write(Header) + + tx = np.arange(t.min(), t.max(), binstep) + N = len(tx) + imgsum = np.zeros(N - 1) + print("There are %s frames to be compressed..." % (N - 1)) good_count = 0 - avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 - for i in tqdm( range(N-1) ): - ind1 = np.argmin( np.abs( tx[i] - t) ) - ind2 = np.argmin( np.abs( tx[i+1] - t ) ) - #print( 'N=%d:'%i, ind1, ind2 ) - p_i = pos[ind1: ind2] - ps,vs = np.unique( p_i, return_counts= True ) - np.ravel(avg_img )[ps] += vs - good_count +=1 - dlen = len(ps) - imgsum[i] = vs.sum() - #print(vs.sum()) - fp.write( struct.pack( '@I', dlen )) - fp.write( struct.pack( '@{}i'.format( dlen), *ps)) - fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vs)) + avg_img = np.zeros( + [md["sy"], md["sx"]], dtype=np.float64 + ) # changed depreciated np.float to np.float64 LW @06/11/2023 + for i in tqdm(range(N - 1)): + ind1 = np.argmin(np.abs(tx[i] - t)) + ind2 = np.argmin(np.abs(tx[i + 1] - t)) + # print( 'N=%d:'%i, ind1, ind2 ) + p_i = pos[ind1:ind2] + ps, vs = np.unique(p_i, return_counts=True) + np.ravel(avg_img)[ps] += vs + good_count += 1 + dlen = len(ps) + imgsum[i] = vs.sum() + # print(vs.sum()) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *ps)) + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *vs)) fp.close() avg_img /= good_count - #return N -1 - if with_pickle: - pkl.dump( [ avg_img, imgsum, N-1 ], open(filename + '.pkl', 'wb' ) ) - return avg_img, imgsum, N-1 - - - - - - -def compress_timepix_data_old( data_pixel, filename, rois=None, - md = None, nobytes=2 ): - ''' - Compress the timepixeldata - md: a dict to describle the data info - rois: [y1,y2, x1, x2] - - ''' - fp = open( filename,'wb' ) + # return N -1 + if with_pickle: + pkl.dump([avg_img, imgsum, N - 1], open(filename + ".pkl", "wb")) + return avg_img, imgsum, N - 1 + + +def compress_timepix_data_old(data_pixel, filename, rois=None, md=None, nobytes=2): + """ + Compress the timepixeldata + md: a dict to describle the data info + rois: [y1,y2, x1, x2] + + """ + fp = open(filename, "wb") if md is None: - md={} - md['beam_center_x'] = 0 - md['beam_center_y'] = 0 - md['count_time'] = 0 - md['detector_distance'] = 0 - md['frame_time'] = 0 - md['incident_wavelength'] =0 - md['x_pixel_size'] =25 - md['y_pixel_size'] =25 - #nobytes = 2 - md['sx'] = 256 - md['sy'] = 256 - md['roi_rb']= 0 - md['roi_re']= md['sy'] - md['roi_cb']= 0 - md['roi_ce']= md['sx'] + md = {} + md["beam_center_x"] = 0 + md["beam_center_y"] = 0 + md["count_time"] = 0 + md["detector_distance"] = 0 + md["frame_time"] = 0 + 
md["incident_wavelength"] = 0 + md["x_pixel_size"] = 25 + md["y_pixel_size"] = 25 + # nobytes = 2 + md["sx"] = 256 + md["sy"] = 256 + md["roi_rb"] = 0 + md["roi_re"] = md["sy"] + md["roi_cb"] = 0 + md["roi_ce"] = md["sx"] if rois is not None: - md['roi_rb']= rois[2] - md['roi_re']= rois[3] - md['roi_cb']= rois[1] - md['roi_ce']= rois[0] - - md['sy'] = md['roi_cb'] - md['roi_ce'] - md['sx'] = md['roi_re'] - md['roi_rb'] - - #TODList: for different detector using different md structure, March 2, 2017, - - #8d include, + md["roi_rb"] = rois[2] + md["roi_re"] = rois[3] + md["roi_cb"] = rois[1] + md["roi_ce"] = rois[0] + + md["sy"] = md["roi_cb"] - md["roi_ce"] + md["sx"] = md["roi_re"] - md["roi_rb"] + + # TODList: for different detector using different md structure, March 2, 2017, + + # 8d include, #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) - Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', - md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], - md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], - - nobytes, md['sy'], md['sx'], - md['roi_rb'], md['roi_re'],md['roi_cb'],md['roi_ce'] - ) - - fp.write( Header) - fp.write( data_pixel ) - - - + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMPtpx1", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["sy"], + md["sx"], + md["roi_rb"], + md["roi_re"], + md["roi_cb"], + md["roi_ce"], + ) + + fp.write(Header) + fp.write(data_pixel) + + class Get_TimePixel_Arrayc(object): - ''' - a class to get intested pixels from a images sequence, - load ROI of all images into memory + """ + a class to get intested pixels from a images sequence, + load ROI of all images into memory get_data: to get a 2-D array, shape as (len(images), len(pixellist)) - - One example: + + One example: data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() - ''' - - def __init__(self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None,flat_correction=None, - detx = 256, dety = 256): - ''' + """ + + def __init__( + self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None, flat_correction=None, detx=256, dety=256 + ): + """ indexable: a images sequences pixelist: 1-D array, interest pixel list #flat_correction, normalized by flatfield #norm, normalized by total intensity, like a incident beam intensity - ''' + """ self.hitime = hitime - self.tbins = tbins - self.tx = np.arange( self.hitime.min(), self.hitime.max(), self.tbins ) - N = len(self.tx) + self.tbins = tbins + self.tx = np.arange(self.hitime.min(), self.hitime.max(), self.tbins) + N = len(self.tx) if beg is None: beg = 0 if end is None: end = N - + self.beg = beg - self.end = end - self.length = self.end - self.beg + self.end = end + self.length = self.end - self.beg self.pos = pos - self.pixelist = pixelist - self.norm = norm + self.pixelist = pixelist + self.norm = norm self.flat_correction = flat_correction self.detx = detx self.dety = dety - - def get_data(self ): - ''' + + def get_data(self): + """ To get intested pixels array Return: 2-D array, shape as (len(images), len(pixellist)) - ''' + """ norm = self.norm - data_array = np.zeros([ self.length-1,len(self.pixelist)]) - print( data_array.shape) - - #fra_pix = np.zeros_like( pixelist, dtype=np.float64) - timg = 
np.zeros( self.detx * self.dety, dtype=np.int32 )
-        timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 )
-        n=0
+        data_array = np.zeros([self.length - 1, len(self.pixelist)])
+        print(data_array.shape)
+
+        # fra_pix = np.zeros_like( pixelist, dtype=np.float64)
+        timg = np.zeros(self.detx * self.dety, dtype=np.int32)
+        timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1)
+        n = 0
         tx = self.tx
         N = len(self.tx)
-        print( 'The Produced Array Length is %d.'%(N-1) )
+        print("The Produced Array Length is %d." % (N - 1))
         flat_correction = self.flat_correction
-        #imgsum = np.zeros( N )
-        for i in tqdm( range(N-1) ):
-            ind1 = np.argmin( np.abs( tx[i] - self.hitime ) )
-            ind2 = np.argmin( np.abs( tx[i+1] - self.hitime ) )
-            #print( 'N=%d:'%i, ind1, ind2 )
-            p_i = self.pos[ind1: ind2]
-            pos,val = np.unique( p_i, return_counts= True )
-            #print( val.sum() )
-            w = np.where( timg[pos] )[0]
-            pxlist = timg[ pos[w] ] -1
-            #print( val[w].sum() )
-            #fra_pix[ pxlist] = v[w]
+        # imgsum = np.zeros( N )
+        for i in tqdm(range(N - 1)):
+            ind1 = np.argmin(np.abs(tx[i] - self.hitime))
+            ind2 = np.argmin(np.abs(tx[i + 1] - self.hitime))
+            # print( 'N=%d:'%i, ind1, ind2 )
+            p_i = self.pos[ind1:ind2]
+            pos, val = np.unique(p_i, return_counts=True)
+            # print( val.sum() )
+            w = np.where(timg[pos])[0]
+            pxlist = timg[pos[w]] - 1
+            # print( val[w].sum() )
+            # fra_pix[ pxlist] = v[w]
             if flat_correction is not None:
-                #normalized by flatfield
-                data_array[n][ pxlist] = val[w]
+                # normalized by the flatfield
+                data_array[n][pxlist] = val[w] / flat_correction[pxlist]
             else:
-                data_array[n][ pxlist] = val[w] / flat_correction[pxlist] #-1.0
-            if norm is not None:
-                #normalized by total intensity, like a incident beam intensity
-                data_array[n][ pxlist] /= norm[i]
-            n += 1
-        return data_array
-
-
-
-def apply_timepix_mask( x,y,t, roi ):
-    y1,y2, x1,x2 = roi
-    w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1)
-    return x[w],y[w], t[w]
-
-
-
-
-
-
-def get_timepixel_data_from_series( data_dir, filename_prefix,
-                                   total_filenum = 72, colms = int(1e5) ):
-    x = np.zeros( total_filenum * colms )
-    y = np.zeros( total_filenum * colms )
-    t = zeros( total_filenum * colms )
-    for n in range( total_filenum):
-        filename = filename_prefix + '_%s.csv'%n
-        data = get_timepixel_data( data_dir, filename )
-        if n!=total_filenum-1:
-            ( x[n*colms: (n+1)*colms ], y[n*colms: (n+1)*colms ], t[n*colms: (n+1)*colms ] )= (
-                data[0], data[1], data[2])
+                data_array[n][pxlist] = val[w]
+            if norm is not None:
+                # normalized by total intensity, like an incident beam intensity
+                data_array[n][pxlist] /= norm[i]
+            n += 1
+        return data_array
+
+
+def apply_timepix_mask(x, y, t, roi):
+    y1, y2, x1, x2 = roi
+    w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1)
+    return x[w], y[w], t[w]
+
+
+def get_timepixel_data_from_series(data_dir, filename_prefix, total_filenum=72, colms=int(1e5)):
+    x = np.zeros(total_filenum * colms)
+    y = np.zeros(total_filenum * colms)
+    t = np.zeros(total_filenum * colms)
+    for n in range(total_filenum):
+        filename = filename_prefix + "_%s.csv" % n
+        data = get_timepixel_data(data_dir, filename)
+        if n != total_filenum - 1:
+            (x[n * colms : (n + 1) * colms], y[n * colms : (n + 1) * colms], t[n * colms : (n + 1) * colms]) = (
+                data[0],
+                data[1],
+                data[2],
+            )
         else:
-            #print( filename_prefix + '_%s.csv'%n )
+            # print( filename_prefix + '_%s.csv'%n )
             ln = len(data[0])
-            #print( ln )
-            ( x[n*colms: n*colms + ln ], y[n*colms: n*colms + ln ], t[n*colms: n*colms + ln ] )= (
-                data[0], data[1], data[2])
-
-    return x[:n*colms + ln] ,y[:n*colms
+ ln],t[:n*colms + ln] - - - -def get_timepixel_avg_image( x,y,t, det_shape = [256, 256], delta_time = None ): - '''YG.Dev@CHX, 2016 + # print( ln ) + (x[n * colms : n * colms + ln], y[n * colms : n * colms + ln], t[n * colms : n * colms + ln]) = ( + data[0], + data[1], + data[2], + ) + + return x[: n * colms + ln], y[: n * colms + ln], t[: n * colms + ln] + + +def get_timepixel_avg_image(x, y, t, det_shape=[256, 256], delta_time=None): + """YG.Dev@CHX, 2016 give x,y, t data to get image in a period of delta_time (in second) x, pos_x in pixel y, pos_y in pixel t, arrival time - - - ''' - t0 = t.min() - tm = t.max() - + + + """ + t0 = t.min() + tm = t.max() + if delta_time is not None: - delta_time *=1e12 + delta_time *= 1e12 if delta_time > tm: - delta_time = tm + delta_time = tm else: delta_time = t.max() - #print( delta_time) - t_ = t[t10: #print progress... - if n %( noframes / 10) ==0: + """Do correlation for xyt file, + noframes is the frame number to be correlated + """ + start_time = time.time() + for n in range(1, noframes + 1): # the main loop for correlator + gg2 = self.autocor_xytframe(n) + if n == 1: + g2 = zeros_like(gg2) + g2 += (gg2 - g2) / float(n) # average g2 + # print n + if noframes > 10: # print progress... + if n % (noframes / 10) == 0: sys.stdout.write("#") - sys.stdout.flush() + sys.stdout.flush() elapsed_time = time.time() - start_time - print ( 'Total time: %.2f min' %(elapsed_time/60.) ) + print("Total time: %.2f min" % (elapsed_time / 60.0)) return g2 - - def plot(self, y,x=None): - '''a simple plot''' - if x is None:x=arange( len(y)) - plt.plot(x,y,'ro', ls='-') + def plot(self, y, x=None): + """a simple plot""" + if x is None: + x = arange(len(y)) + plt.plot(x, y, "ro", ls="-") plt.show() - - def g2_to_pds(self, dly, g2, tscale = None): - '''convert g2 to a pandas frame''' - if len(g2.shape)==1:g2=g2.reshape( [len(g2),1] ) + def g2_to_pds(self, dly, g2, tscale=None): + """convert g2 to a pandas frame""" + if len(g2.shape) == 1: + g2 = g2.reshape([len(g2), 1]) tn, qn = g2.shape - tindex=xrange( tn ) - qcolumns = ['t'] + [ 'g2' ] - if tscale is None:tscale = 1.0 - g2t = hstack( [dly[:tn].reshape(tn,1) * tscale, g2 ]) - g2p = pd.DataFrame(data=g2t, index=tindex,columns=qcolumns) + tindex = xrange(tn) + qcolumns = ["t"] + ["g2"] + if tscale is None: + tscale = 1.0 + g2t = hstack([dly[:tn].reshape(tn, 1) * tscale, g2]) + g2p = pd.DataFrame(data=g2t, index=tindex, columns=qcolumns) return g2p - def show(self,g2p,title): - t = g2p.t - N = len( g2p ) - ylim = [g2p.g2.min(),g2p[1:N].g2.max()] - g2p.plot(x=t,y='g2',marker='o',ls='--',logx=T,ylim=ylim); - plt.xlabel('time delay, ns',fontsize=12) + def show(self, g2p, title): + t = g2p.t + N = len(g2p) + ylim = [g2p.g2.min(), g2p[1:N].g2.max()] + g2p.plot(x=t, y="g2", marker="o", ls="--", logx=T, ylim=ylim) + plt.xlabel("time delay, ns", fontsize=12) plt.title(title) - plt.savefig( RES_DIR + title +'.png' ) + plt.savefig(RES_DIR + title + ".png") plt.show() - ###################################################### - + if False: - xp=xpcs(); #use the xpcs class + xp = xpcs() + # use the xpcs class dly = xp.delays() if T: fnum = 100 - g2=xp.autocor( fnum ) - filename='g2_-%s-'%(fnum) - save( RES_DIR + FOUT + filename, g2) + g2 = xp.autocor(fnum) + filename = "g2_-%s-" % (fnum) + save(RES_DIR + FOUT + filename, g2) ##g2= load(RES_DIR + FOUT + filename +'.npy') - g2p = xp.g2_to_pds(dly,g2, tscale = 20) - xp.show(g2p,'g2_run_%s'%fnum) + g2p = xp.g2_to_pds(dly, g2, tscale=20) + xp.show(g2p, "g2_run_%s" % fnum) From 
01350b28da127b818cc2893a3c780be924dc2a73 Mon Sep 17 00:00:00 2001 From: Max Rakitin Date: Thu, 2 May 2024 14:05:48 -0400 Subject: [PATCH 4/6] Add fixes from Lutz (5/2/24) --- chx_packages_local.py | 302 ++++++++++++++++++++++++++++++ pyCHX/chx_packages.py | 1 + pyCHX/chx_xpcs_xsvs_jupyter_V1.py | 8 +- 3 files changed, 307 insertions(+), 4 deletions(-) create mode 100644 chx_packages_local.py diff --git a/chx_packages_local.py b/chx_packages_local.py new file mode 100644 index 0000000..828cb12 --- /dev/null +++ b/chx_packages_local.py @@ -0,0 +1,302 @@ +### This enables local import of pyCHX for testing + +import pickle as cpk + +import historydict +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +# changes to current version of chx_packages.py +# added load_dask_data in generic_functions + + +#from pyCHX.chx_handlers import use_dask, use_pims +from chx_handlers import use_dask, use_pims +# from pyCHX.chx_libs import ( +from chx_libs import ( + EigerHandler, + Javascript, + LogNorm, + Model, + cmap_albula, + cmap_vge, + datetime, + db, + getpass, + h5py, + multi_tau_lags, + np, + os, + pims, + plt, + random, + roi, + time, + tqdm, + utils, + warnings, +) + +use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' + +# from pyCHX.chx_compress import ( +from chx_compress import ( + MultifileBNLCustom, + combine_binary_files, + create_compress_header, + para_compress_eigerdata, + para_segment_compress_eigerdata, + segment_compress_eigerdata, +) +# from pyCHX.chx_compress_analysis import ( +from chx_compress_analysis import ( + Multifile, + cal_each_ring_mean_intensityc, + cal_waterfallc, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + get_each_ring_mean_intensityc, + get_time_edge_avg_img, + mean_intensityc, + plot_each_ring_mean_intensityc, + plot_waterfallc, + read_compressed_eigerdata, +) +# from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +# from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +from chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +# from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +from chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +#from pyCHX.chx_generic_functions import ( +from chx_generic_functions import ( + R_2, + apply_mask, + average_array_withNan, + check_bad_uids, + check_lost_metadata, + check_ROI_intensity, + check_shutter_open, + combine_images, + copy_data, + create_cross_mask, + create_fullImg_with_box, + create_hot_pixel_mask, + create_multi_rotated_rectangle_mask, + create_polygon_mask, + create_rectangle_mask, + create_ring_mask, + create_seg_ring, + create_time_slice, + create_user_folder, + delete_data, + extract_data_from_file, + filter_roi_mask, + find_bad_pixels, + find_bad_pixels_FD, + find_good_xpcs_uids, + find_index, + find_uids, + fit_one_peak_curve, + get_averaged_data_from_multi_res, + get_avg_img, + get_bad_frame_list, + get_base_all_filenames, + get_cross_point, + get_current_pipeline_filename, + get_current_pipeline_fullpath, + get_curve_turning_points, + get_detector, + get_detectors, + get_each_frame_intensity, + get_echos, + get_eigerImage_per_file, + get_fit_by_two_linear, + get_fra_num_by_dose, + get_g2_fit_general, + get_image_edge, + 
get_image_with_roi, + get_img_from_iq, + get_last_uids, + get_mass_center_one_roi, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_non_uniform_edges, + get_print_uids, + get_q_rate_fit_general, + get_qval_dict, + get_qval_qwid_dict, + get_roi_mask_qval_qwid_by_shift, + get_roi_nr, + get_series_g2_taus, + get_SG_norm, + get_sid_filenames, + get_today_date, + get_touched_qwidth, + get_waxs_beam_center, + lin2log_g2, + linear_fit, + load_dask_data, + load_data, + load_mask, + load_pilatus, + ls_dir, + mask_badpixels, + mask_exclude_badpixel, + move_beamstop, + pad_length, + pload_obj, + plot1D, + plot_fit_two_linear_fit, + plot_g2_general, + plot_q_g2fitpara_general, + plot_q_rate_fit_general, + plot_q_rate_general, + plot_xy_with_fit, + plot_xy_x2, + print_dict, + psave_obj, + read_dict_csv, + refine_roi_mask, + RemoveHot, + reverse_updown, + ring_edges, + run_time, + save_array_to_tiff, + save_arrays, + save_current_pipeline, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_lists, + save_oavs_tifs, + sgolay2d, + shift_mask, + show_img, + show_ROI_on_image, + shrink_image, + trans_data_to_pd, + update_qval_dict, + update_roi_mask, + validate_uid, +) +# from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +from chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +# from pyCHX.chx_specklecp import ( +from chx_specklecp import ( + get_binned_his_std, + get_contrast, + get_his_std_from_pds, + get_xsvs_fit, + plot_g2_contrast, + plot_xsvs_fit, + save_bin_his_std, + save_KM, + xsvsc, + xsvsp, +) +# from pyCH.chx_xpcs_xsvs_jupyter_V1 import( +from chx_xpcs_xsvs_jupyter_V1 import( + get_t_iqc_uids, + plot_t_iqtMq2, + plot_t_iqc_uids, + plot_entries_from_csvlist, + plot_entries_from_uids, + get_iq_from_uids, + wait_func, + wait_data_acquistion_finish, + get_uids_by_range, + get_uids_in_time_period, + do_compress_on_line, + realtime_xpcs_analysis, + compress_multi_uids, + get_two_time_mulit_uids, + get_series_g2_from_g12, + get_fra_num_by_dose, + get_series_one_time_mulit_uids, + plot_dose_g2, + run_xpcs_xsvs_single, +) +# from pyCHX.Create_Report import ( +from Create_Report import ( + create_multi_pdf_reports_for_uids, + create_one_pdf_reports_for_uids, + create_pdf_report, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + make_pdf_report, +) +#from pyCHX.DataGonio import qphiavg +from DataGonio import qphiavg +# from pyCHX.SAXS import ( +from SAXS import ( + fit_form_factor, + fit_form_factor2, + form_factor_residuals_bg_lmfit, + form_factor_residuals_lmfit, + get_form_factor_fit_lmfit, + poly_sphere_form_factor_intensity, + show_saxs_qmap, +) +#from pyCHX.Two_Time_Correlation_Function import ( +from Two_Time_Correlation_Function import ( + get_aged_g2_from_g12, + get_aged_g2_from_g12q, + get_four_time_from_two_time, + get_one_time_from_two_time, + rotate_g12q_to_rectangle, + show_C12, +) +# from pyCHX.XPCS_GiSAXS import ( +from XPCS_GiSAXS import ( + cal_1d_qr, + convert_gisaxs_pixel_to_q, + fit_qr_qz_rate, + get_1d_qr, + get_each_box_mean_intensity, + get_gisaxs_roi, + get_qedge, + get_qmap_label, + get_qr_tick_label, + get_qzr_map, + get_qzrmap, + get_reflected_angles, + get_t_qrc, + multi_uids_gisaxs_xpcs_analysis, + plot_gisaxs_g4, + plot_gisaxs_two_g2, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_qrc, + show_qzr_map, + show_qzr_roi, +) +# from pyCHX.XPCS_SAXS import ( +from XPCS_SAXS import ( + cal_g2, + 
combine_two_roi_mask, + create_hot_pixel_mask, + get_angular_mask, + get_circular_average, + get_cirucular_average_std, + get_each_ring_mean_intensity, + get_QrQw_From_RoiMask, + get_ring_mask, + get_seg_from_ring_mask, + get_t_iq, + get_t_iqc, + multi_uids_saxs_xpcs_analysis, + plot_circular_average, + plot_qIq_with_ROI, + plot_t_iqc, + recover_img_from_iq, + save_lists, +) +#from pyCHX.chx_outlier_detection import ( +from chx_outlier_detection import ( + is_outlier, + outlier_mask +) \ No newline at end of file diff --git a/pyCHX/chx_packages.py b/pyCHX/chx_packages.py index c3087c8..d4b4d63 100644 --- a/pyCHX/chx_packages.py +++ b/pyCHX/chx_packages.py @@ -123,6 +123,7 @@ get_waxs_beam_center, lin2log_g2, linear_fit, + load_dask_data, load_data, load_mask, load_pilatus, diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py index 2c9b9e3..b8b6192 100644 --- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py @@ -8,10 +8,10 @@ from pyCHX.chx_libs import colors, markers from pyCHX.chx_packages import * -ip = get_ipython() -ip.run_line_magic( - "run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb" -) +# ip = get_ipython() +# ip.run_line_magic( +# "run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb" +# ) def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1): From a4a22647b6cf5f64a414bfd2305fc57d2014e28e Mon Sep 17 00:00:00 2001 From: Max Rakitin Date: Thu, 2 May 2024 14:09:49 -0400 Subject: [PATCH 5/6] STY: fix style of new files with black/pre-commit --- chx_packages_local.py | 75 +++++++++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 31 deletions(-) diff --git a/chx_packages_local.py b/chx_packages_local.py index 828cb12..7ae1abe 100644 --- a/chx_packages_local.py +++ b/chx_packages_local.py @@ -3,15 +3,10 @@ import pickle as cpk import historydict -from eiger_io.fs_handler import EigerImages -from skimage.draw import line, line_aa, polygon - -# changes to current version of chx_packages.py -# added load_dask_data in generic_functions - -#from pyCHX.chx_handlers import use_dask, use_pims +# from pyCHX.chx_handlers import use_dask, use_pims from chx_handlers import use_dask, use_pims + # from pyCHX.chx_libs import ( from chx_libs import ( EigerHandler, @@ -36,6 +31,12 @@ utils, warnings, ) +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +# changes to current version of chx_packages.py +# added load_dask_data in generic_functions + use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' @@ -48,6 +49,7 @@ para_segment_compress_eigerdata, segment_compress_eigerdata, ) + # from pyCHX.chx_compress_analysis import ( from chx_compress_analysis import ( Multifile, @@ -63,15 +65,20 @@ plot_waterfallc, read_compressed_eigerdata, ) + # from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq from chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq + # from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF from chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF + # from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym from chx_crosscor import CrossCorrelator2, run_para_ccorr_sym -#from pyCHX.chx_generic_functions import ( 
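The pattern throughout this file is to shadow the installed pyCHX package with the modules of a local checkout by dropping the "pyCHX." prefix; for those flat imports to resolve, the checkout's pyCHX/ directory has to come first on sys.path. A sketch of how such a test session might be set up (the checkout path is hypothetical):

    import sys

    # directory that contains chx_libs.py, chx_compress.py, chx_generic_functions.py, ...
    sys.path.insert(0, "/nsls2/data/chx/shared/my_pyCHX_checkout/pyCHX")

    import chx_packages_local  # run from the checkout root; now resolves the local modules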
+ +# from pyCHX.chx_generic_functions import ( from chx_generic_functions import ( R_2, + RemoveHot, apply_mask, average_array_withNan, check_bad_uids, @@ -160,7 +167,6 @@ psave_obj, read_dict_csv, refine_roi_mask, - RemoveHot, reverse_updown, ring_edges, run_time, @@ -182,8 +188,13 @@ update_roi_mask, validate_uid, ) + # from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file from chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file + +# from pyCHX.chx_outlier_detection import ( +from chx_outlier_detection import is_outlier, outlier_mask + # from pyCHX.chx_specklecp import ( from chx_specklecp import ( get_binned_his_std, @@ -197,28 +208,30 @@ xsvsc, xsvsp, ) + # from pyCH.chx_xpcs_xsvs_jupyter_V1 import( -from chx_xpcs_xsvs_jupyter_V1 import( - get_t_iqc_uids, - plot_t_iqtMq2, - plot_t_iqc_uids, - plot_entries_from_csvlist, - plot_entries_from_uids, - get_iq_from_uids, - wait_func, - wait_data_acquistion_finish, - get_uids_by_range, - get_uids_in_time_period, - do_compress_on_line, - realtime_xpcs_analysis, +from chx_xpcs_xsvs_jupyter_V1 import ( compress_multi_uids, - get_two_time_mulit_uids, - get_series_g2_from_g12, + do_compress_on_line, get_fra_num_by_dose, + get_iq_from_uids, + get_series_g2_from_g12, get_series_one_time_mulit_uids, + get_t_iqc_uids, + get_two_time_mulit_uids, + get_uids_by_range, + get_uids_in_time_period, plot_dose_g2, + plot_entries_from_csvlist, + plot_entries_from_uids, + plot_t_iqc_uids, + plot_t_iqtMq2, + realtime_xpcs_analysis, run_xpcs_xsvs_single, + wait_data_acquistion_finish, + wait_func, ) + # from pyCHX.Create_Report import ( from Create_Report import ( create_multi_pdf_reports_for_uids, @@ -228,8 +241,10 @@ extract_xpcs_results_from_h5, make_pdf_report, ) -#from pyCHX.DataGonio import qphiavg + +# from pyCHX.DataGonio import qphiavg from DataGonio import qphiavg + # from pyCHX.SAXS import ( from SAXS import ( fit_form_factor, @@ -240,7 +255,8 @@ poly_sphere_form_factor_intensity, show_saxs_qmap, ) -#from pyCHX.Two_Time_Correlation_Function import ( + +# from pyCHX.Two_Time_Correlation_Function import ( from Two_Time_Correlation_Function import ( get_aged_g2_from_g12, get_aged_g2_from_g12q, @@ -249,6 +265,7 @@ rotate_g12q_to_rectangle, show_C12, ) + # from pyCHX.XPCS_GiSAXS import ( from XPCS_GiSAXS import ( cal_1d_qr, @@ -274,6 +291,7 @@ show_qzr_map, show_qzr_roi, ) + # from pyCHX.XPCS_SAXS import ( from XPCS_SAXS import ( cal_g2, @@ -295,8 +313,3 @@ recover_img_from_iq, save_lists, ) -#from pyCHX.chx_outlier_detection import ( -from chx_outlier_detection import ( - is_outlier, - outlier_mask -) \ No newline at end of file From fae01d5749d608a3e39cbd39c86433a17b53642e Mon Sep 17 00:00:00 2001 From: Max Rakitin Date: Fri, 3 May 2024 09:51:27 -0400 Subject: [PATCH 6/6] Comment-out unused `get_ipython` import --- pyCHX/chx_xpcs_xsvs_jupyter_V1.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py index b8b6192..31ec64e 100644 --- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py @@ -1,13 +1,14 @@ import pandas as pds +from pyCHX.chx_libs import colors, markers +from pyCHX.chx_packages import * + # from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict # RUN_GUI = False # from pyCHX.chx_libs import markers -from IPython import get_ipython -from pyCHX.chx_libs import colors, markers -from pyCHX.chx_packages import * +# 
from IPython import get_ipython # ip = get_ipython() # ip.run_line_magic( # "run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb"
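If the notebook setup ever needs to be re-enabled without breaking plain-Python imports, a guarded variant is one option (a sketch, not part of this patch):

    try:
        from IPython import get_ipython

        ip = get_ipython()  # returns None when not running under IPython
    except ImportError:
        ip = None

    if ip is not None:
        ip.run_line_magic(
            "run",
            "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb",
        )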