From 619f47ddc3fd0f1fe7fbb6e930e2771f4868a482 Mon Sep 17 00:00:00 2001 From: Eugene Matviychuk Date: Fri, 28 Mar 2025 09:25:07 -0400 Subject: [PATCH] UPD: move all current code --- patches/chx_outlier_detection.py | 98 + patches/fix_get_sid_filenames.py | 29 + patches/polygonmask_fix.py | 99 + patches/roi_nr_2019_3_0_1.py | 49 + pyCHX/XPCS_GiSAXS.py | 5 +- pyCHX/backups/Create_Report_05012024.py | 1940 +++++ pyCHX/backups/chx_compress_01242025.py | 1476 ++++ pyCHX/backups/chx_compress_05012024.py | 1189 +++ pyCHX/backups/chx_correlationc_05012024.py | 1676 +++++ .../backups/chx_generic_functions_01252025.py | 6365 ++++++++++++++++ .../backups/chx_generic_functions_05012024.py | 5809 +++++++++++++++ pyCHX/backups/chx_olog_012925.py | 140 + .../backups/chx_outlier_detection_05012024.py | 98 + pyCHX/backups/chx_packages_local-20240502.py | 302 + .../chx_xpcs_xsvs_jupyter_V1_05012024.py | 1698 +++++ pyCHX/backups/pyCHX-backup/Badpixels.py | 167 + .../pyCHX-backup/Compress_readerNew.py | 352 + pyCHX/backups/pyCHX-backup/Create_Report.py | 2166 ++++++ pyCHX/backups/pyCHX-backup/DEVs.py | 578 ++ pyCHX/backups/pyCHX-backup/DataGonio.py | 758 ++ pyCHX/backups/pyCHX-backup/SAXS.py | 1104 +++ pyCHX/backups/pyCHX-backup/Stitching.py | 527 ++ .../Two_Time_Correlation_Function.py | 1305 ++++ pyCHX/backups/pyCHX-backup/XPCS_GiSAXS.py | 2596 +++++++ pyCHX/backups/pyCHX-backup/XPCS_SAXS.py | 2773 +++++++ .../XPCS_XSVS_SAXS_Multi_2017_V4.py | 606 ++ pyCHX/backups/pyCHX-backup/__init__.py | 6 + pyCHX/backups/pyCHX-backup/_version.py | 455 ++ .../backups/Create_Report_05012024.py | 1940 +++++ .../backups/chx_compress_01242025.py | 1476 ++++ .../backups/chx_compress_05012024.py | 1189 +++ .../backups/chx_correlationc_05012024.py | 1676 +++++ .../backups/chx_generic_functions_01252025.py | 6365 ++++++++++++++++ .../backups/chx_generic_functions_05012024.py | 5809 +++++++++++++++ .../pyCHX-backup/backups/chx_olog_012925.py | 140 + .../backups/chx_outlier_detection_05012024.py | 98 + .../backups/chx_packages_local-20240502.py | 302 + .../chx_xpcs_xsvs_jupyter_V1_05012024.py | 1698 +++++ .../backups/xpcs_timepixel_05012024.py | 830 +++ pyCHX/backups/pyCHX-backup/chx_Fitters2D.py | 337 + pyCHX/backups/pyCHX-backup/chx_compress.py | 1481 ++++ .../pyCHX-backup/chx_compress_analysis.py | 383 + pyCHX/backups/pyCHX-backup/chx_correlation.py | 1167 +++ .../backups/pyCHX-backup/chx_correlationc.py | 1873 +++++ .../backups/pyCHX-backup/chx_correlationp.py | 957 +++ .../backups/pyCHX-backup/chx_correlationp2.py | 804 ++ pyCHX/backups/pyCHX-backup/chx_crosscor.py | 831 +++ .../pyCHX-backup/chx_generic_functions.py | 6446 +++++++++++++++++ pyCHX/backups/pyCHX-backup/chx_handlers.py | 48 + pyCHX/backups/pyCHX-backup/chx_libs.py | 441 ++ pyCHX/backups/pyCHX-backup/chx_olog.py | 136 + .../pyCHX-backup/chx_outlier_detection.py | 143 + pyCHX/backups/pyCHX-backup/chx_packages.py | 259 + .../pyCHX-backup/chx_packages_local.py | 323 + pyCHX/backups/pyCHX-backup/chx_speckle.py | 1145 +++ pyCHX/backups/pyCHX-backup/chx_specklecp.py | 2036 ++++++ .../pyCHX-backup/chx_xpcs_xsvs_jupyter_V1.py | 2464 +++++++ pyCHX/backups/pyCHX-backup/movie_maker.py | 241 + pyCHX/backups/pyCHX-backup/xpcs_timepixel.py | 907 +++ pyCHX/backups/pychx-repo-obsolete | 1 + pyCHX/backups/xpcs_timepixel_05012024.py | 830 +++ pyCHX/chx_compress.py | 30 +- pyCHX/chx_generic_functions.py | 172 +- pyCHX/chx_olog.py | 23 +- pyCHX/chx_outlier_detection.py | 6 + pyCHX/chx_packages.py | 3 + pyCHX/chx_packages_local.py | 323 + standard_functions/standard_functions.py | 469 
++ 68 files changed, 80098 insertions(+), 70 deletions(-) create mode 100644 patches/chx_outlier_detection.py create mode 100644 patches/fix_get_sid_filenames.py create mode 100644 patches/polygonmask_fix.py create mode 100644 patches/roi_nr_2019_3_0_1.py create mode 100644 pyCHX/backups/Create_Report_05012024.py create mode 100644 pyCHX/backups/chx_compress_01242025.py create mode 100644 pyCHX/backups/chx_compress_05012024.py create mode 100644 pyCHX/backups/chx_correlationc_05012024.py create mode 100644 pyCHX/backups/chx_generic_functions_01252025.py create mode 100644 pyCHX/backups/chx_generic_functions_05012024.py create mode 100644 pyCHX/backups/chx_olog_012925.py create mode 100644 pyCHX/backups/chx_outlier_detection_05012024.py create mode 100644 pyCHX/backups/chx_packages_local-20240502.py create mode 100644 pyCHX/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/Badpixels.py create mode 100644 pyCHX/backups/pyCHX-backup/Compress_readerNew.py create mode 100644 pyCHX/backups/pyCHX-backup/Create_Report.py create mode 100644 pyCHX/backups/pyCHX-backup/DEVs.py create mode 100644 pyCHX/backups/pyCHX-backup/DataGonio.py create mode 100644 pyCHX/backups/pyCHX-backup/SAXS.py create mode 100644 pyCHX/backups/pyCHX-backup/Stitching.py create mode 100644 pyCHX/backups/pyCHX-backup/Two_Time_Correlation_Function.py create mode 100644 pyCHX/backups/pyCHX-backup/XPCS_GiSAXS.py create mode 100644 pyCHX/backups/pyCHX-backup/XPCS_SAXS.py create mode 100644 pyCHX/backups/pyCHX-backup/XPCS_XSVS_SAXS_Multi_2017_V4.py create mode 100644 pyCHX/backups/pyCHX-backup/__init__.py create mode 100644 pyCHX/backups/pyCHX-backup/_version.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/Create_Report_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_compress_01242025.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_compress_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_correlationc_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_01252025.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_olog_012925.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_outlier_detection_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_packages_local-20240502.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/backups/xpcs_timepixel_05012024.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_Fitters2D.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_compress.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_compress_analysis.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_correlation.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_correlationc.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_correlationp.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_correlationp2.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_crosscor.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_generic_functions.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_handlers.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_libs.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_olog.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_outlier_detection.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_packages.py create mode 100644 
pyCHX/backups/pyCHX-backup/chx_packages_local.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_speckle.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_specklecp.py create mode 100644 pyCHX/backups/pyCHX-backup/chx_xpcs_xsvs_jupyter_V1.py create mode 100644 pyCHX/backups/pyCHX-backup/movie_maker.py create mode 100644 pyCHX/backups/pyCHX-backup/xpcs_timepixel.py create mode 160000 pyCHX/backups/pychx-repo-obsolete create mode 100644 pyCHX/backups/xpcs_timepixel_05012024.py create mode 100644 pyCHX/chx_packages_local.py create mode 100644 standard_functions/standard_functions.py diff --git a/patches/chx_outlier_detection.py b/patches/chx_outlier_detection.py new file mode 100644 index 0000000..e211742 --- /dev/null +++ b/patches/chx_outlier_detection.py @@ -0,0 +1,98 @@ +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ + points.tolist() + if len(points) ==1: + points=points[:,None] + if verbose: + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation + return modified_z_score > thresh + +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. 
+ verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + except: + lower_outlier_threshold = False + if verbose: print('no lower outlier threshold found') + else: + if verbose: print('ROI #%s: no outliers detected'%rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + upper_outlier_threshold = False; lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img*rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img*rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if upper_outlier_threshold or lower_outlier_threshold: + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + if upper_outlier_threshold: + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + if lower_outlier_threshold: + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) + + if plot: + fig,ax = plt.subplots() + plt.imshow(hhmask) + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier 
threshold: %s'%outlier_threshold) + + return hhmask diff --git a/patches/fix_get_sid_filenames.py b/patches/fix_get_sid_filenames.py new file mode 100644 index 0000000..b769592 --- /dev/null +++ b/patches/fix_get_sid_filenames.py @@ -0,0 +1,29 @@ +def get_sid_filenames(hdr,verbose=False): + import glob + from time import strftime, localtime + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5")) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2])==0: + if verbose: print('could not find detector filename from "data_path" in metadata: %s'%start_doc['data path']) + else: + if verbose: print('Found detector filename from "data_path" in metadata!');success=True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(start_doc['time'])) + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('could not find detector filename in %s'%data_path) + else: + if verbose: print('Found detector filename in %s'%data_path);success=True + + if not success: # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(stop_doc['time'])) + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('Sorry, could not find detector filename....') + else: + if verbose: print('Found detector filename in %s'%data_path);success=True + return ret \ No newline at end of file diff --git a/patches/polygonmask_fix.py b/patches/polygonmask_fix.py new file mode 100644 index 0000000..85f3700 --- /dev/null +++ b/patches/polygonmask_fix.py @@ -0,0 +1,99 @@ +# fix for newer environments on jupyter hub: polygon( y,x, shape = image.shape) -> need to be specific about shape +# Not needed for older, local environments on srv1,2,3. 
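# A minimal illustration of why the explicit shape= argument matters (assumes only
# numpy and scikit-image; the 100x100 frame and the corner coordinates are arbitrary).
# Without shape=, skimage.draw.polygon does not clip its row/column indices, so a
# rectangle that extends past the detector edge raises an IndexError when the indices
# are used to fill the mask; with shape=image.shape the indices are clipped safely.
import numpy as np
from skimage.draw import polygon

img_shape = (100, 100)
y = np.array([40, 40, 60, 60])             # row coordinates of the rectangle corners
x = np.array([80, 130, 130, 80])           # column coordinates; two corners fall outside
rr, cc = polygon(y, x, shape=img_shape)    # shape= clips rr/cc to the image bounds
mask = np.zeros(img_shape, dtype=bool)
mask[rr, cc] = True                        # safe: no out-of-bounds indexing
print(mask.sum())                          # number of masked pixels, all inside the frame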
Fix above will probably also work there, BUT there would be a problem with disk <-> circle that has been renamed in skimage at some point +# -> older, local environments on srv1,2,3 work without this fix, only apply when running on jupyter hub +def create_multi_rotated_rectangle_mask( image, center=None, length=100, width=50, angles=[0] ): + ''' Developed at July 10, 2017 by Y.G.@CHX, NSLS2 + Create multi rectangle-shaped mask by rotating a rectangle with a list of angles + The original rectangle is defined by four corners, i.e., + [ (center[1] - width//2, center[0]), + (center[1] + width//2, center[0]), + (center[1] + width//2, center[0] + length), + (center[1] - width//2, center[0] + length) + ] + + Parameters: + image: 2D numpy array, to give mask shape + center: integer list, if None, will be the center of the image + length: integer, the length of the non-ratoted rectangle + width: integer, the width of the non-ratoted rectangle + angles: integer list, a list of rotated angles + + Return: + mask: 2D bool-type numpy array + ''' + + from skimage.draw import polygon + from skimage.transform import rotate + cx,cy = center + imy, imx = image.shape + mask = np.zeros( image.shape, dtype = bool) + wy = length + wx = width + x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) + y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ]) + rr, cc = polygon( y,x, shape = image.shape) + mask[rr,cc] =1 + mask_rot= np.zeros( image.shape, dtype = bool) + for angle in angles: + mask_rot += np.array( rotate( mask, angle, center= center ), dtype=bool) #, preserve_range=True) + return ~mask_rot + +def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, + center_disk = True, center_radius=10 + ): + ''' + Give image and the beam center to create a cross-shaped mask + wy_left: the width of left h-line + wy_right: the width of rigth h-line + wx_up: the width of up v-line + wx_down: the width of down v-line + center_disk: if True, create a disk with center and center_radius + + Return: + the cross mask + ''' + from skimage.draw import line_aa, line, polygon, disk + + imy, imx = image.shape + cx,cy = center + bst_mask = np.zeros_like( image , dtype = bool) + ### + #for right part + wy = wy_right + x = np.array( [ cx, imx, imx, cx ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x,shape=image.shape) + bst_mask[rr,cc] =1 + + ### + #for left part + wy = wy_left + x = np.array( [0, cx, cx,0 ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x,shape=image.shape) + bst_mask[rr,cc] =1 + + ### + #for up part + wx = wx_up + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ cy, cy, imy, imy]) + rr, cc = polygon( y,x,shape=image.shape) + bst_mask[rr,cc] =1 + + ### + #for low part + wx = wx_down + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ 0,0, cy, cy]) + rr, cc = polygon( y,x,shape=image.shape) + bst_mask[rr,cc] =1 + + if center_radius!=0: + rr, cc = disk((cy, cx), center_radius, shape = bst_mask.shape) + bst_mask[rr,cc] =1 + + + full_mask= ~bst_mask + + return full_mask \ No newline at end of file diff --git a/patches/roi_nr_2019_3_0_1.py b/patches/roi_nr_2019_3_0_1.py new file mode 100644 index 0000000..17d9683 --- /dev/null +++ b/patches/roi_nr_2019_3_0_1.py @@ -0,0 +1,49 @@ +def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent=True, qprecision=5): + """ + function to return roi number from qval_dict, corresponding Q and phi, 
lists (sets) of all available Qs and phis + [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) + calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True) + qdict: qval_dict from analysis pipeline/hdf5 result file + q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True) + q_thresh: threshold for comparing Q-values, set to 0 for exact comparison + phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True) + p_thresh: threshold for comparing phi values, set to 0 for exact comparison + silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest + by LW 10/21/2017 + update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) + update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environment after 2019-3.0.1 + """ + import collections + from collections import OrderedDict + qdict = collections.OrderedDict(sorted(qdict.items())) + qs=[] + phis=[] + for i in qdict.keys(): + qs.append(qdict[i][0]) + phis.append(qdict[i][1]) + qslist=list(OrderedDict.fromkeys(qs)) + qslist = np.unique( np.round(qslist, qprecision ) ) + phislist=list(OrderedDict.fromkeys(phis)) + qslist=list(np.sort(qslist)) + phislist=list(np.sort(phislist)) + if q_nr: + qinterest=qslist[q] + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] + else: + qinterest=q + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new + if phi_nr: + phiinterest=phislist[phi] + phiindices = [i for i,x in enumerate(phis) if x == phiinterest] + else: + phiinterest=phi + phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new + ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] #-> this is the original + if silent == False: + print('list of available Qs:') + print(qslist) + print('list of available phis:') + print(phislist) + print('Roi number for Q= '+str(ret_list[1])+' and phi= '+str(ret_list[2])+': '+str(ret_list[0])) + return ret_list \ No newline at end of file diff --git a/pyCHX/XPCS_GiSAXS.py b/pyCHX/XPCS_GiSAXS.py index 8c57ff8..a2feda5 100644 --- a/pyCHX/XPCS_GiSAXS.py +++ b/pyCHX/XPCS_GiSAXS.py @@ -4,6 +4,8 @@ This module is for the GiSAXS XPCS analysis """ + + from skbeam.core.accumulators.binned_statistic import BinnedStatistic1D, BinnedStatistic2D from pyCHX.chx_compress import ( @@ -771,7 +773,8 @@ def show_label_array_on_image( """ ax.set_aspect("equal") if log_img: - im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", norm=LogNorm(norm), **kwargs) # norm=norm, + norm=LogNorm(vmin=vmin, vmax=vmax) + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", norm=norm) # norm=norm, else: im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", norm=norm, **kwargs) # norm=norm, diff --git a/pyCHX/backups/Create_Report_05012024.py b/pyCHX/backups/Create_Report_05012024.py new file mode 100644 index 0000000..f434328 --- /dev/null +++ b/pyCHX/backups/Create_Report_05012024.py @@ -0,0 +1,1940 @@ +''' +Yugang Created at Aug 08, 2016, CHX-NSLS-II + +Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline + +How to use: +python Create_Report.py full_file_path uid output_dir (option) + +An exmplae to use: +python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 + +python Create_Report.py 
/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/ + +''' + +def check_dict_keys( dicts, key): + if key not in list(dicts.keys()): + dicts[key] = 'unknown' + + + +import h5py + +from reportlab.pdfgen import canvas +from reportlab.lib.units import inch, cm , mm +from reportlab.lib.colors import pink, green, brown, white, black, red, blue + + +from reportlab.lib.styles import getSampleStyleSheet +#from reportlab.platypus import Image, Paragraph, Table + +from reportlab.lib.pagesizes import letter, A4 +from pyCHX.chx_generic_functions import (pload_obj ) + + +from PIL import Image +from time import time +from datetime import datetime + +import sys,os +import pandas as pds +import numpy as np + + +def add_one_line_string( c, s, top, left=30, fontsize = 11 ): + if (fontsize*len(s )) >1000: + fontsize = 1000./(len(s)) + c.setFont("Helvetica", fontsize ) + c.drawString(left, top, s) + + + +def add_image_string( c, imgf, data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_ = False ): + + image = data_dir + imgf + if os.path.exists(image): + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= img_height + width = height/ratio + #if width>400: + # width = 350 + # height = width*ratio + c.drawImage( image, img_left, img_top, width= width,height=height,mask=None) + + c.setFont("Helvetica", 16) + c.setFillColor( blue ) + c.drawString(str1_left, str1_top,str1 ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(str2_left, str2_top, 'filename: %s'%imgf ) + if return_: + return height/ratio + + else: + c.setFillColor( blue ) + c.drawString( str1_left, str1_top, str1) + c.setFillColor(red) + c.drawString( str1_left, str1_top -40, '-->Not Calculated!' 
) + + + +class create_pdf_report( object ): + + '''Aug 16, YG@CHX-NSLS-II + Create a pdf report by giving data_dir, uid, out_dir + data_dir: the input data directory, including all necessary images + the images names should be: + meta_file = 'uid=%s-md'%uid + avg_img_file = 'uid=%s--img-avg-.png'%uid + ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid + qiq_file = 'uid=%s--Circular-Average-.png'%uid + ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid + + Iq_t_file = 'uid=%s--Iq-t-.png'%uid + img_sum_t_file = 'uid=%s--img-sum-t.png'%uid + wat_file= 'uid=%s--Waterfall-.png'%uid + Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid + + g2_file = 'uid=%s--g2-.png'%uid + g2_fit_file = 'uid=%s--g2--fit-.png'%uid + q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid + + two_time_file = 'uid=%s--Two-time-.png'%uid + two_g2_file = 'uid=%s--g2--two-g2-.png'%uid + + uid: the unique id + out_dir: the output directory + report_type: + 'saxs': report saxs results + 'gisaxs': report gisaxs results + + + Output: + A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder + ''' + + def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None, + report_type='saxs',md=None, res_h5_filename=None ): + from datetime import datetime + self.data_dir = data_dir + self.uid = uid + self.md = md + #print(md) + if user is None: + user = 'chx' + self.user = user + if out_dir is None: + out_dir = data_dir + if not os.path.exists(out_dir): + os.makedirs(out_dir) + self.out_dir=out_dir + + self.styles = getSampleStyleSheet() + self.width, self.height = letter + + self.report_type = report_type + dt =datetime.now() + CurTime = '%02d/%02d/%s/-%02d/%02d/' % ( dt.month, dt.day, dt.year,dt.hour,dt.minute) + self.CurTime = CurTime + if filename is None: + filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid + filename=out_dir + filename + c = canvas.Canvas( filename, pagesize=letter) + self.filename= filename + self.res_h5_filename = res_h5_filename + #c.setTitle("XPCS Analysis Report for uid=%s"%uid) + c.setTitle(filename) + self.c = c + if load: + self.load_metadata() + + def load_metadata(self): + uid=self.uid + data_dir = self.data_dir + #load metadata + meta_file = 'uid=%s_md'%uid + self.metafile = data_dir + meta_file + if self.md is None: + md = pload_obj( data_dir + meta_file ) + self.md = md + else: + md = self.md + #print('Get md from giving md') + #print(md) + self.sub_title_num = 0 + uid_g2 = None + uid_c12 = None + if 'uid_g2' in list(md.keys()): + uid_g2 = md['uid_g2'] + if 'uid_c12' in list(md.keys()): + uid_c12 = md['uid_c12'] + + '''global definition''' + + if 'beg_OneTime' in list( md.keys()): + beg_OneTime = md['beg_OneTime'] + end_OneTime = md['end_OneTime'] + else: + beg_OneTime = None + end_OneTime = None + + if 'beg_TwoTime' in list( md.keys()): + beg_TwoTime = md['beg_TwoTime'] + end_TwoTime = md['end_TwoTime'] + else: + beg_TwoTime = None + end_TwoTime = None + + + try: + beg = md['beg'] + end= md['end'] + uid_ = uid + '_fra_%s_%s'%(beg, end) + if beg_OneTime is None: + uid_OneTime = uid + '_fra_%s_%s'%(beg, end) + else: + uid_OneTime = uid + '_fra_%s_%s'%(beg_OneTime, end_OneTime) + if beg_TwoTime is None: + uid_TwoTime = uid + '_fra_%s_%s'%(beg, end) + else: + uid_TwoTime = uid + '_fra_%s_%s'%(beg_TwoTime, end_TwoTime) + + except: + uid_ = uid + uid_OneTime = uid + if beg is None: + uid_ = uid + uid_OneTime = uid + + self.avg_img_file = 'uid=%s_img_avg.png'%uid + self.ROI_on_img_file = 'uid=%s_ROI_on_Image.png'%uid + + self.qiq_file = 
'uid=%s_q_Iq.png'%uid + self.qiq_fit_file = 'uid=%s_form_factor_fit.png'%uid + #self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid + if self.report_type =='saxs' or self.report_type =='ang_saxs': + self.ROI_on_Iq_file = 'uid=%s_ROI_on_Iq.png'%uid + + elif self.report_type =='gi_saxs': + self.ROI_on_Iq_file = 'uid=%s_Qr_ROI.png'%uid + + self.Iq_t_file = 'uid=%s_q_Iqt.png'%uid + self.img_sum_t_file = 'uid=%s_img_sum_t.png'%uid + self.wat_file= 'uid=%s_waterfall.png'%uid + self.Mean_inten_t_file= 'uid=%s_t_ROIs.png'%uid + self.oavs_file = 'uid=%s_OAVS.png'%uid + + if uid_g2 is None: + uid_g2 = uid_OneTime + self.g2_file = 'uid=%s_g2.png'%uid_g2 + self.g2_fit_file = 'uid=%s_g2_fit.png'%uid_g2 + #print( self.g2_fit_file ) + self.g2_new_page = False + self.g2_fit_new_page = False + if self.report_type =='saxs': + jfn = 'uid=%s_g2.png'%uid_g2 + if os.path.exists( data_dir + jfn): + self.g2_file = jfn + else: + jfn = 'uid=%s_g2__joint.png'%uid_g2 + if os.path.exists( data_dir + jfn): + self.g2_file = jfn + self.g2_new_page = True + #self.g2_new_page = True + jfn = 'uid=%s_g2_fit.png'%uid_g2 + if os.path.exists(data_dir + jfn ): + self.g2_fit_file = jfn + #self.g2_fit_new_page = True + else: + jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 + if os.path.exists(data_dir + jfn ): + self.g2_fit_file = jfn + self.g2_fit_new_page = True + + else: + jfn = 'uid=%s_g2__joint.png'%uid_g2 + if os.path.exists( data_dir + jfn): + self.g2_file = jfn + self.g2_new_page = True + jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 + if os.path.exists(data_dir + jfn ): + self.g2_fit_file = jfn + self.g2_fit_new_page = True + + self.q_rate_file = 'uid=%s_Q_Rate_fit.png'%uid_g2 + self.q_rate_loglog_file = 'uid=%s_Q_Rate_loglog.png'%uid_g2 + self.g2_q_fitpara_file = 'uid=%s_g2_q_fitpara_plot.png'%uid_g2 + + + #print( self.q_rate_file ) + if uid_c12 is None: + uid_c12 = uid_ + self.q_rate_two_time_fit_file = 'uid=%s_two_time_Q_Rate_fit.png'%uid_c12 + #print( self.q_rate_two_time_fit_file ) + + self.two_time_file = 'uid=%s_Two_time.png'%uid_c12 + self.two_g2_file = 'uid=%s_g2_two_g2.png'%uid_c12 + + if self.report_type =='saxs': + + jfn = 'uid=%s_g2_two_g2.png'%uid_c12 + self.two_g2_new_page = False + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') + self.two_g2_file = jfn + #self.two_g2_new_page = True + else: + jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 + self.two_g2_new_page = False + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') + self.two_g2_file = jfn + self.two_g2_new_page = True + else: + jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 + self.two_g2_new_page = False + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') + self.two_g2_file = jfn + self.two_g2_new_page = True + + + self.four_time_file = 'uid=%s_g4.png'%uid_ + jfn = 'uid=%s_g4__joint.png'%uid_ + self.g4_new_page = False + if os.path.exists( data_dir + jfn ): + self.four_time_file = jfn + self.g4_new_page = True + + self.xsvs_fit_file = 'uid=%s_xsvs_fit.png'%uid_ + self.contrast_file = 'uid=%s_contrast.png'%uid_ + self.dose_file = 'uid=%s_dose_analysis.png'%uid_ + + jfn = 'uid=%s_dose_analysis__joint.png'%uid_ + self.dose_file_new_page = False + if os.path.exists( data_dir + jfn ): + self.dose_file = jfn + self.dose_file_new_page = True + + #print( self.dose_file ) + if False: + self.flow_g2v = 'uid=%s_1a_mqv_g2_v_fit.png'%uid_ + self.flow_g2p = 'uid=%s_1a_mqp_g2_p_fit.png'%uid_ + self.flow_g2v_rate_fit = 'uid=%s_v_fit_rate_Q_Rate_fit.png'%uid_ + self.flow_g2p_rate_fit = 'uid=%s_p_fit_rate_Q_Rate_fit.png'%uid_ + + if True: + self.two_time = 
'uid=%s_pv_two_time.png'%uid_ + #self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ + + #self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_ + #self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ + self.flow_g2_g2b_p = 'uid=%s_g2_two_g2_p.png'%uid_ + self.flow_g2_g2b_v = 'uid=%s_g2_two_g2_v.png'%uid_ + + self.flow_g2bv_rate_fit = 'uid=%s_vertb_Q_Rate_fit.png'%uid_ + self.flow_g2bp_rate_fit = 'uid=%s_parab_Q_Rate_fit.png'%uid_ + + self.flow_g2v = 'uid=%s_g2_v_fit.png'%uid_ + self.flow_g2p = 'uid=%s_g2_p_fit.png'%uid_ + self.flow_g2v_rate_fit = 'uid=%s_vert_Q_Rate_fit.png'%uid_ + self.flow_g2p_rate_fit = 'uid=%s_para_Q_Rate_fit.png'%uid_ + + #self.report_header(page=1, top=730, new_page=False) + #self.report_meta(new_page=False) + + self.q2Iq_file = 'uid=%s_q2_iq.png'%uid + self.iq_invariant_file = 'uid=%s_iq_invariant.png'%uid + + def report_invariant( self, top= 300, new_page=False): + '''create the invariant analysis report + two images: + ROI on average intensity image + ROI on circular average + ''' + uid=self.uid + c= self.c + #add sub-title, static images + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 230 + self.sub_title_num +=1 + c.drawString(10, top, "%s. I(q) Invariant Analysis"%self.sub_title_num ) #add title + #add q2Iq + c.setFont("Helvetica", 14) + imgf = self.q2Iq_file + #print( imgf ) + label = 'q^2*I(q)' + add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=180, + str1_left=110, str1_top = top-35,str1=label, + str2_left = 60, str2_top = top -320 ) + + #add iq_invariant + imgf = self.iq_invariant_file + img_height= 180 + img_left,img_top =320, top - ds*1.15 + str1_left, str1_top,str1= 420, top- 35, 'I(q) Invariant' + str2_left, str2_top = 350, top- 320 + + #print ( imgf ) + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + + def report_header(self, page=1, new_page=False): + '''create headers, including title/page number''' + c= self.c + CurTime = self.CurTime + uid=self.uid + user=self.user + c.setFillColor(black) + c.setFont("Helvetica", 14) + #add page number + c.drawString(250, 10, "Page--%s--"%( page ) ) + #add time stamp + + #c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) + s_ = "Created at %s@CHX-By-%s"%( CurTime,user ) + add_one_line_string( c, s_, 10, left=350,fontsize = 11 ) + + #add title + #c.setFont("Helvetica", 22) + title = "XPCS Analysis Report for uid=%s"%uid + c.setFont("Helvetica", 1000/( len(title) ) ) + #c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title + c.drawString(50,760, "XPCS Analysis Report for uid=%s"%uid ) #add title + #add a line under title + c.setStrokeColor( red ) + c.setLineWidth(width=1.5) + c.line( 50, 750, 550, 750 ) + if new_page: + c.showPage() + c.save() + + + def report_meta(self, top=740, new_page=False): + '''create the meta data report, + the meta data include: + uid + Sample: + Measurement + Wavelength + Detector-Sample Distance + Beam Center + Mask file + Data dir + Pipeline notebook + ''' + + c=self.c + #load metadata + md = self.md + try: + uid = md['uid'] + except: + uid=self.uid + #add sub-title, metadata + c.setFont("Helvetica", 20) + ds = 15 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Metadata"%self.sub_title_num ) #add title + top = top - 5 + fontsize = 11 + c.setFont("Helvetica", fontsize) + + nec_keys = [ 'sample', 'start_time', 'stop_time','Measurement' ,'exposure time' ,'incident_wavelength', 'cam_acquire_t', + 'frame_time','detector_distance', 'feedback_x', 'feedback_y', 'shutter mode', + 'beam_center_x', 'beam_center_y', 'beam_refl_center_x', 'beam_refl_center_y','mask_file','bad_frame_list', 'transmission', 'roi_mask_file'] + for key in nec_keys: + check_dict_keys(md, key) + + try:#try exp time from detector + exposuretime= md['count_time'] #exposure time in sec + except: + exposuretime= md['cam_acquire_time'] #exposure time in sec + + try:#try acq time from detector + acquisition_period = md['frame_time'] + except: + try: + acquisition_period = md['acquire period'] + except: + uid = md['uid'] + acquisition_period = float( db[uid]['start']['acquire period'] ) + + + s = [] + s.append( 'UID: %s'%uid ) ###line 1, for uid + s.append('Sample: %s'%md['sample'] ) ####line 2 sample + s.append('Data Acquisition From: %s To: %s'%(md['start_time'], md['stop_time']))####line 3 Data Acquisition time + s.append( 'Measurement: %s'%md['Measurement'] ) ####line 4 'Measurement + + #print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) + #print(acquisition_period) + s.append( 'Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms'%( md['incident_wavelength'], int(md['number of images']),round(float(exposuretime)*1000,4), round(float( acquisition_period )*1000,4) ) ) ####line 5 'lamda... + + s.append( 'Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s'%( + md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) ) ####line 6 'Detector-Sample Distance.. + if self.report_type == 'saxs': + s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + elif self.report_type == 'gi_saxs': + s7= ('Incident Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + + ' || ' + + 'Reflect Center: [%s, %s] (pixel)'%(md['beam_refl_center_x'], md['beam_refl_center_y']) ) + elif self.report_type == 'ang_saxs' or self.report_type == 'gi_waxs' : + s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + else: + s7 = '' + + s7 += ' || ' + 'BadLen: %s'%len(md['bad_frame_list']) + s7 += ' || ' + 'Transmission: %s'%md['transmission'] + s.append( s7 ) ####line 7 'Beam center... 
+ m = 'Mask file: %s'%md['mask_file'] + ' || ' + 'ROI mask file: %s'%md['roi_mask_file'] + #s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename + #s.append( ) ####line 8 mask filename + s.append(m) + + if self.res_h5_filename is not None: + self.data_dir_ = self.data_dir + self.res_h5_filename + else: + self.data_dir_ = self.data_dir + s.append( 'Analysis Results Dir: %s'%self.data_dir_ ) ####line 9 results folder + + + s.append( 'Metadata Dir: %s.csv-&.pkl'%self.metafile ) ####line 10 metadata folder + try: + s.append( 'Pipeline notebook: %s'%md['NOTEBOOK_FULL_PATH'] ) ####line 11 notebook folder + except: + pass + #print( 'here' ) + line =1 + for s_ in s: + add_one_line_string( c, s_, top -ds*line , left=30,fontsize = fontsize ) + line += 1 + + if new_page: + c.showPage() + c.save() + + def report_static( self, top=560, new_page=False, iq_fit=False): + '''create the static analysis report + two images: + average intensity image + circular average + + ''' + #add sub-title, static images + + c= self.c + c.setFont("Helvetica", 20) + uid=self.uid + + ds = 220 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Static Analysis"%self.sub_title_num ) #add title + + #add average image + c.setFont("Helvetica", 14) + + imgf = self.avg_img_file + + if self.report_type == 'saxs': + ipos = 60 + dshift=0 + elif self.report_type == 'gi_saxs': + ipos = 200 + dshift= 140 + elif self.report_type == 'ang_saxs': + ipos = 200 + dshift= 140 + else: + ipos = 200 + dshift= 140 + + + add_image_string( c, imgf, self.data_dir, img_left= ipos, img_top=top-ds, img_height=180, + str1_left=90 + dshift, str1_top = top-35,str1='Average Intensity Image', + str2_left = 80 + dshift, str2_top = top -230 ) + + #add q_Iq + if self.report_type == 'saxs': + imgf = self.qiq_file + #print(imgf) + if iq_fit: + imgf = self.qiq_fit_file + label = 'Circular Average' + lab_pos = 390 + fn_pos = 320 + add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, + str1_left=lab_pos, str1_top = top-35,str1=label, + str2_left = fn_pos, str2_top = top -230 ) + else: + if False: + imgf = self.ROI_on_Iq_file #self.qr_1d_file + label = 'Qr-1D' + lab_pos = 420 + fn_pos = 350 + + add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, + str1_left=lab_pos, str1_top = top-35,str1=label, + str2_left = fn_pos, str2_top = top -230 ) + if new_page: + c.showPage() + c.save() + + def report_ROI( self, top= 300, new_page=False): + '''create the static analysis report + two images: + ROI on average intensity image + ROI on circular average + ''' + uid=self.uid + c= self.c + #add sub-title, static images + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 230 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Define of ROI"%self.sub_title_num ) #add title + #add ROI on image + c.setFont("Helvetica", 14) + imgf = self.ROI_on_img_file + label = 'ROI on Image' + add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=240, + str1_left=110, str1_top = top-35,str1=label, + str2_left = 60, str2_top = top -260 ) + + #add q_Iq + if self.report_type == 'saxs' or self.report_type == 'gi_saxs' or self.report_type == 'ang_saxs': + imgf = self.ROI_on_Iq_file + img_height=180 + img_left,img_top =320, top - ds + str1_left, str1_top,str1= 420, top- 35, 'ROI on Iq' + str2_left, str2_top = 350, top- 260 + + #print ( imgf ) + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + def report_time_analysis( self, top= 720,new_page=False): + '''create the time dependent analysis report + four images: + each image total intensity as a function of time + iq~t + waterfall + mean intensity of each ROI as a function of time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + top1=top + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Time Dependent Plot"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + + top = top1 - 160 + + #add img_sum_t + if self.report_type == 'saxs': + ipos = 80 + elif self.report_type == 'gi_saxs': + ipos = 200 + elif self.report_type == 'ang_saxs': + ipos = 200 + else: + ipos = 200 + + imgf = self.img_sum_t_file + img_height=140 + img_left,img_top = ipos, top + str1_left, str1_top,str1= ipos + 60, top1 - 20 , 'img sum ~ t' + str2_left, str2_top = ipos, top- 5 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + #plot iq~t + if self.report_type == 'saxs': + imgf = self.Iq_t_file + image = self.data_dir + imgf + + + img_height=140 + img_left,img_top = 350, top + str1_left, str1_top,str1= 420, top1-20 , 'iq ~ t' + str2_left, str2_top = 360, top- 5 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + elif self.report_type == 'gi_saxs': + pass + + top = top1 - 340 + #add waterfall plot + imgf = self.wat_file + + img_height=160 + img_left,img_top = 80, top + str1_left, str1_top,str1= 140, top + img_height, 'waterfall plot' + str2_left, str2_top = 80, top- 5 + + if self.report_type != 'ang_saxs': + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + else: + pass + + #add mean-intensity of each roi + imgf = self.Mean_inten_t_file + + img_height=160 + img_left,img_top = 360, top + str1_left, str1_top,str1= 330, top + img_height, 'Mean-intensity-of-each-ROI' + str2_left, str2_top = 310, top- 5 + if self.report_type != 'ang_saxs': + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + else: + pass + + if new_page: + c.showPage() + c.save() + + def report_oavs( self, top= 350, oavs_file=None, new_page=False): + '''create the oavs images report + + ''' + + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
OAVS Images"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + #add g2 plot + if oavs_file is None: + imgf = self.oavs_file + else: + imgf = oavs_file + #print(self.data_dir + imgf) + + if os.path.exists(self.data_dir + imgf): + im = Image.open( self.data_dir+imgf ) + ratio = float(im.size[1])/im.size[0] + img_width = 600 + img_height= img_width * ratio #img_height + #width = height/ratio + + if not new_page: + #img_height= 550 + top = top - 600 + str2_left, str2_top = 80, top - 400 + img_left,img_top = 1, top + + if new_page: + #img_height= 150 + top = top - img_height - 50 + str2_left, str2_top = 80, top - 50 + img_left,img_top = 10, top + + str1_left, str1_top, str1= 150, top + img_height, 'OAVS images' + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + #print( imgf,self.data_dir ) + print(img_width, img_height) + + + + def report_one_time( self, top= 350, g2_fit_file=None, q_rate_file=None, new_page=False): + '''create the one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + ''' + + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + #add g2 plot + if g2_fit_file is None: + imgf = self.g2_fit_file + else: + imgf = g2_fit_file + + if self.report_type != 'ang_saxs': + img_height= 300 + top = top - 320 + str2_left, str2_top = 80, top- 0 + + else: + img_height= 550 + top = top - 600 + str2_left, str2_top = 80, top - 400 + #add one_time caculation + img_left,img_top = 1, top + if self.g2_fit_new_page or self.g2_new_page: + + img_height= 550 + top = top - 250 + str2_left, str2_top = 80, top - 0 + img_left,img_top = 60, top + + str1_left, str1_top,str1= 150, top + img_height, 'g2 fit plot' + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + #print( imgf,self.data_dir ) + #add g2 plot fit + #print(self.q_rate_file ) + if os.path.isfile( self.data_dir + self.q_rate_file ): + #print('here') + #print(self.q_rate_file ) + top = top + 70 # + if q_rate_file is None: + imgf = self.q_rate_file + else: + imgf = q_rate_file + if self.report_type != 'ang_saxs': + #print(img_width) + if img_width > 400: + img_height = 90 + else: + img_height= 180 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + else: + img_height= 300 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'q-rate fit plot' + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + img_height= 180 + img_left,img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + else: + top = top + 320 # + if q_rate_file is None: + imgf = self.q_rate_loglog_file + else: + imgf = q_rate_file + #print(imgf) + if self.report_type != 'ang_saxs': + #print(img_width) + if img_width > 400: + img_height = 90/2 + else: + img_height= 180 /2 + img_left,img_top = img_width-10, top #350, 
top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate loglog plot' + else: + img_height= 300/2 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'q-rate loglog plot' + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + 50 + img_height= 180 / 1.5 + img_left,img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 120, 'q-rate loglog plot' + + #print('here') + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + top = top - 100 # + if q_rate_file is None: + imgf = self.g2_q_fitpara_file + else: + imgf = q_rate_file + if self.report_type != 'ang_saxs': + #print(img_width) + if img_width > 400: + img_height = 90 + else: + img_height= 180 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'g2 fit para' + else: + img_height= 300 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'g2 fit para' + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + img_height= 180 * 1.5 + img_left,img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 280, 'g2 fit para' + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + + def report_mulit_one_time( self, top= 720,new_page=False): + '''create the mulit one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + ''' + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
One Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + #add g2 plot + top = top - 320 + + imgf = self.g2_fit_file + image = self.data_dir + imgf + if not os.path.exists(image): + image = self.data_dir + self.g2_file + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 1, top, width= height/ratio,height=height, mask= 'auto') + #c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None ) + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 150, top + height , 'g2 fit plot' ) + + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 80, top- 0, 'filename: %s'%imgf ) + + #add g2 plot fit + top = top + 70 # + imgf = self.q_rate_file + image = self.data_dir + imgf + if os.path.exists(image): + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 180 + c.drawImage( image, 350, top, width= height/ratio,height=height,mask= 'auto') + + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 450, top + 230, 'q-rate fit plot' ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 380, top- 5, 'filename: %s'%imgf ) + + if new_page: + c.showPage() + c.save() + + + + def report_two_time( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Two Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add q_Iq_t + imgf = self.two_time_file + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300, 'two time correlation function' + str2_left, str2_top = 180, top - 10 + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + + + + top = top - 340 + #add q_Iq_t + imgf = self.two_g2_file + + if True:#not self.two_g2_new_page: + + img_height= 300 + img_left,img_top = 100 -70, top + str1_left, str1_top,str1= 210-70, top + 310, 'compared g2' + str2_left, str2_top = 180-70, top - 10 + + if self.two_g2_new_page: + img_left,img_top = 100, top + print(imgf ) + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top,return_=True ) + #print(imgf) + top = top + 50 + imgf = self.q_rate_two_time_fit_file + #print(imgf, img_width, top) + if img_width < 400: + img_height= 140 + img_left,img_top = 350, top + 30 + str2_left, str2_top = 380 - 80, top - 5 + str1_left, str1_top,str1= 450 -80 , top + 230, 'q-rate fit from two-time' + + else: + img_height = 90 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + + + if new_page: + c.showPage() + c.save() + + def report_four_time( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + + c= self.c 
+ uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Four Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add q_Iq_t + imgf = self.four_time_file + + if not self.g4_new_page: + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300, 'four time correlation function' + str2_left, str2_top = 180, top - 10 + else: + img_height= 600 + top -= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300-250, 'four time correlation function' + str2_left, str2_top = 180, top - 10 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + def report_dose( self, top= 720, new_page=False): + + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Dose Analysis"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 530 + #add q_Iq_t + imgf = self.dose_file + + img_height= 500 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 500, 'dose analysis' + str2_left, str2_top = 180, top - 10 + + #print( self.data_dir + self.dose_file) + if os.path.exists( self.data_dir + imgf): + #print( self.dose_file) + im = Image.open( self.data_dir + imgf ) + ratio = float(im.size[1])/im.size[0] + width = img_height/ratio + #print(width) + if width >450: + img_height = 450*ratio + + if self.dose_file_new_page: + #img_left,img_top = 180, top + img_left,img_top = 100, top + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + + def report_flow_pv_g2( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Flow One Time Analysis"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add xsvs fit + + imgf = self.flow_g2v + image = self.data_dir + imgf + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2v_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + top = top - 340 + #add contrast fit + imgf = self.flow_g2p + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2p_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + if new_page: + c.showPage() + c.save() + + + def report_flow_pv_two_time( self, top= 720, new_page=False): + '''create the two time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Flow One &Two Time Comparison"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add xsvs fit + + + if False: + imgf = self.two_time + image = self.data_dir + imgf + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'Two_time' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + imgf = self.flow_g2_g2b_p + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow by two-time' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2bp_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + top = top - 340 + #add contrast fit + imgf = self.flow_g2_g2b_v + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow by two-time' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2bv_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + if new_page: + c.showPage() + c.save() + + def report_xsvs( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Visibility Analysis"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + top = top - 330 + #add xsvs fit + imgf = self.xsvs_fit_file + add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300, + + str1_left=210, str1_top = top +300,str1='XSVS_Fit_by_Negtive_Binomal Function', + str2_left = 180, str2_top = top -10 ) + + #add contrast fit + top = top -340 + imgf = self.contrast_file + add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300, + + str1_left=210, str1_top = top + 310,str1='contrast get from xsvs and xpcs', + str2_left = 180, str2_top = top -10 ) + + if False: + top1=top + top = top1 - 330 + #add xsvs fit + imgf = self.xsvs_fit_file + image = self.data_dir + imgf + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None) + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 210, top + 300 , 'XSVS_Fit_by_Negtive_Binomal Function' ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 180, top- 10, 'filename: %s'%imgf ) + top = top - 340 + #add contrast fit + imgf = self.contrast_file + image = self.data_dir + imgf + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None) + + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 210, top + 310, 'contrast get from xsvs and xpcs' ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 180, top- 10, 'filename: %s'%imgf ) + + + if new_page: + c.showPage() + c.save() + + + + + def new_page(self): + c=self.c + c.showPage() + + def save_page(self): + c=self.c + c.save() + + def done(self): + out_dir = self.out_dir + uid=self.uid + + print() + print('*'*40) + print ('The pdf report is created with filename as: %s'%(self.filename )) + print('*'*40) + + + + +def create_multi_pdf_reports_for_uids( uids, g2, data_dir, report_type='saxs', append_name='' ): + ''' Aug 16, YG@CHX-NSLS-II + Create multi pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + ''' + for key in list( g2.keys()): + i=1 + for sub_key in list( g2[key].keys() ): + uid_i = uids[key][sub_key] + data_dir_ = os.path.join( data_dir, '%s/'%uid_i ) + if append_name!='': + uid_name = uid_i + append_name + else: + uid_name = uid_i + c= create_pdf_report( data_dir_, uid_i,data_dir, + report_type=report_type, filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid_name ) + #Page one: Meta-data/Iq-Q/ROI + c.report_header(page=1) + c.report_meta( top=730) + #c.report_one_time( top= 500 ) + #c.new_page() + if report_type =='flow': + c.report_flow_pv_g2( top= 720) + c.save_page() + c.done() + + + + + +def create_one_pdf_reports_for_uids( uids, g2, data_dir, filename='all_in_one', report_type='saxs' ): + ''' Aug 16, YG@CHX-NSLS-II + Create one pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + ''' + c= create_pdf_report( data_dir, uid=filename, out_dir=data_dir, load=False, report_type= report_type) + page=1 + + for key in list( g2.keys()): + i=1 + for sub_key in list( g2[key].keys() ): + uid_i = uids[key][sub_key] + data_dir_ = os.path.join( data_dir, '%s/'%uid_i) + + c.uid = uid_i + c.data_dir = data_dir_ + c.load_metadata() + + #Page one: 
Meta-data/Iq-Q/ROI + c.report_header(page=page) + c.report_meta( top=730) + c.report_one_time( top= 500 ) + c.new_page() + page += 1 + c.uid = filename + c.save_page() + c.done() + + +def save_res_h5( full_uid, data_dir, save_two_time=False ): + ''' + YG. Nov 10, 2016 + save the results to a h5 file + will save meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo save multi-tau calculated one-time correlation function g2/taus + will also save two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will save two-time correaltion function + ''' + with h5py.File(data_dir + '%s.h5'%full_uid, 'w') as hf: + #write meta data + meta_data = hf.create_dataset("meta_data", (1,), dtype='i') + for key in md.keys(): + try: + meta_data.attrs[key] = md[key] + except: + pass + + shapes = md['avg_img'].shape + avg_h5 = hf.create_dataset("avg_img", data = md['avg_img'] ) + mask_h5 = hf.create_dataset("mask", data = md['mask'] ) + roi_h5 = hf.create_dataset("roi", data = md['ring_mask'] ) + + g2_h5 = hf.create_dataset("g2", data = g2 ) + taus_h5 = hf.create_dataset("taus", data = taus ) + + if save_two_time: + g12b_h5 = hf.create_dataset("g12b", data = g12b ) + g2b_h5 = hf.create_dataset("g2b", data = g2b ) + taus2_h5 = hf.create_dataset("taus2", data = taus2 ) + +def printname(name): + print (name) +#f.visit(printname) +def load_res_h5( full_uid, data_dir ): + '''YG. Nov 10, 2016 + load results from a h5 file + will load meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo load multi-tau calculated one-time correlation function g2/taus + will also load two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will load two-time correaltion function + + ''' + with h5py.File(data_dir + '%s.h5'%full_uid, 'r') as hf: + meta_data_h5 = hf.get( "meta_data" ) + meta_data = {} + for att in meta_data_h5.attrs: + meta_data[att] = meta_data_h5.attrs[att] + avg_h5 = np.array( hf.get("avg_img" ) ) + mask_h5 = np.array(hf.get("mask" )) + roi_h5 =np.array( hf.get("roi" )) + g2_h5 = np.array( hf.get("g2" )) + taus_h5 = np.array( hf.get("taus" )) + g2b_h5 = np.array( hf.get("g2b")) + taus2_h5 = np.array( hf.get("taus2")) + if 'g12b' in hf: + g12b_h5 = np.array( hf.get("g12b")) + + if 'g12b' in hf: + return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b + else: + return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5 + + + + +def make_pdf_report( data_dir, uid, pdf_out_dir, pdf_filename, username, + run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=None, + oavs_report = False,report_type='saxs', md=None,report_invariant=False, return_class=False, res_h5_filename=None + ): + + if uid.startswith("uid=") or uid.startswith("Uid="): + uid = uid[4:] + c= create_pdf_report( data_dir, uid, pdf_out_dir, filename= pdf_filename, user= username, report_type=report_type, md = md, res_h5_filename=res_h5_filename ) + #print( c.md) + #Page one: Meta-data/Iq-Q/ROI + c.report_header(page=1) + c.report_meta( top=730) + c.report_static( top=540, iq_fit = run_fit_form ) + c.report_ROI( top= 290) + page = 1 + ##Page Two for plot OVAS images if oavs_report is True + if oavs_report: + c.new_page() + c.report_header(page=2) + c.report_oavs( top= 720, oavs_file=None, new_page=True) + page +=1 + + #Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q + c.new_page() + page +=1 + c.report_header(page=page) + + if c.report_type != 'ang_saxs': + c.report_time_analysis( top= 720) + if run_one_time: + if 
c.report_type != 'ang_saxs': + top = 350 + else: + top = 500 + if c.g2_fit_new_page: + c.new_page() + page +=1 + top = 720 + c.report_one_time( top= top ) + + + #self.two_g2_new_page = True + #self.g2_fit_new_page = True + + #Page Three: two-time/two g2 + + if run_two_time: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_two_time( top= 720 ) + + if run_four_time: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_four_time( top= 720 ) + + if run_xsvs: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_xsvs( top= 720 ) + if run_dose: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_dose( top = 702) + if report_invariant: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_invariant( top = 702) + + else: + c.report_flow_pv_g2( top= 720, new_page= True) + c.report_flow_pv_two_time( top= 720, new_page= True ) + + c.save_page() + c.done() + if return_class: + return c + + +###################################### +###Deal with saving dict to hdf5 file +def save_dict_to_hdf5(dic, filename): + """ + .... + """ + with h5py.File(filename, 'w') as h5file: + recursively_save_dict_contents_to_group(h5file, '/', dic) + +def load_dict_from_hdf5(filename): + """ + .... + """ + with h5py.File(filename, 'r') as h5file: + return recursively_load_dict_contents_from_group(h5file, '/') + +def recursively_save_dict_contents_to_group( h5file, path, dic): + """...""" + # argument type checking + if not isinstance(dic, dict): + raise ValueError("must provide a dictionary") + + if not isinstance(path, str): + raise ValueError("path must be a string") + if not isinstance(h5file, h5py._hl.files.File): + raise ValueError("must be an open h5py file") + # save items to the hdf5 file + for key, item in dic.items(): + #print(key,item) + key = str(key) + if isinstance(item, list): + item = np.array(item) + #print(item) + if not isinstance(key, str): + raise ValueError("dict keys must be strings to save to hdf5") + # save strings, numpy.int64, and numpy.float64 types + if isinstance(item, (np.int64, np.float64, str, float, np.float32,int)): # removed depreciated np.float LW @06/11/2023 + #print( 'here' ) + h5file[path + key] = item + if not h5file[path + key].value == item: + raise ValueError('The data representation in the HDF5 file does not match the original dict.') + # save numpy arrays + elif isinstance(item, np.ndarray): + try: + h5file[path + key] = item + except: + item = np.array(item).astype('|S9') + h5file[path + key] = item + if not np.array_equal(h5file[path + key].value, item): + raise ValueError('The data representation in the HDF5 file does not match the original dict.') + # save dictionaries + elif isinstance(item, dict): + recursively_save_dict_contents_to_group(h5file, path + key + '/', item) + # other types cannot be saved and will result in an error + else: + #print(item) + raise ValueError('Cannot save %s type.' % type(item)) + + +def recursively_load_dict_contents_from_group( h5file, path): + """...""" + ans = {} + for key, item in h5file[path].items(): + if isinstance(item, h5py._hl.dataset.Dataset): + ans[key] = item.value + elif isinstance(item, h5py._hl.group.Group): + ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/') + return ans + + +def export_xpcs_results_to_h5( filename, export_dir, export_dict ): + ''' + YG. May 10, 2017 + save the results to a h5 file + + YG. 
Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. + ''' + + fout = export_dir + filename + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] + dict_nest=['taus_uids', 'g2_uids' ] + + with h5py.File(fout, 'w') as hf: + flag=False + for key in list(export_dict.keys()): + #print( key ) + if key in dicts: #=='md' or key == 'qval_dict': + md= export_dict[key] + meta_data = hf.create_dataset( key, (1,), dtype='i') + for key_ in md.keys(): + try: + meta_data.attrs[str(key_)] = md[key_] + except: + pass + elif key in dict_nest: + #print(key) + try: + recursively_save_dict_contents_to_group(hf, '/%s/'%key, export_dict[key] ) + except: + print("Can't export the key: %s in this dataset."%key) + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + try: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + except: + flag=True + else: + data = hf.create_dataset(key, data = export_dict[key] ) + #add this fill line at Octo 27, 2017 + data.set_fill_value = np.nan + if flag: + for key in list(export_dict.keys()): + if key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + + print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) + + + +def extract_xpcs_results_from_h5_debug( filename, import_dir, onekey=None, exclude_keys=None ): + ''' + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. + ''' + + import pandas as pds + import numpy as np + extract_dict = {} + fp = import_dir + filename + pds_type_keys = [] + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] + if exclude_keys is None: + exclude_keys =[] + if onekey is None: + for k in dicts: + extract_dict[k] = {} + with h5py.File( fp, 'r') as hf: + #print (list( hf.keys()) ) + for key in list( hf.keys()): + if key not in exclude_keys: + if key in dicts: + extract_dict[key] = recursively_load_dict_contents_from_group(hf, '/' + key + '/') + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + pds_type_keys.append( key ) + else: + extract_dict[key] = np.array( hf.get( key )) + for key in pds_type_keys: + if key not in exclude_keys: + extract_dict[key] = pds.read_hdf(fp, key= key ) + else: + if onekey == 'md': + with h5py.File( fp, 'r') as hf: + md = hf.get('md') + for key in list(md.attrs): + extract_dict['md'][key] = md.attrs[key] + elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) + else: + try: + with h5py.File( fp, 'r') as hf: + extract_dict[onekey] = np.array( hf.get( onekey )) + except: + print("The %s dosen't have this %s value"%(fp, onekey) ) + return extract_dict + + + + + + + + +def export_xpcs_results_to_h5_old( filename, export_dir, export_dict ): + ''' + YG. Dec 22, 2016 + save the results to a h5 file + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. 
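+
+    Note: legacy variant kept for reference; the branch below that writes the nested
+    dictionaries ('taus_uids', 'g2_uids') appears incomplete, so export_xpcs_results_to_h5
+    above is preferred for new exports.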
+ ''' + import h5py + fout = export_dir + filename + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] #{k1: { }} + dict_nest= ['taus_uids', 'g2_uids'] #{k1: {k2:}} + with h5py.File(fout, 'w') as hf: + for key in list(export_dict.keys()): + #print( key ) + if key in dicts: #=='md' or key == 'qval_dict': + md= export_dict[key] + meta_data = hf.create_dataset( key, (1,), dtype='i') + for key_ in md.keys(): + try: + meta_data.attrs[str(key_)] = md[key_] + except: + pass + elif key in dict_nest: + k1 = export_dict[key] + v1 = hf.create_dataset( key, (1,), dtype='i') + for k2 in k1.keys(): + + v2 = hf.create_dataset( k1, (1,), dtype='i') + + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + else: + data = hf.create_dataset(key, data = export_dict[key] ) + print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) + + +def extract_xpcs_results_from_h5( filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex = None ): + ''' + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. + ''' + + import pandas as pds + import numpy as np + extract_dict = {} + fp = import_dir + filename + pds_type_keys = [] + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] + if exclude_keys is None: + exclude_keys =[] + if onekey is None: + for k in dicts: + extract_dict[k] = {} + with h5py.File( fp, 'r') as hf: + #print (list( hf.keys()) ) + for key in list( hf.keys()): + if key not in exclude_keys: + if key in dicts: + md = hf.get(key) + for key_ in list(md.attrs): + #print(key, key_) + if key == 'qval_dict': + extract_dict[key][int(key_)] = md.attrs[key_] + else: + extract_dict[key][key_] = md.attrs[key_] + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + pds_type_keys.append( key ) + else: + if key == 'g12b': + if two_time_qindex is not None: + extract_dict[key] = hf.get( key )[:,:,two_time_qindex] + else: + extract_dict[key] = hf.get( key )[:] + else: + extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key )) + + for key in pds_type_keys: + if key not in exclude_keys: + extract_dict[key] = pds.read_hdf(fp, key= key ) + else: + if onekey == 'md': + with h5py.File( fp, 'r') as hf: + md = hf.get('md') + for key in list(md.attrs): + extract_dict['md'][key] = md.attrs[key] + elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) + else: + try: + with h5py.File( fp, 'r') as hf: + if key == 'g12b': + if two_time_qindex is not None: + extract_dict[key] = hf.get( key )[:,:,two_time_qindex] + else: + extract_dict[key] = hf.get( key )[:] + else: + extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key )) + #extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) + except: + print("The %s dosen't have this %s value"%(fp, onekey) ) + return extract_dict + + + + + +def read_contrast_from_multi_csv( uids, path, times=None, unit=20 ): + '''Y.G. 
2016, Dec 23, load contrast from multi csv file''' + + N = len(uids) + if times is None: + times = np.array( [0] + [2**i for i in range(N)] )*unit + for i, uid in enumerate(uids): + fp = path + uid + '/uid=%s--contrast_factorL.csv'%uid + contri = pds.read_csv( fp ) + qs = np.array( contri[contri.columns[0]] ) + contri_ = np.array( contri[contri.columns[1]] ) + if i ==0: + contr = np.zeros( [ N, len(qs)]) + contr[i] = contri_ + #contr[0,:] = np.nan + return times, contr + +def read_contrast_from_multi_h5( uids, path, ): + '''Y.G. 2016, Dec 23, load contrast from multi h5 file''' + N = len(uids) + times_xsvs = np.zeros( N ) + for i, uid in enumerate(uids): + t = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, + import_dir = path + uid + '/' , onekey= 'times_xsvs') + times_xsvs[i] = t['times_xsvs'][0] + contri = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, + import_dir = path + uid + '/' , onekey= 'contrast_factorL') + if i ==0: + contr = np.zeros( [ N, contri['contrast_factorL'].shape[0] ]) + contr[i] = contri['contrast_factorL'][:,0] + return times_xsvs, contr + + + + + + diff --git a/pyCHX/backups/chx_compress_01242025.py b/pyCHX/backups/chx_compress_01242025.py new file mode 100644 index 0000000..b42d538 --- /dev/null +++ b/pyCHX/backups/chx_compress_01242025.py @@ -0,0 +1,1476 @@ +import gc +import os +import pickle as pkl +import shutil +import struct +import sys +from contextlib import closing +from glob import iglob +from multiprocessing import Pool + +import dill +import matplotlib.pyplot as plt + +# imports handler from CHX +# this is where the decision is made whether or not to use dask +# from chxtools.handlers import EigerImages, EigerHandler +from eiger_io.fs_handler import EigerHandler, EigerImages +from tqdm import tqdm + +from pyCHX.chx_generic_functions import ( + copy_data, + create_time_slice, + delete_data, + get_detector, + get_eigerImage_per_file, + get_sid_filenames, + load_data, + reverse_updown, + rot90_clockwise, +) +from pyCHX.chx_libs import RUN_GUI, LogNorm, datetime, db, getpass, np, os, roi, time + + +def run_dill_encoded(what): + fun, args = dill.loads(what) + return fun(*args) + + +def apply_async(pool, fun, args, callback=None): + return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),), callback=callback) + + +def map_async(pool, fun, args): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) + + +def pass_FD(FD, n): + # FD.rdframe(n) + try: + FD.seekimg(n) + except: + pass + return False + + +def go_through_FD(FD): + if not pass_FD(FD, FD.beg): + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + else: + pass + + +def compress_eigerdata( + images, + mask, + md, + filename=None, + force_compress=False, + bad_pixel_threshold=1e15, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + nobytes=2, + bins=1, + bad_frame_list=None, + para_compress=False, + num_sub=100, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + with_pickle=False, + direct_load_data=True, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + """ + Init 2016, YG@CHX + DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data + Add copy_rawdata opt. 
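+
+    Key options (as handled below):
+        force_compress: if True, always (re)create the .cmp file; otherwise an existing file is read back.
+        para_compress: compress with a multiprocessing Pool (only honored when dtypes == 'uid').
+        bins: number of frames to bin/average together before writing (bins != 1 stores averaged float frames).
+        with_pickle: also dump [mask, avg_img, imgsum, bad_frame_list] to filename + '.pkl'.
+        copy_rawdata / new_path: copy the raw master file to new_path first to work around GPFS IO issues.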
+ + """ + + end = len(images) // bins + if filename is None: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"] + if dtypes != "uid": + para_compress = False + else: + if para_compress: + images = "foo" + # para_compress= True + # print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file(data_path) + if data_path is None: + sud = get_sid_filenames(db[uid]) + data_path = sud[2][0] + if force_compress: + print("Create a new compress file with filename as :%s." % filename) + if para_compress: + # stop connection to be before forking... (let it reset again); 11/09/2024 this seems to fail with 'registry doesn't have attribute disconnect... -> try making this optional; this might have been a leftover: if compression happens "natuarally" (not as force_compress=True) this disconnect/reconnect is already missing...we definitely had this error before... + try: + db.reg.disconnect() + db.mds.reset_connection() + except: + pass + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + rot90=rot90, + reverse=reverse, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + new_path=new_path, + ) + else: + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + if not os.path.exists(filename): + print("Create a new compress file with filename as :%s." % filename) + if para_compress: + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + reverse=reverse, + rot90=rot90, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + ) + else: + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + print("Using already created compressed file with filename as :%s." 
% filename) + beg = 0 + return read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + bad_frame_list=bad_frame_list, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + + +def read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + bad_frame_list=None, + with_pickle=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + """ + # should use try and except instead of with_pickle in the future! + CAL = False + if not with_pickle: + CAL = True + else: + try: + mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) + except: + CAL = True + if CAL: + FD = Multifile(filename, beg, end) + imgsum = np.zeros(FD.end - FD.beg, dtype=np.float64) + avg_img = np.zeros([FD.md["ncols"], FD.md["nrows"]], dtype=np.float64) + imgsum, bad_frame_list_ = get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=bad_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, + plot_=False, + bad_frame_list=bad_frame_list, + ) + avg_img = get_avg_imgc(FD, beg=None, end=None, sampling=1, plot_=False, bad_frame_list=bad_frame_list_) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + + +def para_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + cpu_core_number=72, + with_pickle=True, + direct_load_data=False, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + + data_path_ = data_path + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + else: + # print('Here for images_per_file: %s'%images_per_file) + # images_ = EigerImages( data_path, images_per_file=images_per_file) + # print('here') + if not copy_rawdata: + images_ = EigerImages(data_path, images_per_file, md) + else: + print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.") + print("Copying...") + copy_data(data_path, new_path) + # print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages(new_master_file, images_per_file, md) + # print(md) + if reverse: + images_ = reverse_updown(images_) # Why not np.flipud? 
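+                # flip frames top-to-bottom before compression, matching the reverse option
+                # that load_data() applies in the non-direct-load branch above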
+ if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + + else: + N = len(images) + N = int(np.ceil(N / bins)) + Nf = int(np.ceil(N / num_sub)) + if Nf > cpu_core_number: + print("The process number is larger than %s (XF11ID server core number)" % cpu_core_number) + num_sub_old = num_sub + num_sub = int(np.ceil(N / cpu_core_number)) + Nf = int(np.ceil(N / num_sub)) + print("The sub compressed file number was changed from %s to %s" % (num_sub_old, num_sub)) + create_compress_header(md, filename + "-header", nobytes, bins, rot90=rot90) + # print( 'done for header here') + # print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( + images=images, + mask=mask, + md=md, + filename=filename, + num_sub=num_sub, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + dtypes=dtypes, + num_max_para_process=num_max_para_process, + reverse=reverse, + rot90=rot90, + direct_load_data=direct_load_data, + data_path=data_path_, + images_per_file=images_per_file, + ) + + res_ = [results[k].get() for k in list(sorted(results.keys()))] + imgsum = np.zeros(N) + bad_frame_list = np.zeros(N, dtype=bool) + good_count = 1 + for i in range(Nf): + mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i] + imgsum[i * num_sub : (i + 1) * num_sub] = imgsum_ + bad_frame_list[i * num_sub : (i + 1) * num_sub] = bad_frame_list_ + if i == 0: + mask = mask_ + avg_img = np.zeros_like(avg_img_) + else: + mask *= mask_ + if not np.sum(np.isnan(avg_img_)): + avg_img += avg_img_ + good_count += 1 + + bad_frame_list = np.where(bad_frame_list)[0] + avg_img /= good_count + + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + print("Combining the seperated compressed files together...") + combine_compressed(filename, Nf, del_old=True) + del results + del res_ + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + if copy_rawdata: + delete_data(data_path, new_path) + return mask, avg_img, imgsum, bad_frame_list + + +def combine_compressed(filename, Nf, del_old=True): + old_files = [filename + "-header"] + for i in range(Nf): + old_files.append(filename + "_temp-%i.tmp" % i) + combine_binary_files(filename, old_files, del_old) + + +def combine_binary_files(filename, old_files, del_old=False): + """Combine binary files together""" + fn_ = open(filename, "wb") + for ftemp in old_files: + shutil.copyfileobj(open(ftemp, "rb"), fn_) + if del_old: + os.remove(ftemp) + fn_.close() + + +def para_segment_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="images", + reverse=True, + rot90=False, + num_max_para_process=50, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + parallelly compressed eiger data without header, this function is for parallel compress + """ + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + else: + images_ = EigerImages(data_path, images_per_file, md) + if reverse: + images_ = reverse_updown(images_) + if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + + else: + N = len(images) + + # N = int( np.ceil( N/ bins ) ) + num_sub *= bins + if N % num_sub: 
+ Nf = N // num_sub + 1 + print("The average image intensity would be slightly not correct, about 1% error.") + print("Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image") + else: + Nf = N // num_sub + print("It will create %i temporary files for parallel compression." % Nf) + + if Nf > num_max_para_process: + N_runs = np.int(np.ceil(Nf / float(num_max_para_process))) + print("The parallel run number: %s is larger than num_max_para_process: %s" % (Nf, num_max_para_process)) + else: + N_runs = 1 + result = {} + # print( mask_filename )# + '*'* 10 + 'here' ) + for nr in range(N_runs): + if (nr + 1) * num_max_para_process > Nf: + inputs = range(num_max_para_process * nr, Nf) + else: + inputs = range(num_max_para_process * nr, num_max_para_process * (nr + 1)) + fns = [filename + "_temp-%i.tmp" % i for i in inputs] + # print( nr, inputs, ) + pool = Pool(processes=len(inputs)) # , maxtasksperchild=1000 ) + # print( inputs ) + for i in inputs: + if i * num_sub <= N: + result[i] = pool.apply_async( + segment_compress_eigerdata, + [ + images, + mask, + md, + filename + "_temp-%i.tmp" % i, + bad_pixel_threshold, + hot_pixel_threshold, + bad_pixel_low_threshold, + nobytes, + bins, + i * num_sub, + (i + 1) * num_sub, + dtypes, + reverse, + rot90, + direct_load_data, + data_path, + images_per_file, + ], + ) + + pool.close() + pool.join() + pool.terminate() + return result + + +def segment_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + N1=None, + N2=None, + dtypes="images", + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Create a compressed eiger data without header, this function is for parallel compress + for parallel compress don't pass any non-scalar parameters + """ + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images = load_data(uid, detector, reverse=reverse, rot90=rot90)[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] + if reverse: + images = reverse_updown(EigerImages(data_path, images_per_file, md))[N1:N2] + if rot90: + images = rot90_clockwise(images) + + Nimg_ = len(images) + M, N = images[0].shape + avg_img = np.zeros([M, N], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 + good_count = 0 + # frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 + else: + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + # Nimg = Nimg_//bins + Nimg = int(np.ceil(Nimg_ / bins)) + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + # print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros(Nimg) + if bins != 1: + # print('The frames will be binned by %s'%bins) + dtype = np.float64 + + fp = open(filename, "wb") + for n in range(Nimg): + t1, t2 = time_edge[n] + if bins != 1: + img = np.array(np.average(images[t1:t2], axis=0), dtype=dtype) + else: + img = np.array(images[t1], dtype=dtype) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) * np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen == 0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + dlen = 0 + fp.write(struct.pack("@I", dlen)) + 
else: + np.ravel(avg_img)[p] += v + good_count += 1 + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 + del p, v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write("#") + sys.stdout.flush() + # del images, mask, avg_img, imgsum, bad_frame_list + # print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + +def create_compress_header(md, filename, nobytes=4, bins=1, rot90=False): + """ + Create the head for a compressed eiger data, this function is for parallel compress + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + flag = True + # print( list(md.keys()) ) + # print(md) + if "pixel_mask" in list(md.keys()): + sx, sy = md["pixel_mask"].shape[0], md["pixel_mask"].shape[1] + elif "img_shape" in list(md.keys()): + sx, sy = md["img_shape"][0], md["img_shape"][1] + else: + sx, sy = 2167, 2070 # by default for 4M + # print(flag) + klst = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + ] + vs = [0, 0, 0, 0, 0, 0, 75, 75] + for i, k in enumerate(klst): + if k in list(md.keys()): + vs[i] = md[k] + if flag: + if rot90: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + nobytes, + sx, + sy, + 0, + sx, + 0, + sy, + ) + + else: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + # md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, + sy, + sx, + 0, + sy, + 0, + sx, + ) + + fp.write(Header) + fp.close() + + +def init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + with_pickle=True, + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + if "count_time" not in list(md.keys()): + md["count_time"] = 0 + if "detector_distance" not in list(md.keys()): + md["detector_distance"] = 0 + if "frame_time" not in list(md.keys()): + md["frame_time"] = 0 + if "incident_wavelength" not in list(md.keys()): + md["incident_wavelength"] = 0 + if "y_pixel_size" not in list(md.keys()): + md["y_pixel_size"] = 
0 + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 0 + if "beam_center_x" not in list(md.keys()): + md["beam_center_x"] = 0 + if "beam_center_y" not in list(md.keys()): + md["beam_center_y"] = 0 + + if not rot90: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[1], + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + ) + else: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[0], + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + ) + + fp.write(Header) + + Nimg_ = len(images) + avg_img = np.zeros_like(images[0], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 + good_count = 0 + frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 + else: + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + Nimg = Nimg_ // bins + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + + imgsum = np.zeros(Nimg) + if bins != 1: + print("The frames will be binned by %s" % bins) + + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + img = np.average(images[t1:t2], axis=0) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) & np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + # if imgsum[n] >=bad_pixel_threshold : + dlen = 0 + fp.write(struct.pack("@I", dlen)) + else: + np.ravel(avg_img)[p] += v + good_count += 1 + frac += dlen / Nopix + # s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + if nobytes != 8: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + # n +=1 + + fp.close() + frac /= good_count + print("The fraction of pixel occupied by photon is %6.3f%% " % (100 * frac)) + avg_img /= good_count + + bad_frame_list = np.where( + (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + )[0] + # bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + # bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + # bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + return mask, avg_img, imgsum, bad_frame_list + + +""" Description: + + This is code that Mark wrote to open the multifile format + in compressed mode, translated to python. 
+ This seems to work for DALSA, FCCD and EIGER in compressed mode. + It should be included in the respective detector.i files + Currently, this refers to the compression mode being '6' + Each file is image descriptor files chunked together as follows: + Header (1024 bytes) + |--------------IMG N begin--------------| + | Dlen + |---------------------------------------| + | Pixel positions (dlen*4 bytes | + | (0 based indexing in file) | + |---------------------------------------| + | Pixel data(dlen*bytes bytes) | + | (bytes is found in header | + | at position 116) | + |--------------IMG N end----------------| + |--------------IMG N+1 begin------------| + |----------------etc.....---------------| + + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + + +""" + + +class Multifile: + """The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + """ + + def __init__(self, filename, beg, end, reverse=False): + """Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + """ + self.FID = open(filename, "rb") + # self.FID.seek(0,os.SEEK_SET) + self.filename = filename + # br: bytes read + br = self.FID.read(1024) + self.beg = beg + self.end = end + self.reverse = reverse + ms_keys = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + "bytes", + "nrows", + "ncols", + "rows_begin", + "rows_end", + "cols_begin", + "cols_end", + ] + + magic = struct.unpack("@16s", br[:16]) + md_temp = struct.unpack("@8d7I916x", br[16:]) + self.md = dict(zip(ms_keys, md_temp)) + + self.imgread = 0 + self.recno = 0 + + if reverse: + nrows = self.md["nrows"] + ncols = self.md["ncols"] + self.md["nrows"] = ncols + self.md["ncols"] = nrows + rbeg = self.md["rows_begin"] + rend = self.md["rows_end"] + cbeg = self.md["cols_begin"] + cend = self.md["cols_end"] + self.md["rows_begin"] = cbeg + self.md["rows_end"] = cend + self.md["cols_begin"] = rbeg + self.md["cols_end"] = rend + + # some initialization stuff + self.byts = self.md["bytes"] + if self.byts == 2: + self.valtype = np.uint16 + elif self.byts == 4: + self.valtype = np.uint32 + elif self.byts == 8: + self.valtype = np.float64 + # now convert pieces of these bytes to our data + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + + # now read first image + # print "Opened file. 
Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + + def _readHeader(self): + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + + def _readImageRaw(self): + + p = np.fromfile(self.FID, dtype=np.int32, count=self.dlen) + v = np.fromfile(self.FID, dtype=self.valtype, count=self.dlen) + self.imgread = 1 + return (p, v) + + def _readImage(self): + (p, v) = self._readImageRaw() + img = np.zeros((self.md["ncols"], self.md["nrows"])) + np.put(np.ravel(img), p, v) + return img + + def seekimg(self, n=None): + """Position file to read the nth image. + For now only reads first image ignores n + """ + # the logic involving finding the cursor position + if n is None: + n = self.recno + if n < self.beg or n > self.end: + raise IndexError("Error, record out of range") + # print (n, self.recno, self.FID.tell() ) + if (n == self.recno) and (self.imgread == 0): + pass # do nothing + + else: + if n <= self.recno: # ensure cursor less than search pos + self.FID.seek(1024, os.SEEK_SET) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + self.recno = 0 + self.imgread = 0 + if n == 0: + return + # have to iterate on seeking since dlen varies + # remember for rec recno, cursor is always at recno+1 + if self.imgread == 0: # move to next header if need to + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + for i in range(self.recno + 1, n): + # the less seeks performed the faster + # print (i) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + # print 's',self.dlen + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + + # we are now at recno in file, read the header and data + # self._clearImage() + self._readHeader() + self.imgread = 0 + self.recno = n + + def rdframe(self, n): + if self.seekimg(n) != -1: + return self._readImage() + + def rdrawframe(self, n): + if self.seekimg(n) != -1: + return self._readImageRaw() + + +class Multifile_Bins(object): + """ + Bin a compressed file with bins number + See Multifile for details for Multifile_class + """ + + def __init__(self, FD, bins=100): + """ + FD: the handler of a compressed Eiger frames + bins: bins number + """ + + self.FD = FD + if (FD.end - FD.beg) % bins: + print("Please give a better bins number and make the length of FD/bins= integer") + else: + self.bins = bins + self.md = FD.md + # self.beg = FD.beg + self.beg = 0 + Nimg = FD.end - FD.beg + slice_num = Nimg // bins + self.end = slice_num + self.time_edge = np.array(create_time_slice(N=Nimg, slice_num=slice_num, slice_width=bins)) + FD.beg + self.get_bin_frame() + + def get_bin_frame(self): + FD = self.FD + self.frames = np.zeros([FD.md["ncols"], FD.md["nrows"], len(self.time_edge)]) + for n in tqdm(range(len(self.time_edge))): + # print (n) + t1, t2 = self.time_edge[n] + # print( t1, t2) + self.frames[:, :, n] = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + + def rdframe(self, n): + return self.frames[:, :, n] + + def rdrawframe(self, n): + x_ = np.ravel(self.rdframe(n)) + p = np.where(x_)[0] + v = np.array(x_[p]) + return (np.array(p, dtype=np.int32), v) + + +class MultifileBNL: + """ + Re-write multifile from scratch. + """ + + HEADER_SIZE = 1024 + + def __init__(self, filename, mode="rb"): + """ + Prepare a file for reading or writing. 
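+        Note: only read mode is implemented at present; passing mode='wb' raises ValueError below.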
+ mode : either 'rb' or 'wb' + """ + if mode == "wb": + raise ValueError("Write mode 'wb' not supported yet") + if mode != "rb" and mode != "wb": + raise ValueError("Error, mode must be 'rb' or 'wb'" "got : {}".format(mode)) + self._filename = filename + self._mode = mode + # open the file descriptor + # create a memmap + if mode == "rb": + self._fd = np.memmap(filename, dtype="c") + elif mode == "wb": + self._fd = open(filename, "wb") + # these are only necessary for writing + self.md = self._read_main_header() + self._cols = int(self.md["nrows"]) + self._rows = int(self.md["ncols"]) + # some initialization stuff + self.nbytes = self.md["bytes"] + if self.nbytes == 2: + self.valtype = " self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # dlen is 4 bytes + cur = self.frame_indexes[n] + dlen = np.frombuffer(self._fd[cur : cur + 4], dtype=" nbytes + vals = self._fd[cur : cur + dlen * self.nbytes] + vals = np.frombuffer(vals, dtype=self.valtype) + return pos, vals + + def rdframe(self, n): + # read header then image + pos, vals = self._read_raw(n) + img = np.zeros((self._rows * self._cols,)) + img[pos] = vals + return img.reshape((self._rows, self._cols)) + + def rdrawframe(self, n): + # read header then image + return self._read_raw(n) + + +class MultifileBNLCustom(MultifileBNL): + def __init__(self, filename, beg=0, end=None, **kwargs): + super().__init__(filename, **kwargs) + self.beg = beg + if end is None: + end = self.Nframes - 1 + self.end = end + + def rdframe(self, n): + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + # return super().rdframe(n - self.beg) + return super().rdframe(n) + + def rdrawframe(self, n): + # return super().rdrawframe(n - self.beg) + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + return super().rdrawframe(n) + + +def get_avg_imgc( + FD, beg=None, end=None, sampling=100, plot_=False, bad_frame_list=None, show_progress=True, *argv, **kwargs +): + """Get average imagef from a data_series by every sampling number to save time""" + # avg_img = np.average(data_series[:: sampling], axis=0) + + if beg is None: + beg = FD.beg + if end is None: + end = FD.end + + avg_img = FD.rdframe(beg) + n = 1 + flag = True + if show_progress: + # print( sampling-1 + beg , end, sampling ) + if bad_frame_list is None: + bad_frame_list = [] + fra_num = int((end - beg) / sampling) - len(bad_frame_list) + for i in tqdm(range(sampling - 1 + beg, end, sampling), desc="Averaging %s images" % fra_num): + if bad_frame_list is not None: + if i in bad_frame_list: + flag = False + else: + flag = True + # print(i, flag) + if flag: + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + else: + for i in range(sampling - 1 + beg, end, sampling): + if bad_frame_list is not None: + if i in bad_frame_list: + flag = False + else: + flag = True + if flag: + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + + avg_img /= n + if plot_: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked-Averaged-Image-" % uid) + fig.colorbar(im) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if 
"uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() + return avg_img + + +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): + """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. If None, this function will extract averages for all + ROIs + + Returns + ------- + mean_intensity : array + The mean intensity of each ROI for all `images` + Dimensions: + len(mean_intensity) == len(index) + len(mean_intensity[0]) == len(images) + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + sx, sy = (FD.rdframe(FD.beg)).shape + if labeled_array.shape != (sx, sy): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (sx, sy, labeled_array.shape[0], labeled_array.shape[1]) + ) + # handle various input for `index` + if index is None: + index = list(np.unique(labeled_array)) + index.remove(0) + else: + try: + len(index) + except TypeError: + index = [index] + + index = np.array(index) + # print ('here') + good_ind = np.zeros(max(qind), dtype=np.int32) + good_ind[index - 1] = np.arange(len(index)) + 1 + w = np.where(good_ind[qind - 1])[0] + qind = good_ind[qind[w] - 1] + pixelist = pixelist[w] + + # pre-allocate an array for performance + # might be able to use list comprehension to make this faster + + mean_intensity = np.zeros([int((FD.end - FD.beg) / sampling), len(index)]) + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + # maxqind = max(qind) + norm = np.bincount(qind)[1:] + n = 0 + # for i in tqdm(range( FD.beg , FD.end )): + if not multi_cor: + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get ROI intensity of each frame"): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mean_intensity[n] = np.bincount(qind[pxlist], weights=v[w], minlength=len(index) + 1)[1:] + n += 1 + else: + ring_masks = [np.array(labeled_array == i, dtype=np.int64) for i in np.unique(labeled_array)[1:]] + inputs = range(len(ring_masks)) + go_through_FD(FD) + pool = Pool(processes=len(inputs)) + print("Starting assign the tasks...") + results = {} + for i in tqdm(inputs): + results[i] = apply_async(pool, _get_mean_intensity_one_q, (FD, sampling, ring_masks[i])) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + # return res + for i in inputs: + mean_intensity[:, i] = res[i] + print("ROI mean_intensit calculation is DONE!") + del results + del res + + mean_intensity /= norm + return mean_intensity, index + + +def _get_mean_intensity_one_q(FD, sampling, labels): + mi = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], 
dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + for i in range(FD.beg, FD.end, sampling): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mi[n] = np.bincount(qind[pxlist], weights=v[w], minlength=2)[1:] + n += 1 + return mi + + +def get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=1e10, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + plot_=False, + bad_frame_list=None, + save=False, + *argv, + **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + # mask &= img < hot_pixel_threshold + imgsum = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get each frame intensity"): + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + imgsum[n] = np.sum(v) + n += 1 + + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + plt.show() + + bad_frame_list_ = ( + np.where((np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold))[0] + + FD.beg + ) + + if bad_frame_list is not None: + bad_frame_list = np.unique(np.concatenate([bad_frame_list, bad_frame_list_])) + else: + bad_frame_list = bad_frame_list_ + + if len(bad_frame_list): + print("Bad frame list length is: %s" % len(bad_frame_list)) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list diff --git a/pyCHX/backups/chx_compress_05012024.py b/pyCHX/backups/chx_compress_05012024.py new file mode 100644 index 0000000..706cf7e --- /dev/null +++ b/pyCHX/backups/chx_compress_05012024.py @@ -0,0 +1,1189 @@ +import os,shutil +from glob import iglob + +import matplotlib.pyplot as plt +from pyCHX.chx_libs import (np, roi, time, datetime, os, getpass, db, + LogNorm, RUN_GUI) +from pyCHX.chx_generic_functions import (create_time_slice,get_detector, get_sid_filenames, + load_data,reverse_updown,rot90_clockwise, get_eigerImage_per_file,copy_data,delete_data, ) + + +import struct +from tqdm import tqdm +from contextlib import closing + +from multiprocessing import Pool +import dill +import sys +import gc +import pickle as pkl +# imports handler from CHX +# this is where the decision is made whether or not to use dask +#from chxtools.handlers import EigerImages, EigerHandler +from eiger_io.fs_handler import EigerHandler,EigerImages + +def run_dill_encoded(what): + fun, args = dill.loads(what) + return fun(*args) + +def apply_async(pool, fun, args, callback=None): + return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback) + + +def map_async(pool, fun, args ): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) + + +def pass_FD(FD,n): + #FD.rdframe(n) + try: + FD.seekimg(n) + except: + pass + return False +def go_through_FD(FD): 
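+    '''Seek through every frame of the compressed file FD once (from FD.beg to FD.end),
+    e.g. to position/refresh the file handle before dispatching parallel per-ROI jobs.'''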
+ if not pass_FD(FD,FD.beg): + for i in range(FD.beg, FD.end): + pass_FD(FD,i) + else: + pass + + + + + +def compress_eigerdata( images, mask, md, filename=None, force_compress=False, + bad_pixel_threshold=1e15, bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, nobytes=2,bins=1, bad_frame_list=None, + para_compress= False, num_sub=100, dtypes='uid',reverse =True, rot90=False, + num_max_para_process=500, with_pickle=False, direct_load_data=True, data_path=None, + images_per_file=100, copy_rawdata=True,new_path = '/tmp_data/data/'): + ''' + Init 2016, YG@CHX + DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data + Add copy_rawdata opt. + + ''' + + end= len(images)//bins + if filename is None: + filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid'] + if dtypes!= 'uid': + para_compress= False + else: + if para_compress: + images='foo' + #para_compress= True + #print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file( data_path ) + if data_path is None: + sud = get_sid_filenames(db[uid]) + data_path = sud[2][0] + if force_compress: + print ("Create a new compress file with filename as :%s."%filename) + if para_compress: + # stop connection to be before forking... (let it reset again) + db.reg.disconnect() + db.mds.reset_connection() + print( 'Using a multiprocess to compress the data.') + return para_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, + bins=bins, num_sub=num_sub, dtypes=dtypes, rot90=rot90, + reverse=reverse, num_max_para_process=num_max_para_process, + with_pickle= with_pickle, direct_load_data= direct_load_data, + data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata,new_path=new_path) + else: + return init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, + images_per_file=images_per_file) + else: + if not os.path.exists( filename ): + print ("Create a new compress file with filename as :%s."%filename) + if para_compress: + print( 'Using a multiprocess to compress the data.') + return para_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins, + num_sub=num_sub, dtypes=dtypes, reverse=reverse,rot90=rot90, + num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata) + else: + return init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) + else: + print ("Using already created compressed file with filename as :%s."%filename) + beg=0 + return read_compressed_eigerdata( mask, filename, beg, end, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + 
bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) + + + +def read_compressed_eigerdata( mask, filename, beg, end, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False, + direct_load_data=False,data_path=None,images_per_file=100): + ''' + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + ''' + #should use try and except instead of with_pickle in the future! + CAL = False + if not with_pickle: + CAL = True + else: + try: + mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) ) + except: + CAL = True + if CAL: + FD = Multifile( filename, beg, end) + imgsum = np.zeros( FD.end- FD.beg, dtype= np.float64 ) + avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float64 ) + imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1, + bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, plot_ = False, + bad_frame_list=bad_frame_list) + avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ ) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + +def para_compress_eigerdata( images, mask, md, filename, num_sub=100, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,rot90=False, + num_max_para_process=500, cpu_core_number=72, with_pickle=True, + direct_load_data=False, data_path=None,images_per_file=100, + copy_rawdata=True,new_path = '/tmp_data/data/'): + + data_path_ = data_path + if dtypes=='uid': + uid= md['uid'] #images + if not direct_load_data: + detector = get_detector( db[uid ] ) + images_ = load_data( uid, detector, reverse= reverse,rot90=rot90 ) + else: + #print('Here for images_per_file: %s'%images_per_file) + #images_ = EigerImages( data_path, images_per_file=images_per_file) + #print('here') + if not copy_rawdata: + images_ = EigerImages(data_path,images_per_file, md) + else: + print('Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.') + print('Copying...') + copy_data( data_path, new_path ) + #print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages( new_master_file, images_per_file, md) + #print(md) + if reverse: + images_ = reverse_updown( images_ ) # Why not np.flipud? 
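+            # Editor's note (hedged): for an in-memory ndarray stack this is the same as
+            # flipping each frame with np.flipud; a dedicated helper is presumably used
+            # because EigerImages is a lazy, file-backed sequence that cannot be flipped
+            # as a NumPy view.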
+ if rot90: + images_ = rot90_clockwise( images_ ) + + N= len(images_) + + else: + N = len(images) + N = int( np.ceil( N/ bins ) ) + Nf = int( np.ceil( N/ num_sub ) ) + if Nf > cpu_core_number: + print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number) + num_sub_old = num_sub + num_sub = int( np.ceil(N/cpu_core_number)) + Nf = int( np.ceil( N/ num_sub ) ) + print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub )) + create_compress_header( md, filename +'-header', nobytes, bins, rot90=rot90 ) + #print( 'done for header here') + #print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename, + num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes, + num_max_para_process=num_max_para_process, + reverse = reverse,rot90=rot90, + direct_load_data=direct_load_data, data_path=data_path_, + images_per_file=images_per_file) + + res_ = [ results[k].get() for k in list(sorted(results.keys())) ] + imgsum = np.zeros( N ) + bad_frame_list = np.zeros( N, dtype=bool ) + good_count = 1 + for i in range( Nf ): + mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i] + imgsum[i*num_sub: (i+1)*num_sub] = imgsum_ + bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_ + if i==0: + mask = mask_ + avg_img = np.zeros_like( avg_img_ ) + else: + mask *= mask_ + if not np.sum( np.isnan( avg_img_)): + avg_img += avg_img_ + good_count += 1 + + bad_frame_list = np.where( bad_frame_list )[0] + avg_img /= good_count + + if len(bad_frame_list): + print ('Bad frame list are: %s' %bad_frame_list) + else: + print ('No bad frames are involved.') + print( 'Combining the seperated compressed files together...') + combine_compressed( filename, Nf, del_old=True) + del results + del res_ + if with_pickle: + pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) + if copy_rawdata: + delete_data( data_path, new_path ) + return mask, avg_img, imgsum, bad_frame_list + +def combine_compressed( filename, Nf, del_old=True): + old_files = [filename +'-header'] + for i in range(Nf): + old_files.append(filename + '_temp-%i.tmp' % i) + combine_binary_files(filename, old_files, del_old) + +def combine_binary_files(filename, old_files, del_old = False): + '''Combine binary files together''' + fn_ = open(filename, 'wb') + for ftemp in old_files: + shutil.copyfileobj( open(ftemp, 'rb'), fn_) + if del_old: + os.remove( ftemp ) + fn_.close() + +def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images', + reverse =True, rot90=False, + num_max_para_process=50,direct_load_data=False, data_path=None, + images_per_file=100): + ''' + parallelly compressed eiger data without header, this function is for parallel compress + ''' + if dtypes=='uid': + uid= md['uid'] #images + if not direct_load_data: + detector = get_detector( db[uid ] ) + images_ = load_data( uid, detector, reverse= reverse, rot90=rot90 ) + else: + images_ = EigerImages(data_path, images_per_file, md) + if reverse: + images_ = reverse_updown( images_ ) + if rot90: + images_ = rot90_clockwise( images_ ) + + N= len(images_) + + else: + N = len(images) + + #N = int( np.ceil( N/ bins ) ) + num_sub *= bins + if N%num_sub: + Nf = N// num_sub +1 + print('The 
average image intensity would be slightly not correct, about 1% error.') + print( 'Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image') + else: + Nf = N//num_sub + print( 'It will create %i temporary files for parallel compression.'%Nf) + + if Nf> num_max_para_process: + N_runs = np.int( np.ceil( Nf/float(num_max_para_process))) + print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process )) + else: + N_runs= 1 + result = {} + #print( mask_filename )# + '*'* 10 + 'here' ) + for nr in range( N_runs ): + if (nr+1)*num_max_para_process > Nf: + inputs= range( num_max_para_process*nr, Nf ) + else: + inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) ) + fns = [ filename + '_temp-%i.tmp'%i for i in inputs] + #print( nr, inputs, ) + pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 ) + #print( inputs ) + for i in inputs: + if i*num_sub <= N: + result[i] = pool.apply_async( segment_compress_eigerdata, [ + images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,rot90, direct_load_data, data_path,images_per_file ] ) + + pool.close() + pool.join() + pool.terminate() + return result + +def segment_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, + N1=None, N2=None, dtypes='images',reverse =True, rot90=False,direct_load_data=False, data_path=None,images_per_file=100 ): + ''' + Create a compressed eiger data without header, this function is for parallel compress + for parallel compress don't pass any non-scalar parameters + ''' + if dtypes=='uid': + uid= md['uid'] #images + if not direct_load_data: + detector = get_detector( db[uid ] ) + images = load_data( uid, detector, reverse= reverse, rot90=rot90 )[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] + if reverse: + images = reverse_updown( EigerImages(data_path, images_per_file, md) )[N1:N2] + if rot90: + images = rot90_clockwise( images ) + + Nimg_ = len( images) + M,N = images[0].shape + avg_img = np.zeros( [M,N], dtype= np.float64 ) + Nopix = float( avg_img.size ) + n=0 + good_count = 0 + #frac = 0.0 + if nobytes==2: + dtype= np.int16 + elif nobytes==4: + dtype= np.int32 + elif nobytes==8: + dtype=np.float64 + else: + print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype= np.int32 + + + #Nimg = Nimg_//bins + Nimg = int( np.ceil( Nimg_ / bins ) ) + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bins )) + #print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros( Nimg ) + if bins!=1: + #print('The frames will be binned by %s'%bins) + dtype=np.float64 + + fp = open( filename,'wb' ) + for n in range(Nimg): + t1,t2 = time_edge[n] + if bins!=1: + img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype) + else: + img = np.array( images[t1], dtype=dtype) + mask &= img < hot_pixel_threshold + p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data + v = np.ravel( np.array( img, dtype= dtype )) [p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): + dlen = 0 + fp.write( struct.pack( '@I', dlen )) + else: + np.ravel( avg_img )[p] += v + good_count +=1 + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( 
'@{}i'.format( dlen), *p)) + if bins==1: + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) + else: + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1 + del p,v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write('#') + sys.stdout.flush() + #del images, mask, avg_img, imgsum, bad_frame_list + #print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + + +def create_compress_header( md, filename, nobytes=4, bins=1, rot90=False ): + ''' + Create the head for a compressed eiger data, this function is for parallel compress + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + flag = True + #print( list(md.keys()) ) + #print(md) + if 'pixel_mask' in list(md.keys()): + sx,sy = md['pixel_mask'].shape[0], md['pixel_mask'].shape[1] + elif 'img_shape' in list(md.keys()): + sx,sy = md['img_shape'][0], md['img_shape'][1] + else: + sx,sy= 2167, 2070 #by default for 4M + #print(flag) + klst = [ 'beam_center_x','beam_center_y', 'count_time','detector_distance', + 'frame_time','incident_wavelength', 'x_pixel_size','y_pixel_size'] + vs = [ 0 ,0, 0, 0, + 0, 0, 75, 75] + for i, k in enumerate(klst): + if k in list(md.keys()): + vs[i] = md[k] + if flag: + if rot90: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + vs[0], vs[1], vs[2], vs[3], + vs[4], vs[5], vs[6], vs[7], + nobytes,sx, sy, + 0, sx, + 0,sy ) + + else: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + vs[0], vs[1], vs[2], vs[3], + vs[4], vs[5], vs[6], vs[7], +#md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, sy,sx, + 0, sy, + 0, sx + ) + + + + fp.write( Header) + fp.close() + + + +def init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True, + reverse =True, rot90=False, + direct_load_data=False, data_path=None,images_per_file=100, + ): + ''' + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + if 'count_time' not in list( md.keys() ): + md['count_time']=0 + if 'detector_distance' not in list( md.keys() ): + md['detector_distance']=0 + if 'frame_time' not in list( md.keys() ): + md['frame_time']=0 + if 'incident_wavelength' not in list( md.keys() ): + md['incident_wavelength']=0 + if 'y_pixel_size' not in list( md.keys() ): + md['y_pixel_size']=0 + if 'x_pixel_size' not in list( md.keys() ): + md['x_pixel_size']=0 + if 'beam_center_x' not in list( md.keys() ): + md['beam_center_x']=0 + if 'beam_center_y' not in list( md.keys() ): + md['beam_center_y']=0 + + if not 
rot90: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0], + 0, md['pixel_mask'].shape[1], + 0, md['pixel_mask'].shape[0] + ) + else: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, md['pixel_mask'].shape[0], md['pixel_mask'].shape[1], + 0, md['pixel_mask'].shape[0], + 0, md['pixel_mask'].shape[1] + ) + + fp.write( Header) + + Nimg_ = len( images) + avg_img = np.zeros_like( images[0], dtype= np.float64 ) + Nopix = float( avg_img.size ) + n=0 + good_count = 0 + frac = 0.0 + if nobytes==2: + dtype= np.int16 + elif nobytes==4: + dtype= np.int32 + elif nobytes==8: + dtype=np.float64 + else: + print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype= np.int32 + + + Nimg = Nimg_//bins + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bins )) + + imgsum = np.zeros( Nimg ) + if bins!=1: + print('The frames will be binned by %s'%bins) + + for n in tqdm( range(Nimg) ): + t1,t2 = time_edge[n] + img = np.average( images[t1:t2], axis=0 ) + mask &= img < hot_pixel_threshold + p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data + v = np.ravel( np.array( img, dtype= dtype )) [p] + dlen = len(p) + imgsum[n] = v.sum() + if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): + #if imgsum[n] >=bad_pixel_threshold : + dlen = 0 + fp.write( struct.pack( '@I', dlen )) + else: + np.ravel(avg_img )[p] += v + good_count +=1 + frac += dlen/Nopix + #s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *p)) + if bins==1: + if nobytes!=8: + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) + else: + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) + else: + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) + #n +=1 + + fp.close() + frac /=good_count + print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) ) + avg_img /= good_count + + bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0] + #bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + #bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + #bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + + + if len(bad_frame_list): + print ('Bad frame list are: %s' %bad_frame_list) + else: + print ('No bad frames are involved.') + if with_pickle: + pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) + return mask, avg_img, imgsum, bad_frame_list + + + +""" Description: + + This is code that Mark wrote to open the multifile format + in compressed mode, translated to python. + This seems to work for DALSA, FCCD and EIGER in compressed mode. 
+ It should be included in the respective detector.i files + Currently, this refers to the compression mode being '6' + Each file is image descriptor files chunked together as follows: + Header (1024 bytes) + |--------------IMG N begin--------------| + | Dlen + |---------------------------------------| + | Pixel positions (dlen*4 bytes | + | (0 based indexing in file) | + |---------------------------------------| + | Pixel data(dlen*bytes bytes) | + | (bytes is found in header | + | at position 116) | + |--------------IMG N end----------------| + |--------------IMG N+1 begin------------| + |----------------etc.....---------------| + + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + + +""" + + +class Multifile: + '''The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + ''' + def __init__(self,filename,beg,end, reverse=False ): + '''Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + ''' + self.FID = open(filename,"rb") +# self.FID.seek(0,os.SEEK_SET) + self.filename = filename + #br: bytes read + br = self.FID.read(1024) + self.beg=beg + self.end=end + self.reverse=reverse + ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + 'bytes', + 'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' + ] + + magic = struct.unpack('@16s', br[:16]) + md_temp = struct.unpack('@8d7I916x', br[16:]) + self.md = dict(zip(ms_keys, md_temp)) + + self.imgread=0 + self.recno = 0 + + if reverse: + nrows = self.md['nrows'] + ncols = self.md['ncols'] + self.md['nrows'] = ncols + self.md['ncols'] = nrows + rbeg = self.md['rows_begin'] + rend = self.md['rows_end'] + cbeg = self.md['cols_begin'] + cend = self.md['cols_end'] + self.md['rows_begin']=cbeg + self.md['rows_end']=cend + self.md['cols_begin']=rbeg + self.md['cols_end']=rend + + + + # some initialization stuff + self.byts = self.md['bytes'] + if (self.byts==2): + self.valtype = np.uint16 + elif (self.byts == 4): + self.valtype = np.uint32 + elif (self.byts == 8): + self.valtype = np.float64 + #now convert pieces of these bytes to our data + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + + # now read first image + #print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + + def _readHeader(self): + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + + def _readImageRaw(self): + + p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen) + v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen) + self.imgread=1 + return(p,v) + + def _readImage(self): + (p,v)=self._readImageRaw() + img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) ) + np.put( np.ravel(img), p, v ) + return(img) + + def seekimg(self,n=None): + + '''Position file to read the nth image. 
+ For now only reads first image ignores n + ''' + # the logic involving finding the cursor position + if (n is None): + n = self.recno + if (n < self.beg or n > self.end): + raise IndexError('Error, record out of range') + #print (n, self.recno, self.FID.tell() ) + if ((n == self.recno) and (self.imgread==0)): + pass # do nothing + + else: + if (n <= self.recno): #ensure cursor less than search pos + self.FID.seek(1024,os.SEEK_SET) + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + self.recno = 0 + self.imgread=0 + if n == 0: + return + #have to iterate on seeking since dlen varies + #remember for rec recno, cursor is always at recno+1 + if(self.imgread==0 ): #move to next header if need to + self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) + for i in range(self.recno+1,n): + #the less seeks performed the faster + #print (i) + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + #print 's',self.dlen + self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) + + # we are now at recno in file, read the header and data + #self._clearImage() + self._readHeader() + self.imgread=0 + self.recno = n + def rdframe(self,n): + if self.seekimg(n)!=-1: + return(self._readImage()) + + def rdrawframe(self,n): + if self.seekimg(n)!=-1: + return(self._readImageRaw()) + + + +class Multifile_Bins( object ): + ''' + Bin a compressed file with bins number + See Multifile for details for Multifile_class + ''' + def __init__(self, FD, bins=100): + ''' + FD: the handler of a compressed Eiger frames + bins: bins number + ''' + + self.FD=FD + if (FD.end - FD.beg)%bins: + print ('Please give a better bins number and make the length of FD/bins= integer') + else: + self.bins = bins + self.md = FD.md + #self.beg = FD.beg + self.beg = 0 + Nimg = (FD.end - FD.beg) + slice_num = Nimg//bins + self.end = slice_num + self.time_edge = np.array(create_time_slice( N= Nimg, + slice_num= slice_num, slice_width= bins )) + FD.beg + self.get_bin_frame() + + def get_bin_frame(self): + FD= self.FD + self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] ) + for n in tqdm( range(len(self.time_edge))): + #print (n) + t1,t2 = self.time_edge[n] + #print( t1, t2) + self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, + plot_ = False, show_progress = False ) + def rdframe(self,n): + return self.frames[:,:,n] + + def rdrawframe(self,n): + x_= np.ravel( self.rdframe(n) ) + p= np.where( x_ ) [0] + v = np.array( x_[ p ]) + return ( np.array(p, dtype=np.int32), v) + + +class MultifileBNL: + ''' + Re-write multifile from scratch. + ''' + HEADER_SIZE = 1024 + def __init__(self, filename, mode='rb'): + ''' + Prepare a file for reading or writing. 
+ mode : either 'rb' or 'wb' + ''' + if mode == 'wb': + raise ValueError("Write mode 'wb' not supported yet") + if mode != 'rb' and mode != 'wb': + raise ValueError("Error, mode must be 'rb' or 'wb'" + "got : {}".format(mode)) + self._filename = filename + self._mode = mode + # open the file descriptor + # create a memmap + if mode == 'rb': + self._fd = np.memmap(filename, dtype='c') + elif mode == 'wb': + self._fd = open(filename, "wb") + # these are only necessary for writing + self.md = self._read_main_header() + self._cols = int(self.md['nrows']) + self._rows = int(self.md['ncols']) + # some initialization stuff + self.nbytes = self.md['bytes'] + if (self.nbytes==2): + self.valtype = " self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # dlen is 4 bytes + cur = self.frame_indexes[n] + dlen = np.frombuffer(self._fd[cur:cur+4], dtype=" nbytes + vals = self._fd[cur: cur+dlen*self.nbytes] + vals = np.frombuffer(vals, dtype=self.valtype) + return pos, vals + def rdframe(self, n): + # read header then image + pos, vals = self._read_raw(n) + img = np.zeros((self._rows*self._cols,)) + img[pos] = vals + return img.reshape((self._rows, self._cols)) + def rdrawframe(self, n): + # read header then image + return self._read_raw(n) + +class MultifileBNLCustom(MultifileBNL): + def __init__(self, filename, beg=0, end=None, **kwargs): + super().__init__(filename, **kwargs) + self.beg = beg + if end is None: + end = self.Nframes-1 + self.end = end + def rdframe(self, n): + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + #return super().rdframe(n - self.beg) + return super().rdframe( n ) + def rdrawframe(self, n): + #return super().rdrawframe(n - self.beg) + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + return super().rdrawframe(n ) + + + +def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None, + show_progress=True, *argv,**kwargs): + '''Get average imagef from a data_series by every sampling number to save time''' + #avg_img = np.average(data_series[:: sampling], axis=0) + + if beg is None: + beg = FD.beg + if end is None: + end = FD.end + + avg_img = FD.rdframe(beg) + n=1 + flag=True + if show_progress: + #print( sampling-1 + beg , end, sampling ) + if bad_frame_list is None: + bad_frame_list =[] + fra_num = int( (end - beg )/sampling ) - len( bad_frame_list ) + for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num): + if bad_frame_list is not None: + if i in bad_frame_list: + flag= False + else: + flag=True + #print(i, flag) + if flag: + (p,v) = FD.rdrawframe(i) + if len(p)>0: + np.ravel(avg_img )[p] += v + n += 1 + else: + for i in range( sampling-1 + beg , end, sampling ): + if bad_frame_list is not None: + if i in bad_frame_list: + flag= False + else: + flag=True + if flag: + (p,v) = FD.rdrawframe(i) + if len(p)>0: + np.ravel(avg_img )[p] += v + n += 1 + + avg_img /= n + if plot_: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + im = ax.imshow(avg_img , cmap='viridis',origin='lower', + norm= LogNorm(vmin=0.001, vmax=1e2)) + #ax.set_title("Masked Averaged Image") + ax.set_title('uid= %s--Masked-Averaged-Image-'%uid) + fig.colorbar(im) + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = 
kwargs['uid'] + else: + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-"%uid + '.png' + plt.savefig( fp, dpi=fig.dpi) + #plt.show() + return avg_img + + + +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False): + """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. If None, this function will extract averages for all + ROIs + + Returns + ------- + mean_intensity : array + The mean intensity of each ROI for all `images` + Dimensions: + len(mean_intensity) == len(index) + len(mean_intensity[0]) == len(images) + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices( labeled_array ) + sx,sy = ( FD.rdframe(FD.beg) ).shape + if labeled_array.shape != ( sx,sy ): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( sx,sy, labeled_array.shape[0], labeled_array.shape[1]) ) + # handle various input for `index` + if index is None: + index = list(np.unique(labeled_array)) + index.remove(0) + else: + try: + len(index) + except TypeError: + index = [index] + + index = np.array( index ) + #print ('here') + good_ind = np.zeros( max(qind), dtype= np.int32 ) + good_ind[ index -1 ] = np.arange( len(index) ) +1 + w = np.where( good_ind[qind -1 ] )[0] + qind = good_ind[ qind[w] -1 ] + pixelist = pixelist[w] + + + # pre-allocate an array for performance + # might be able to use list comprehension to make this faster + + mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] ) + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + #maxqind = max(qind) + norm = np.bincount( qind )[1:] + n= 0 + #for i in tqdm(range( FD.beg , FD.end )): + if not multi_cor: + for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ): + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:] + n +=1 + else: + ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ] + inputs = range( len(ring_masks) ) + go_through_FD(FD) + pool = Pool(processes= len(inputs) ) + print( 'Starting assign the tasks...') + results = {} + for i in tqdm ( inputs ): + results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) ) + pool.close() + print( 'Starting running the tasks...') + res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ] + #return res + for i in inputs: + mean_intensity[:,i] = res[i] + print( 'ROI mean_intensit calculation is DONE!') + del results + del res + + mean_intensity /= norm + return mean_intensity, index + + +def _get_mean_intensity_one_q( FD, sampling, labels ): + mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) ) + n=0 + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( 
FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + for i in range( FD.beg, FD.end, sampling ): + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:] + n +=1 + return mi + + + +def get_each_frame_intensityc( FD, sampling = 1, + bad_pixel_threshold=1e10, bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs): + '''Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + ''' + + #print ( argv, kwargs ) + #mask &= img < hot_pixel_threshold + imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) ) + n=0 + for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ): + (p,v) = FD.rdrawframe(i) + if len(p)>0: + imgsum[n] = np.sum( v ) + n += 1 + + if plot_: + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + fig, ax = plt.subplots() + ax.plot( imgsum,'bo') + ax.set_title('uid= %s--imgsum'%uid) + ax.set_xlabel( 'Frame_bin_%s'%sampling ) + ax.set_ylabel( 'Total_Intensity' ) + + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] + else: + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + + plt.show() + + bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg + + if bad_frame_list is not None: + bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) ) + else: + bad_frame_list = bad_frame_list_ + + if len(bad_frame_list): + print ('Bad frame list length is: %s' %len(bad_frame_list)) + else: + print ('No bad frames are involved.') + return imgsum,bad_frame_list + + + + diff --git a/pyCHX/backups/chx_correlationc_05012024.py b/pyCHX/backups/chx_correlationc_05012024.py new file mode 100644 index 0000000..af0dbd4 --- /dev/null +++ b/pyCHX/backups/chx_correlationc_05012024.py @@ -0,0 +1,1676 @@ +""" +June 10, Developed by Y.G.@CHX with the assistance of Mark Sutton +yuzhang@bnl.gov +This module is for computation of time correlation by using compressing algorithm +""" + + +from __future__ import absolute_import, division, print_function + +from skbeam.core.utils import multi_tau_lags +from skbeam.core.roi import extract_label_indices +from collections import namedtuple +import numpy as np +import skbeam.core.roi as roi + +import logging +logger = logging.getLogger(__name__) +from tqdm import tqdm + + +def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm, + label_array, num_bufs, num_pixels, img_per_level, + level, buf_no, norm, lev_len): + """Reference implementation of the inner loop of multi-tau one time + correlation + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. 
+ Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + #maxqind=G.shape[1] + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = int( level * num_bufs / 2 + i ) + delay_no = (buf_no - i) % num_bufs + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level+1][ind] + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + for w, arr in zip([past_img*future_img, past_img, future_img], + [G, past_intensity_norm, future_intensity_norm]): + binned = np.bincount(label_array, weights=w)[1:] + #nonz = np.where(w)[0] + #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + arr[t_index] += ((binned / num_pixels - + arr[t_index]) / normalize) + return None # modifies arguments in place! + + + +def _one_time_process_error(buf, G, past_intensity_norm, future_intensity_norm, + label_array, num_bufs, num_pixels, img_per_level, + level, buf_no, norm, lev_len, + G_err, past_intensity_norm_err, future_intensity_norm_err ): + """Reference implementation of the inner loop of multi-tau one time + correlation with the calculation of errorbar (statistical error due to multipixel measurements ) + The statistical error: var( g2(Q) ) = sum( [g2(Qi)- g2(Q)]^2 )/N(N-1), Lumma, RSI, 2000 + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. 
+ Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + #maxqind=G.shape[1] + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = int( level * num_bufs / 2 + i ) + delay_no = (buf_no - i) % num_bufs + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level+1][ind] + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + + #for w, arr in zip([past_img*future_img, past_img, future_img], + # [G, past_intensity_norm, future_intensity_norm, + # ]): + # binned = np.bincount(label_array, weights=w)[1:] + # #nonz = np.where(w)[0] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # arr[t_index] += ((binned / num_pixels - + # arr[t_index]) / normalize) + for w, arr in zip([past_img*future_img, past_img, future_img], + [ + G_err, past_intensity_norm_err, future_intensity_norm_err, + ]): + arr[t_index] += ( w - arr[t_index]) / normalize + return None # modifies arguments in place! 
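+# Editor's note: the following sketch is illustrative only and NOT part of the original
+# pyCHX API; the helper name is hypothetical.  It shows how the per-pixel accumulators
+# filled by _one_time_process_error (G_all, past_intensity_all, future_intensity_all)
+# could be reduced to a per-ROI g2 and its statistical error following the formula cited
+# in the docstring above (Lumma et al., Rev. Sci. Instrum. 71, 3274 (2000)):
+#     var(g2(Q)) = sum_i [g2(Q_i) - g2(Q)]^2 / (N*(N-1)),  N = number of pixels in ROI Q.
+# It relies on the module-level "import numpy as np".
+def _example_g2_error_from_pixel_arrays(G_all, past_all, future_all, label_array):
+    '''Hypothetical helper (editorial sketch): reduce per-pixel correlation arrays of
+    shape (n_tau, n_pixels) to a per-ROI g2 and its statistical error bar, using the
+    per-pixel ROI labels in label_array (shape (n_pixels,), labels 1..n_roi).'''
+    with np.errstate(divide='ignore', invalid='ignore'):
+        g2_pix = G_all / (past_all * future_all)   # per-pixel g2, shape (n_tau, n_pixels)
+    rois = np.unique(label_array)
+    g2 = np.zeros((g2_pix.shape[0], len(rois)))
+    g2_err = np.zeros_like(g2)
+    for j, q in enumerate(rois):
+        sel = (label_array == q)
+        vals = g2_pix[:, sel]
+        n = int(sel.sum())
+        g2[:, j] = np.nanmean(vals, axis=1)
+        denom = max(n * (n - 1), 1)                # guard the single-pixel case
+        # statistical error of the ROI-averaged g2 over its n pixels (Lumma 2000)
+        g2_err[:, j] = np.sqrt(np.nansum((vals - g2[:, j][:, None]) ** 2, axis=1) / denom)
+    return g2, g2_err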
+ + +results = namedtuple( + 'correlation_results', + ['g2', 'lag_steps', 'internal_state'] +) + +_internal_state = namedtuple( + 'correlation_state', + ['buf', + 'G', + 'past_intensity', + 'future_intensity', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'norm', + 'lev_len'] +) + +_internal_state_err = namedtuple( + 'correlation_state', + ['buf', + 'G', + 'past_intensity', + 'future_intensity', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'norm', + 'lev_len', + 'G_all', + 'past_intensity_all', + 'future_intensity_all' + ] +) + + +_two_time_internal_state = namedtuple( + 'two_time_correlation_state', + ['buf', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'g2', + 'count_level', + 'current_img_time', + 'time_ind', + 'norm', + 'lev_len'] +) + + +def _validate_and_transform_inputs(num_bufs, num_levels, labels): + """ + This is a helper function to validate inputs and create initial state + inputs for both one time and two time correlation + Parameters + ---------- + num_bufs : int + num_levels : int + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + Returns + ------- + label_array : array + labels of the required region of interests(ROI's) + pixel_list : array + 1D array of indices into the raveled image for all + foreground pixels (labeled nonzero) + e.g., [5, 6, 7, 8, 14, 15, 21, 22] + num_rois : int + number of region of interests (ROI) + num_pixels : array + number of pixels in each ROI + lag_steps : array + the times at which the correlation was computed + buf : array + image data for correlation + img_per_level : array + to track how many images processed in each level + track_level : array + to track processing each level + cur : array + to increment the buffer + norm : dict + to track bad images + lev_len : array + length of each levels + """ + if num_bufs % 2 != 0: + raise ValueError("There must be an even number of `num_bufs`. You " + "provided %s" % num_bufs) + label_array, pixel_list = extract_label_indices(labels) + + # map the indices onto a sequential list of integers starting at 1 + label_mapping = {label: n+1 + for n, label in enumerate(np.unique(label_array))} + # remap the label array to go from 1 -> max(_labels) + for label, n in label_mapping.items(): + label_array[label_array == label] = n + + # number of ROI's + num_rois = len(label_mapping) + + # stash the number of pixels in the mask + num_pixels = np.bincount(label_array)[1:] + + # Convert from num_levels, num_bufs to lag frames. + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + + # these norm and lev_len will help to find the one time correlation + # normalization norm will updated when there is a bad image + norm = {key: [0] * len(dict_lag[key]) for key in (dict_lag.keys())} + lev_len = np.array([len(dict_lag[i]) for i in (dict_lag.keys())]) + + # Ring buffer, a buffer with periodic boundary conditions. + # Images must be keep for up to maximum delay in buf. 
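+    # Editor's note: with L = num_levels and B = num_bufs the multi-tau scheme produces
+    # B + (L - 1)*B/2 = (L + 1)*B/2 lag times in total (cf. multi_tau_lags above), while
+    # each level only ever needs B frame slots -- hence the ring-buffer shape below.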
+ buf = np.zeros((num_levels, num_bufs, len(pixel_list)), + dtype=np.float64) + # to track how many images processed in each level + img_per_level = np.zeros(num_levels, dtype=np.int64) + # to track which levels have already been processed + track_level = np.zeros(num_levels, dtype=bool) + # to increment buffer + cur = np.ones(num_levels, dtype=np.int64) + + return (label_array, pixel_list, num_rois, num_pixels, + lag_steps, buf, img_per_level, track_level, cur, + norm, lev_len) + +def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): + """Initialize a stateful namedtuple for the generator-based multi-tau + for one time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + Returns + ------- + internal_state : namedtuple + The namedtuple that contains all the state information that + `lazy_one_time` requires so that it can be used to pick up + processing after it was interrupted + """ + (label_array, pixel_list, num_rois, num_pixels, lag_steps, buf, + img_per_level, track_level, cur, norm, + lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # G holds the un normalized auto- correlation result. We + # accumulate computations into G as the algorithm proceeds. + + G = np.zeros(( int( (num_levels + 1) * num_bufs / 2), num_rois), + dtype=np.float64) + + # matrix for normalizing G into g2 + past_intensity = np.zeros_like(G) + # matrix for normalizing G into g2 + future_intensity = np.zeros_like(G) + if cal_error: + G_all = np.zeros(( int( (num_levels + 1) * num_bufs / 2), len(pixel_list)), + dtype=np.float64) + + # matrix for normalizing G into g2 + past_intensity_all = np.zeros_like(G_all) + # matrix for normalizing G into g2 + future_intensity_all = np.zeros_like(G_all) + return _internal_state_err( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + G_all, + past_intensity_all, + future_intensity_all + ) + else: + return _internal_state( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + ) + + +def fill_pixel( p, v, pixelist): + fra_pix = np.zeros_like( pixelist ) + fra_pix[ np.in1d( pixelist,p ) ] = v[np.in1d( p, pixelist )] + return fra_pix + + + + + +def lazy_one_time(FD, num_levels, num_bufs, labels, + internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): + + """Generator implementation of 1-time multi-tau correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate +The number of bins (of size 1) is one larger than the largest value in +`x`. If `minlength` is specified, there will be at least this number +of bins in the output array (though it will be longer if necessary, +depending on the contents of `x`). +Each bin gives the number of occurrences of its index value in `x`. +If `weights` is specified the input array is weighted by it, i.e. if a +value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead +of ``out[n] += 1``. + + Jan 2, 2018 YG. 
Add error bar calculation + + Parameters + ---------- + image_iterable : FD, a compressed eiger file by Multifile class + num_levels : int + how many generations of downsampling to perform, i.e., the depth of + the binomial tree of averaged frames + num_bufs : int, must be even + maximum lag step to compute in each generation of downsampling + labels : array + Labeled array of the same shape as the image stack. + Each ROI is represented by sequential integers starting at one. For + example, if you have four ROIs, they must be labeled 1, 2, 3, + 4. Background is labeled as 0 + internal_state : namedtuple, optional + internal_state is a bucket for all of the internal state of the + generator. It is part of the `results` object that is yielded from + this generator + + For the sake of normalization: + + imgsum: a list with the same length as FD, sum of each frame + qp, iq: the circular average radius (in pixel) and intensity + center: beam center + + Yields + ------ + +Returns +------- + + A `results` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - `g2`: the normalized correlation + shape is (len(lag_steps), num_rois) + - `lag_steps`: the times at which the correlation was computed + - `_internal_state`: all of the internal state. Can be passed back in + to `lazy_one_time` as the `internal_state` parameter + Notes + ----- + The normalized intensity-intensity time-autocorrelation function + is defined as + .. math:: + g_2(q, t') = \\frac{ }{^2} + t' > 0 + Here, ``I(q, t)`` refers to the scattering strength at the momentum + transfer vector ``q`` in reciprocal space at time ``t``, and the brackets + ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes + the delay time + This implementation is based on published work. [1]_ + References + ---------- + .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton, + "Area detector based photon correlation in the regime of + short data batches: Data reduction for dynamic x-ray + scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000. + """ + + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + + if bad_frame_list is None: + bad_frame_list=[] + for i in tqdm(range( FD.beg , FD.end )): + if i in bad_frame_list: + fra_pix[:]= np.nan + else: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + + if imgsum is None: + if norm is None: + fra_pix[ pxlist] = v[w] + else: + S = norm.shape + if len(S)>1: + fra_pix[ pxlist] = v[w]/ norm[i,pxlist] #-1.0 + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + else: + if norm is None: + fra_pix[ pxlist] = v[w] / imgsum[i] + else: + S = norm.shape + if len(S)>1: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[i,pxlist] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. 
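+        # Editor's note: at this point fra_pix holds the (optionally imgsum- and/or
+        # norm-corrected) ROI-pixel intensities of frame i; bad frames were filled with
+        # np.nan above so that the processing kernels can detect and skip them.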
+ s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + + #print( i, len(p), len(w), len( pixelist)) + + #print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = (1 + (s.cur[level - 1] - 2) % num_bufs) + s.cur[level] = ( + 1 + s.cur[level] % num_bufs) + + s.buf[level, s.cur[level] - 1] = (( + s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1]) / 2) + + # make the track_level zero once that level is processed + s.track_level[level] = False + + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. + buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. 
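+        # Editor's note: g_max below is the first lag whose normalization array is still
+        # zero (i.e. a delay longer than the data processed so far), so the yielded g2 is
+        # truncated to the populated lags; the division G/(past*future) is the symmetric
+        # normalization described in Lumma et al. (2000).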
+ if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min( g_max1, g_max2) + g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * + s.future_intensity[:g_max])) + yield results(g2, s.lag_steps[:g_max], s) + else: + yield results(None,s.lag_steps, s) + + + +def lazy_one_time_debug(FD, num_levels, num_bufs, labels, + internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + if bad_frame_list is None: + bad_frame_list=[] + for i in range( FD.beg , FD.end ): + print(i) + if i in bad_frame_list: + fra_pix[:]= np.nan + else: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + if imgsum is None: + if norm is None: + fra_pix[ pxlist] = v[w] + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + else: + if norm is None: + fra_pix[ pxlist] = v[w] / imgsum[i] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + #print( i, len(p), len(w), len( pixelist)) + #print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = (1 + (s.cur[level - 1] - 2) % num_bufs) + s.cur[level] = ( + 1 + s.cur[level] % num_bufs) + + s.buf[level, s.cur[level] - 1] = (( + s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1]) / 2) + # make the track_level zero once that level is processed + s.track_level[level] = False + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. 
+ buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + level += 1 + # Checking whether there is next level for processing + processing = level < num_levels + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. + if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min( g_max1, g_max2) + g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * + s.future_intensity[:g_max])) + yield results(g2, s.lag_steps[:g_max], s) + #yield( i ) + + else: + yield results(None,s.lag_steps, s) + + + +def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): + """ + This model will provide normalized intensity-intensity time + correlation data to be minimized. + Parameters + ---------- + lags : array + delay time + beta : float + optical contrast (speckle contrast), a sample-independent + beamline parameter + relaxation_rate : float + relaxation time associated with the samples dynamics. + baseline : float, optional + baseline of one time correlation + equal to one for ergodic samples + Returns + ------- + g2 : array + normalized intensity-intensity time autocorreltion + Notes : + ------- + The intensity-intensity autocorrelation g2 is connected to the intermediate + scattering factor(ISF) g1 + .. math:: + g_2(q, \\tau) = \\beta_1[g_1(q, \\tau)]^{2} + g_\infty + For a system undergoing diffusive dynamics, + .. math:: + g_1(q, \\tau) = e^{-\gamma(q) \\tau} + .. math:: + g_2(q, \\tau) = \\beta_1 e^{-2\gamma(q) \\tau} + g_\infty + These implementation are based on published work. [1]_ + References + ---------- + .. [1] L. Li, P. Kwasniewski, D. Orsi, L. Wiegart, L. Cristofolini, + C. Caronna and A. Fluerasu, " Photon statistics and speckle + visibility spectroscopy with partially coherent X-rays," + J. Synchrotron Rad. vol 21, p 1288-1295, 2014 + """ + return beta * np.exp(-2 * relaxation_rate * lags) + baseline + + +def multi_tau_auto_corr(num_levels, num_bufs, labels, images, bad_frame_list=None, + imgsum=None, norm=None,cal_error=False ): + """Wraps generator implementation of multi-tau + Original code(in Yorick) for multi tau auto correlation + author: Mark Sutton + For parameter description, please reference the docstring for + lazy_one_time. Note that there is an API difference between this function + and `lazy_one_time`. The `images` arugment is at the end of this function + signature here for backwards compatibility, but is the first argument in + the `lazy_one_time()` function. The semantics of the variables remain + unchanged. 
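+    Examples
+    --------
+    A minimal, hypothetical call (``FD`` is a compressed-data handler and
+    ``ring_mask`` a labeled ROI array; both names are placeholders for
+    whatever objects exist in your session)::
+
+        g2, lag_steps = multi_tau_auto_corr(num_levels=7, num_bufs=8,
+                                            labels=ring_mask, images=FD)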
+ """ + gen = lazy_one_time(images, num_levels, num_bufs, labels,bad_frame_list=bad_frame_list, imgsum=imgsum, + norm=norm,cal_error=cal_error ) + for result in gen: + pass + if cal_error: + return result.g2, result.lag_steps, result.internal_state + else: + return result.g2, result.lag_steps + +def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list =None, + imgsum= None, norm = None ): + """Wraps generator implementation of multi-tau two time correlation + This function computes two-time correlation + Original code : author: Yugang Zhang + Returns + ------- + results : namedtuple + For parameter definition, see the docstring for the `lazy_two_time()` + function in this module + """ + gen = lazy_two_time(FD, num_lev, num_buf, ring_mask, + two_time_internal_state= None, + bad_frame_list=bad_frame_list, imgsum=imgsum, norm = norm ) + for result in gen: + pass + return two_time_state_to_results(result) + + +def lazy_two_time(FD, num_levels, num_bufs, labels, + two_time_internal_state=None, bad_frame_list=None, imgsum= None, norm = None ): + +#def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, +# two_time_internal_state=None): + """ Generator implementation of two-time correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Multi-tau correlation uses a scheme to achieve long-time correlations + inexpensively by downsampling the data, iteratively combining successive + frames. + The longest lag time computed is num_levels * num_bufs. + ** see comments on multi_tau_auto_corr + Parameters + ---------- + FD: the handler of compressed data + num_levels : int, optional + how many generations of downsampling to perform, i.e., + the depth of the binomial tree of averaged frames + default is one + num_bufs : int, must be even + maximum lag step to compute in each generation of + downsampling + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + two_time_internal_state: None + + + Yields + ------ + namedtuple + A ``results`` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - ``g2``: the normalized correlation + shape is (num_rois, len(lag_steps), len(lag_steps)) + - ``lag_steps``: the times at which the correlation was computed + - ``_internal_state``: all of the internal state. Can be passed back in + to ``lazy_one_time`` as the ``internal_state`` parameter + Notes + ----- + The two-time correlation function is defined as + .. math:: + C(q,t_1,t_2) = \\frac{}{} + Here, the ensemble averages are performed over many pixels of detector, + all having the same ``q`` value. The average time or age is equal to + ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal. + The time difference ``t = |t1 - t2|``, with is distance from the + ``t1 = t2`` diagonal in the perpendicular direction. + In the equilibrium system, the two-time correlation functions depend only + on the time difference ``t``, and hence the two-time correlation contour + lines are parallel. + References + ---------- + .. [1] + A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, "Slow dynamics + and aging in collodial gels studied by x-ray photon correlation + spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. 
+ """ + + num_frames = FD.end - FD.beg + if two_time_internal_state is None: + two_time_internal_state = _init_state_two_time(num_levels, num_bufs,labels, num_frames) + # create a shorthand reference to the results and state named tuple + s = two_time_internal_state + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + if bad_frame_list is None: + bad_frame_list=[] + + for i in tqdm(range( FD.beg , FD.end )): + if i in bad_frame_list: + fra_pix[:]= np.nan + else: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + if imgsum is None: + if norm is None: + fra_pix[ pxlist] = v[w] + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + else: + if norm is None: + fra_pix[ pxlist] = v[w] / imgsum[i] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + s.count_level[0] = 1 + s.count_level[0] + # get the current image time + s = s._replace(current_img_time=(s.current_img_time + 1)) + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + _two_time_process(s.buf, s.g2, s.label_array, num_bufs, + s.num_pixels, s.img_per_level, s.lag_steps, + s.current_img_time, + level=0, buf_no=s.cur[0] - 1) + # time frame for each level + s.time_ind[0].append(s.current_img_time) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + # Compute the correlations for all higher levels. + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = 1 + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1] )/2 + + t1_idx = (s.count_level[level] - 1) * 2 + + current_img_time = ((s.time_ind[level - 1])[t1_idx] + + (s.time_ind[level - 1])[t1_idx + 1])/2. + # time frame for each level + s.time_ind[level].append(current_img_time) + # make the track_level zero once that level is processed + s.track_level[level] = 0 + # call the _two_time_process function for each multi-tau level + # for multi-tau levels greater than one + # Again, this is modifying things in place. See comment + # on previous call above. 
+ _two_time_process(s.buf, s.g2, s.label_array, num_bufs, + s.num_pixels, s.img_per_level, s.lag_steps, + current_img_time, + level=level, buf_no=s.cur[level]-1) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + #print (s.g2[1,:,1] ) + yield s + + +def two_time_state_to_results(state): + """Convert the internal state of the two time generator into usable results + Parameters + ---------- + state : namedtuple + The internal state that is yielded from `lazy_two_time` + Returns + ------- + results : namedtuple + A results object that contains the two time correlation results + and the lag steps + """ + for q in range(np.max(state.label_array)): + x0 = (state.g2)[q, :, :] + (state.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T - + np.diag(np.diag(x0))) + return results(state.g2, state.lag_steps, state) + + + +def _two_time_process(buf, g2, label_array, num_bufs, num_pixels, + img_per_level, lag_steps, current_img_time, + level, buf_no): + """ + Parameters + ---------- + buf: array + image data array to use for two time correlation + g2: array + two time correlation matrix + shape (number of labels(ROI), number of frames, number of frames) + label_array: array + Elements not inside any ROI are zero; elements inside each + ROI are 1, 2, 3, etc. corresponding to the order they are specified + in edges and segments + num_bufs: int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are len(np.unique(label_array)) + img_per_level: array + to track how many images processed in each level + lag_steps : array + delay or lag steps for the multiple tau analysis + shape num_levels + current_img_time : int + the current image number + level : int + the current multi-tau level + buf_no : int + the current buffer number + """ + img_per_level[level] += 1 + + # in multi-tau correlation other than first level all other levels + # have to do the half of the correlation + if level == 0: + i_min = 0 + else: + i_min = num_bufs//2 + + for i in range(i_min, min(img_per_level[level], num_bufs)): + t_index = level*num_bufs/2 + i + delay_no = (buf_no - i) % num_bufs + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + + #print( np.sum( past_img ), np.sum( future_img )) + + # get the matrix of correlation function without normalizations + tmp_binned = (np.bincount(label_array, + weights=past_img*future_img)[1:]) + # get the matrix of past intensity normalizations + pi_binned = (np.bincount(label_array, + weights=past_img)[1:]) + + # get the matrix of future intensity normalizations + fi_binned = (np.bincount(label_array, + weights=future_img)[1:]) + + tind1 = (current_img_time - 1) + tind2 = (current_img_time - lag_steps[int(t_index)] - 1) + #print( current_img_time ) + + if not isinstance(current_img_time, int): + nshift = 2**(level-1) + for i in range(-nshift+1, nshift+1): + g2[:, int(tind1+i), + int(tind2+i)] = (tmp_binned/(pi_binned * + fi_binned))*num_pixels + else: + g2[:, int(tind1), int(tind2)] = tmp_binned/(pi_binned * fi_binned)*num_pixels + + #print( num_pixels ) + + +def _init_state_two_time(num_levels, num_bufs, labels, num_frames): + """Initialize a stateful namedtuple for two time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + num_frames : int + number of images to use + default is number of images + Returns + ------- + internal_state : namedtuple + The namedtuple that 
contains all the state information that + `lazy_two_time` requires so that it can be used to pick up processing + after it was interrupted + """ + (label_array, pixel_list, num_rois, num_pixels, lag_steps, + buf, img_per_level, track_level, cur, norm, + lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # to count images in each level + count_level = np.zeros(num_levels, dtype=np.int64) + + # current image time + current_img_time = 0 + + # generate a time frame for each level + time_ind = {key: [] for key in range(num_levels)} + + # two time correlation results (array) + g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64) + + return _two_time_internal_state( + buf, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + g2, + count_level, + current_img_time, + time_ind, + norm, + lev_len, + ) + +def one_time_from_two_time(two_time_corr): + """ + This will provide the one-time correlation data from two-time + correlation data. + Parameters + ---------- + two_time_corr : array + matrix of two time correlation + shape (number of labels(ROI's), number of frames, number of frames) + Returns + ------- + one_time_corr : array + matrix of one time correlation + shape (number of labels(ROI's), number of frames) + """ + + one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2])) + for g in two_time_corr: + for j in range(two_time_corr.shape[2]): + one_time_corr[:, j] = np.trace(g, offset=j)/two_time_corr.shape[2] + return one_time_corr + + +def cal_c12c( FD, ring_mask, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None ): + '''calculation two_time correlation by using a multi-tau algorithm''' + + #noframes = FD.end - good_start # number of frames, not "no frames" + + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # number of frames, not "no frames" + #num_buf = 8 # number of buffers + + if num_lev is None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list)!=0: + print ('Bad frame involved and will be precessed!') + noframes -= len(np.where(np.in1d( bad_frame_list, + range(good_start, FD.end)))[0]) + print ('%s frames will be processed...'%(noframes)) + + c12, lag_steps, state = multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm ) + + print( 'Two Time Calculation is DONE!') + m, n, n = c12.shape + #print( m,n,n) + c12_ = np.zeros( [n,n,m] ) + for i in range( m): + c12_[:,:,i ] = c12[i] + return c12_, lag_steps + + + +def cal_g2c( FD, ring_mask, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None,cal_error=False ): + '''calculation g2 by using a multi-tau algorithm''' + + #noframes = FD.end - good_start # number of frames, not "no frames" + + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # number of frames, not "no frames" + #num_buf = 8 # number of buffers + + if num_lev is None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list)!=0: + print ('Bad frame involved and will be precessed!') + noframes -= len(np.where(np.in1d( bad_frame_list, + range(good_start, FD.end)))[0]) + + print ('%s frames will be processed...'%(noframes)) + if 
cal_error: + g2, lag_steps, s = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm,cal_error=cal_error ) + + g2 = np.zeros_like( s.G ) + g2_err = np.zeros_like(g2) + qind, pixelist = extract_label_indices(ring_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + Ntau, Nq = s.G.shape + g_max = 1e30 + for qi in range(1,1+Nq): + pixelist_qi = np.where( qind == qi)[0] + s_Gall_qi = s.G_all[:,pixelist_qi] + s_Pall_qi = s.past_intensity_all[:,pixelist_qi] + s_Fall_qi = s.future_intensity_all[:,pixelist_qi] + avgGi = (np.average( s_Gall_qi, axis=1)) + devGi = (np.std( s_Gall_qi, axis=1)) + avgPi = (np.average( s_Pall_qi, axis=1)) + devPi = (np.std( s_Pall_qi, axis=1)) + avgFi = (np.average( s_Fall_qi, axis=1)) + devFi = (np.std( s_Fall_qi, axis=1)) + + if len(np.where(avgPi == 0)[0]) != 0: + g_max1 = np.where(avgPi == 0)[0][0] + else: + g_max1 = avgPi.shape[0] + if len(np.where(avgFi == 0)[0]) != 0: + g_max2 = np.where(avgFi == 0)[0][0] + else: + g_max2 = avgFi.shape[0] + g_max = min( g_max1, g_max2) + #print(g_max) + #g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * + # s.future_intensity[:g_max])) + g2[:g_max,qi-1] = avgGi[:g_max]/( avgPi[:g_max] * avgFi[:g_max] ) + g2_err[:g_max,qi-1] = np.sqrt( + ( 1/ ( avgFi[:g_max] * avgPi[:g_max] ))**2 * devGi[:g_max] ** 2 + + ( avgGi[:g_max]/ ( avgFi[:g_max]**2 * avgPi[:g_max] ))**2 * devFi[:g_max] ** 2 + + ( avgGi[:g_max]/ ( avgFi[:g_max] * avgPi[:g_max]**2 ))**2 * devPi[:g_max] ** 2 + ) + + print( 'G2 with error bar calculation DONE!') + return g2[:g_max,:], lag_steps[:g_max], g2_err[:g_max,:]/np.sqrt(nopr), s + else: + g2, lag_steps = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm,cal_error=cal_error ) + + print( 'G2 calculation DONE!') + return g2, lag_steps + + + +def get_pixelist_interp_iq( qp, iq, ring_mask, center): + + qind, pixelist = roi.extract_label_indices( ring_mask ) + #pixely = pixelist%FD.md['nrows'] -center[1] + #pixelx = pixelist//FD.md['nrows'] - center[0] + + pixely = pixelist%ring_mask.shape[1] -center[1] + pixelx = pixelist//ring_mask.shape[1] - center[0] + + r= np.hypot(pixelx, pixely) #leave as float. 
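+    # r is the radial distance (in pixels) of every ROI pixel from the beam
+    # center; np.interp below maps it onto the 1D curve (qp, iq), so qp is
+    # expected in the same pixel units as r, and the result is a per-pixel
+    # interpolated I(q) value that can be used as a normalization.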
+ #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return np.interp( r, qp, iq ) + + +class Get_Pixel_Arrayc_todo(object): + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + ''' + + def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, + norm_inten = None, qind=None): + ''' + indexable: a images sequences + pixelist: 1-D array, interest pixel list + norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity + imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] + norm_inten: if True, each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame + qind: the index of each ROI in one frame, i.e., q + if norm_inten is True: qind has to be given + + ''' + if beg is None: + self.beg = FD.beg + if end is None: + self.end = FD.end + #if self.beg ==0: + # self.length = self.end - self.beg + #else: + # self.length = self.end - self.beg + 1 + + self.length = self.end - self.beg + + self.FD = FD + self.pixelist = pixelist + self.norm = norm + self.imgsum = imgsum + self.norm_inten= norm_inten + self.qind = qind + if self.norm_inten is not None: + if self.qind is None: + print('Please give qind.') + + def get_data(self ): + ''' + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + ''' + + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + + if self.norm_inten is not None: + #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + noqs = len(np.unique( self.qind )) + nopr = np.bincount(self.qind-1) + noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) + qind_ = np.zeros_like( self.qind ) + for j in range(noqs): + qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] + + n=0 + for i in tqdm(range( self.beg , self.end )): + (p,v) = self.FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + #np.bincount( qind[pxlist], weight= + + + if self.mean_int_sets is not None:#for each frame will normalize each ROI by it's averaged value + for j in range(noqs): + #if i ==100: + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) + Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] + + #if i==100: + # print( i, Mean_Int_Qind[ self.qind== 11 ]) + + #print('Do norm_mean_int here') + #if i ==10: + # print( norm_Mean_Int_Qind ) + else: + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] + else: + norm_imgsum = 1.0 + if self.norm is not None: + norm_avgimg_roi = self.norm[pxlist] + else: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + #if i==100: + # 
print(norm_Mean_Int_Qind[:100]) + data_array[n][ pxlist] = v[w]/ norms + n +=1 + + return data_array + + + + + +class Get_Pixel_Arrayc(object): + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + ''' + + def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, + mean_int_sets = None, qind=None ): + ''' + indexable: a images sequences + pixelist: 1-D array, interest pixel list + norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity + imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] + mean_int_sets: each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame + qind: the index of each ROI in one frame, i.e., q + if mean_int_sets is not None: qind has to be not None + + ''' + if beg is None: + self.beg = FD.beg + if end is None: + self.end = FD.end + #if self.beg ==0: + # self.length = self.end - self.beg + #else: + # self.length = self.end - self.beg + 1 + + self.length = self.end - self.beg + + self.FD = FD + self.pixelist = pixelist + self.norm = norm + self.imgsum = imgsum + self.mean_int_sets= mean_int_sets + self.qind = qind + if self.mean_int_sets is not None: + if self.qind is None: + print('Please give qind.') + + def get_data(self ): + ''' + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + ''' + + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + + if self.mean_int_sets is not None: + #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) + noqs = len(np.unique( self.qind )) + nopr = np.bincount(self.qind-1) + noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) + qind_ = np.zeros_like( self.qind ) + for j in range(noqs): + qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] + + n=0 + for i in tqdm(range( self.beg , self.end )): + (p,v) = self.FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + + if self.mean_int_sets is not None:#for normalization of each averaged ROI of each frame + for j in range(noqs): + #if i ==100: + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) + Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] + + #if i==100: + # print( i, Mean_Int_Qind[ self.qind== 11 ]) + + #print('Do norm_mean_int here') + #if i ==10: + # print( norm_Mean_Int_Qind ) + else: + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] + else: + norm_imgsum = 1.0 + if self.norm is not None: + if len( (self.norm).shape )>1: + norm_avgimg_roi = self.norm[i][pxlist] + #print('here') + + else: + norm_avgimg_roi = self.norm[pxlist] + else: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + #if i==100: + # print(norm_Mean_Int_Qind[:100]) + data_array[n][ pxlist] = 
v[w]/ norms + n +=1 + + return data_array + + +def auto_two_Arrayc( data_pixel, rois, index=None): + + ''' + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange( 1, noqs + 1 ) + else: + try: + len(index) + index = np.array( index ) + except TypeError: + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) + try: + g12b = np.zeros( [noframes, noframes, len(qlist) ] ) + DO = True + except: + print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + + if DO: + i = 0 + for qi in tqdm(qlist ): + #print (qi-1) + pixelist_qi = np.where( qind == qi)[0] + #print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:,pixelist_qi] + sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes ) + sum2 = sum1.T + #print( qi, qlist, ) + #print( g12b[:,:,qi -1 ] ) + g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] + i +=1 + return g12b + +def auto_two_Arrayc_ExplicitNorm( data_pixel, rois, norm=None, index=None): + + ''' + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function by giving explict normalization + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + norm: if not None, shoud be the shape as data_pixel, will normalize two time by this norm + if None, will return two time without normalization + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange( 1, noqs + 1 ) + else: + try: + len(index) + index = np.array( index ) + except TypeError: + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) + try: + g12b = np.zeros( [noframes, noframes, len(qlist) ] ) + DO = True + except: + print("The array is too large. The Sever can't handle such big array. 
Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + if DO: + i = 0 + for qi in tqdm(qlist ): + pixelist_qi = np.where( qind == qi)[0] + data_pixel_qi = data_pixel[:,pixelist_qi] + if norm is not None: + norm1 = norm[:,pixelist_qi] + sum1 = (np.average( norm1, axis=1)).reshape( 1, noframes ) + sum2 = sum1.T + else: + sum1=1 + sum2=1 + g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2/ nopr[qi -1] + i +=1 + return g12b + + +def two_time_norm( data_pixel, rois, index=None): + + ''' + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange( 1, noqs + 1 ) + else: + try: + len(index) + index = np.array( index ) + except TypeError: + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) + try: + norm = np.zeros( len(qlist) ) + DO = True + except: + print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + + if DO: + i = 0 + for qi in tqdm(qlist ): + #print (qi-1) + pixelist_qi = np.where( qind == qi)[0] + #print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:,pixelist_qi] + sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes ) + norm[i] = np.average(sum1 ) + #sum2 = sum1.T + #print( qi, qlist, ) + #print( g12b[:,:,qi -1 ] ) + #g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] + i +=1 + return norm + + + + +def check_normalization( frame_num, q_list, imgsa, data_pixel ): + '''check the ROI intensity before and after normalization + Input: + frame_num: integer, the number of frame to be checked + q_list: list of integer, the list of q to be checked + imgsa: the raw data + data_pixel: the normalized data, caculated by fucntion Get_Pixel_Arrayc + Plot the intensities + ''' + fig,ax=plt.subplots(2) + n=0 + for q in q_list: + norm_data = data_pixel[frame_num][qind==q] + raw_data = np.ravel( np.array(imgsa[frame_num]) )[pixelist[qind==q]] + #print(raw_data.mean()) + plot1D( raw_data,ax=ax[0], legend='q=%s'%(q), m=markers[n], + title='fra=%s_raw_data'%(frame_num)) + + #plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], + # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + #print( mean_int_sets_[frame_num][q-1] ) + plot1D( norm_data, ax=ax[1], legend='q=%s'%(q), m=markers[n], + xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + n +=1 + diff --git a/pyCHX/backups/chx_generic_functions_01252025.py b/pyCHX/backups/chx_generic_functions_01252025.py new file mode 100644 index 0000000..d4c811b --- /dev/null +++ b/pyCHX/backups/chx_generic_functions_01252025.py @@ -0,0 +1,6365 @@ +import copy +from datetime import datetime +from os import listdir +from shutil import copyfile + 
+import matplotlib.cm as mcm +import numpy as np +import PIL +import pytz +import scipy +from matplotlib import cm +from modest_image import imshow +from scipy.special import erf +from skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta, twotheta_to_q +from skimage.draw import disk, ellipse, line, line_aa, polygon +from skimage.filters import prewitt + +# from tqdm import * +from pyCHX.chx_libs import * +from pyCHX.chx_libs import colors, markers + +markers = [ + "o", + "D", + "v", + "^", + "<", + ">", + "p", + "s", + "H", + "h", + "*", + "d", + "8", + "1", + "3", + "2", + "4", + "+", + "x", + "_", + "|", + ",", + "1", +] +markers = np.array(markers * 100) + + +flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +""" +a function to flatten a nest list +e.g., flatten( [ ['sg','tt'],'ll' ] ) +gives ['sg', 'tt', 'l', 'l'] +""" + + +def get_frames_from_dscan(uid, detector="eiger4m_single_image"): + """Get frames from a dscan by giving uid and detector""" + hdr = db[uid] + return db.get_images(hdr, detector) + + +def get_roi_intensity(img, roi_mask): + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + avgs = np.zeros(noqs) + for i in tqdm(range(1, 1 + noqs)): + avgs[i - 1] = np.average(img[roi_mask == i]) + return avgs + + +def generate_h5_list(inDir, filename): + """YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + Input: + inDir: the input direction + filename: the filename for output (have to lst as extension) + Output: + Save the all h5 filenames in a lst file + """ + fp_list = listdir(inDir) + if filename[-4:] != ".lst": + filename += ".lst" + for FP in fp_list: + FP_ = inDir + FP + if os.path.isdir(FP_): + fp = listdir(FP_) + for fp_ in fp: + if ".h5" in fp_: + append_txtfile(filename=filename, data=np.array([FP_ + "/" + fp_])) + print("The full path of all the .h5 in %s has been saved in %s." 
% (inDir, filename)) + print("You can use ./analysis/run_gui to visualize all the h5 file.") + + +def fit_one_peak_curve(x, y, fit_range=None): + """YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape + Parameters: + x: one-d array, x-axis data + y: one-d array, y-axis data + fit_range: [x1, x2], a list of index, to define the x-range for fit + Return: + center: float, center of the peak + center_std: float, error bar of center in the fitting + fwhm: float, full width at half max intensity of the peak, 2*sigma + fwhm_std:float, error bar of the full width at half max intensity of the peak + xf: the x in the fit + out: the fitting class resutled from lmfit + + """ + from lmfit.models import LinearModel, LorentzianModel + + peak = LorentzianModel() + background = LinearModel() + model = peak + background + if fit_range != None: + x1, x2 = fit_range + xf = x[x1:x2] + yf = y[x1:x2] + else: + xf = x + yf = y + model.set_param_hint("slope", value=5) + model.set_param_hint("intercept", value=0) + model.set_param_hint("center", value=0.005) + model.set_param_hint("amplitude", value=0.1) + model.set_param_hint("sigma", value=0.003) + # out=model.fit(yf, x=xf)#, method='nelder') + out = model.fit(yf, x=xf, method="leastsq") + cen = out.params["center"].value + cen_std = out.params["center"].stderr + wid = out.params["sigma"].value * 2 + wid_std = out.params["sigma"].stderr * 2 + return cen, cen_std, wid, wid_std, xf, out + + +def plot_xy_with_fit( + x, + y, + xf, + out, + cen, + cen_std, + wid, + wid_std, + xlim=[1e-3, 0.01], + xlabel="q (" r"$\AA^{-1}$)", + ylabel="I(q)", + filename=None, +): + """YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid""" + + yf2 = out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, m="o", ls="", c="k", legend="data") + plot1D(x=xf, y=yf2, ax=ax, m="", ls="-", c="r", legend="fit", logy=True) + ax.set_xlim(xlim) + # ax.set_ylim( 0.1, 4) + # ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + txts = r"peak" + r" = %.5f +/- %.5f " % (cen, cen_std) + ax.text(x=0.02, y=0.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"wid" + r" = %.4f +/- %.4f" % (wid, wid_std) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) + plt.tight_layout() + if filename != None: + plt.savefig(filename) + return ax + + +def get_touched_qwidth(qcenters): + """YG Dev@CHX April 2019, get touched qwidth by giving qcenters""" + qwX = np.zeros_like(qcenters) + qW = qcenters[1:] - qcenters[:-1] + qwX[0] = qW[0] + for i in range(1, len(qcenters) - 1): + # print(i) + qwX[i] = min(qW[i - 1], qW[i]) + qwX[-1] = qW[-1] + qwX *= 0.9999 + return qwX + + +def append_txtfile(filename, data, fmt="%s", *argv, **kwargs): + """YG. 
Dev May 10, 2109 append data to a file + Create an empty file if the file dose not exist, otherwise, will append the data to it + Input: + fp: filename + data: the data to be append + fmt: the parameter defined in np.savetxt + + """ + from numpy import savetxt + + exists = os.path.isfile(filename) + if not exists: + np.savetxt( + filename, + [], + fmt="%s", + ) + print("create new file") + + f = open(filename, "a") + savetxt(f, data, fmt=fmt, *argv, **kwargs) + f.close() + + +def get_roi_mask_qval_qwid_by_shift( + new_cen, new_mask, old_cen, old_roi_mask, setup_pargs, geometry, limit_qnum=None +): + """YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask""" + center = setup_pargs["center"] + roi_mask1 = shift_mask( + new_cen=center, new_mask=new_mask, old_cen=old_cen, old_roi_mask=old_roi_mask, limit_qnum=limit_qnum + ) + qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( + new_mask=new_mask, setup_pargs=setup_pargs, old_roi_mask=old_roi_mask, old_cen=old_cen, geometry=geometry + ) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1, new_mask) + # print(w,w1) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k in w1} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k in w1} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return roi_mask1, qval_dict, qwid_dict + + +def get_zero_nozero_qind_from_roi_mask(roi_mask, mask): + """YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number""" + qind, pixelist = roi.extract_label_indices(roi_mask * mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + w = np.where(nopr == 0)[0] + w1 = np.where(nopr != 0)[0] + return w, w1 + + +def get_masked_qval_qwid_dict_using_Rmax(new_mask, setup_pargs, old_roi_mask, old_cen, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method""" + cy, cx = setup_pargs["center"] + my, mx = new_mask.shape + Rmax = int( + np.ceil(max(np.hypot(cx, cy), np.hypot(cx - mx, cy - my), np.hypot(cx, cy - my), np.hypot(cx - mx, cy))) + ) + Fmask = np.zeros([Rmax * 2, Rmax * 2], dtype=int) + Fmask[Rmax - cy : Rmax - cy + my, Rmax - cx : Rmax - cx + mx] = new_mask + roi_mask1 = shift_mask( + new_cen=[Rmax, Rmax], + new_mask=np.ones_like(Fmask), + old_cen=old_cen, + old_roi_mask=old_roi_mask, + limit_qnum=None, + ) + setup_pargs_ = { + "center": [Rmax, Rmax], + "dpix": setup_pargs["dpix"], + "Ldet": setup_pargs["Ldet"], + "lambda_": setup_pargs["lambda_"], + } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict(roi_mask1, Fmask, setup_pargs_, geometry) + # w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1 # ,w + + +def get_masked_qval_qwid_dict(roi_mask, mask, setup_pargs, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask""" + + qval_dict_, qwid_dict_ = get_qval_qwid_dict(roi_mask, setup_pargs, geometry=geometry) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask, mask) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k not in w} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k not in w} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return qval_dict, qwid_dict + + +def get_qval_qwid_dict(roi_mask, setup_pargs, geometry="saxs"): + """YG 
Dev April 6, 2019 + Get qval_dict and qwid_dict by giving roi_mask, setup_pargs + Input: + roi_mask: integer type 2D array + setup_pargs: dict, should at least contains, center (direct beam center), dpix (in mm), + lamda_: in A-1, Ldet: in mm + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + geometry: support saxs for isotropic transmission SAXS + ang_saxs for anisotropic transmission SAXS + flow_saxs for anisotropic transmission SAXS under flow (center symetric) + + Return: + qval_dict: dict, key as q-number, val: q val + qwid_dict: dict, key as q-number, val: q width (qmax - qmin) + + TODOLIST: to make GiSAXS work + + """ + + origin = setup_pargs["center"] # [::-1] + shape = roi_mask.shape + qp_map = radial_grid(origin, shape) + phi_map = np.degrees(angle_grid(origin, shape)) + two_theta = radius_to_twotheta(setup_pargs["Ldet"], setup_pargs["dpix"] * qp_map) + q_map = utils.twotheta_to_q(two_theta, setup_pargs["lambda_"]) + qind, pixelist = roi.extract_label_indices(roi_mask) + Qval = np.unique(qind) + qval_dict_ = {} + qwid_dict_ = {} + for j, i in enumerate(Qval): + qval = q_map[roi_mask == i] + # print( qval ) + if geometry == "saxs": + qval_dict_[j] = [(qval.max() + qval.min()) / 2] # np.mean(qval) + qwid_dict_[j] = [(qval.max() - qval.min())] + + elif geometry == "ang_saxs": + aval = phi_map[roi_mask == i] + # print(j,i,qval, aval) + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) + else: + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) + + elif geometry == "flow_saxs": + sx, sy = roi_mask.shape + cx, cy = origin + aval = (phi_map[cx:])[roi_mask[cx:] == i] + if len(aval) == 0: + aval = (phi_map[:cx])[roi_mask[:cx] == i] + 180 + + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + # print(aval) + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) + else: + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) + + return qval_dict_, qwid_dict_ + + +def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): + """Get normalization of a time series by SavitzkyGolay filter + Input: + FD: file handler for a compressed data + pixelist: pixel list for a roi_mask + bins: the bin number for the time series, if number = total number of the time frame, + it means SG of the time averaged image + mask: the additional mask + window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details + Return: + norm: shape as ( length of FD, length of pixelist ) + """ + if mask == None: + mask = 1 + beg = FD.beg + end = FD.end + N = end - beg + BEG = beg + if bins == 1: + END = end 
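+        # bins == 1: every frame gets its own Savitzky-Golay smoothed normalization
+        # map; for bins > 1 (the else branch below) frames are first averaged in
+        # groups of `bins` and one map is computed per group.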
+ NB = N + MOD = 0 + else: + END = N // bins + MOD = N % bins + NB = END + norm = np.zeros([end, len(pixelist)]) + for i in tqdm(range(NB)): + if bins == 1: + img = FD.rdframe(i + BEG) + else: + for j in range(bins): + ct = i * bins + j + BEG + # print(ct) + if j == 0: + img = FD.rdframe(ct) + n = 1.0 + else: + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + # img += FD.rdframe( ct ) + n += 1 + img /= n + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + normi = np.ravel(avg_imgf)[pixelist] + if bins == 1: + norm[i + beg] = normi + else: + norm[i * bins + beg : (i + 1) * bins + beg] = normi + if MOD: + for j in range(MOD): + ct = (1 + i) * bins + j + BEG + if j == 0: + img = FD.rdframe(ct) + n = 1.0 + else: + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + n += 1 + img /= n + # print(ct,n) + img = FD.rdframe(ct) + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + normi = np.ravel(avg_imgf)[pixelist] + norm[(i + 1) * bins + beg : (i + 2) * bins + beg] = normi + return norm + + +def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): + """Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask + Input: + new_cen: [x,y] in uint of pixel + new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask + old_cen: [x,y] in uint of pixel + old_roi_mask: the roi_mask to be shifted + limit_qnum: integer, if not None, defines the max number of unique values of nroi_mask + + Output: + the shifted/croped roi_mask + """ + nsx, nsy = new_mask.shape + down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] + x1, x2, y1, y2 = [old_cen[0] - down, old_cen[0] + up, old_cen[1] - left, old_cen[1] + right] + nroi_mask_ = old_roi_mask[x1:x2, y1:y2] * new_mask + nroi_mask = np.zeros_like(nroi_mask_) + qind, pixelist = roi.extract_label_indices(nroi_mask_) + qu = np.unique(qind) + # noqs = len( qu ) + # nopr = np.bincount(qind, minlength=(noqs+1))[1:] + # qm = nopr>0 + for j, qv in enumerate(qu): + nroi_mask[nroi_mask_ == qv] = j + 1 + if limit_qnum != None: + nroi_mask[nroi_mask > limit_qnum] = 0 + return nroi_mask + + +def plot_q_g2fitpara_general( + g2_dict, + g2_fitpara, + geometry="saxs", + ylim=None, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + ylabel="g2", + qth_interest=None, + max_plotnum_fig=1600, + qphi_analysis=False, + *argv, + **kwargs, +): + """ + Mar 29,2019, Y.G.@CHX + + plot q~fit parameters + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid_ = kwargs["uid"] + else: + uid_ = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + data_dir = path + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" + + if geometry == "saxs": + if qphi_analysis: + geometry = "ang_saxs" + + qval_dict_, fit_res_ = g2_dict, g2_fitpara + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + # print(qr_label, qz_label, short_ulabel, long_ulabel) + # $print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( + g2_fitpara["beta"], + g2_fitpara["relaxation_rate"], + g2_fitpara["baseline"], + g2_fitpara["alpha"], + ) + + fps = [] + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + betai, relaxation_ratei, baselinei, alphai = ( + beta[ind_long_i], + relaxation_rate[ind_long_i], + baseline[ind_long_i], + alpha[ind_long_i], + ) + qi = long_ulabel + # print(s_ind, qi, np.array( betai) ) + + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": + fig = plt.figure(figsize=(8, 6)) + else: + if num_short > 1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + # print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) + else: + # print('Here') + if master_plot != "qz": + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == "qz": + if geometry == "ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (uid_, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + else: + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + + else: + sy = 1 + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + temp = sy + sy = sx + sx = temp + if sx == 1: + if sy == 1: + plt.axis("on") + ax1 = fig.add_subplot(4, 1, 1) + ax2 = fig.add_subplot(4, 1, 2) + ax3 = fig.add_subplot(4, 1, 3) + ax4 = fig.add_subplot(4, 1, 4) + plot1D(x=qi, y=betai, m="o", ls="--", c="k", ax=ax1, legend=r"$\beta$", title="") + plot1D(x=qi, y=alphai, m="o", ls="--", c="r", ax=ax2, legend=r"$\alpha$", title="") + plot1D(x=qi, y=baselinei, m="o", ls="--", c="g", ax=ax3, legend=r"$baseline$", title="") + plot1D(x=qi, y=relaxation_ratei, m="o", c="b", ls="--", ax=ax4, legend=r"$\gamma$ $(s^{-1})$", title="") + + ax4.set_ylabel(r"$\gamma$ $(s^{-1})$") + ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) + ax3.set_ylabel(r"$baseline") + ax2.set_ylabel(r"$\alpha$") + ax1.set_ylabel(r"$\beta$") + fig.tight_layout() + fp = data_dir + uid_ + 
"g2_q_fit_para_%s.png" % short_ulabel[s_ind] + fig.savefig(fp, dpi=fig.dpi) + fps.append(fp) + outputfile = data_dir + "%s_g2_q_fitpara_plot" % uid_ + ".png" + # print(uid) + combine_images(fps, outputfile, outsize=[2000, 2400]) + + +def plot_q_rate_general( + qval_dict, + rate, + geometry="saxs", + ylim=None, + logq=True, + lograte=True, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + *argv, + **kwargs, +): + """ + Mar 29,2019, Y.G.@CHX + + plot q~rate in log-log scale + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. + show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig, ax = plt.subplots() + plt.title(r"$Q$" "-Rate-%s" % (uid), fontsize=20, y=1.06) + Nqz = num_short + if Nqz != 1: + ls = "--" + else: + ls = "" + # print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] + else: + label = "" + ax.loglog(x, y, marker="o", ls=ls, label=label) + if Nqz != 1: + legend = ax.legend(loc="best") + + if plot_index_range != None: + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) + + if ylim != None: + ax.set_ylim(ylim) + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$) (log)") + ax.set_xlabel("$q$" r"($\AA$) (log)") + fp = path + "%s_Q_Rate_loglog" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig, ax + + +def plot_xy_x2( + x, + y, + x2=None, + pargs=None, + loglog=False, + logy=True, + fig_ax=None, + xlabel="q (" r"$\AA^{-1}$)", + xlabel2="q (pixel)", + title="_q_Iq", + ylabel="I(q)", + save=True, + *argv, + **kwargs, +): + """YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + This funciton is primary for plot q-Iq + + Input: + x: one-d array, x in one unit + y: one-d array, + x2:one-d array, x in anoter unit + pargs: dict, could include 'uid', 'path' + loglog: if True, if plot x and y in log, by default plot in y-log + save: if True, save the plot in the path defined in pargs + kwargs: could include xlim (in unit of index), ylim (in unit of real value) + + """ + if fig_ax == None: + fig, ax1 = plt.subplots() + else: + fig, ax1 = fig_ax + if pargs != None: + uid = pargs["uid"] + path = pargs["path"] + else: + uid = "XXX" + path = "" + if loglog: + ax1.loglog(x, y, "-o") + elif logy: + ax1.semilogy(x, y, "-o") + else: + ax1.plot(x, y, "-o") + ax1.set_xlabel(xlabel) + ax1.set_ylabel(ylabel) + title = ax1.set_title("%s--" % uid + title) + Nx = len(x) + if "xlim" in kwargs.keys(): + xlim = kwargs["xlim"] + if xlim[1] > Nx: + xlim[1] = Nx - 1 + else: + xlim = [0, Nx] + if "ylim" in kwargs.keys(): + ylim = kwargs["ylim"] + 
else: + ylim = [y.min(), y.max()] + lx1, lx2 = xlim + ax1.set_xlim([x[lx1], x[lx2]]) + ax1.set_ylim(ylim) + if x2 != None: + ax2 = ax1.twiny() + ax2.set_xlabel(xlabel2) + ax2.set_ylabel(ylabel) + ax2.set_xlim([x2[lx1], x2[lx2]]) + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + if save: + path = pargs["path"] + fp = path + "%s_q_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0): + """save oavs as png""" + tifs = list(db[uid].data("OAV_image"))[0] + try: + pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) + except: + pixel_scalebar = None + print("No OAVS resolution is available.") + + text_string = "%s $\mu$m" % scalebar_size + h = db[uid] + oavs = tifs + + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list + detectors = sorted(get_detectors(h)) + for d in range(len(detectors)): + try: + oav_period = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_period"] + oav_expt = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_time"] + except: + pass + oav_times = [] + for i in range(len(oavs)): + oav_times.append(oav_expt + i * oav_period) + fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) + # plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img * scale)[:, :, 2] < threshold + except: + ind = np.flipud(img * scale) < threshold + rgb_cont_img = np.copy(np.flipud(img)) + # rgb_cont_img[ind,0]=1000 + if brightness_scale != 1: + rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) + + plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") + plt.axis("equal") + cross = [685, 440, 50] # definintion of direct beam: x, y, size + plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") + plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") + if pixel_scalebar != None: + plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. + plt.text(1000, 50, text_string, fontsize=14, color="r") + plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") + plt.axis("off") + plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) + + +def shift_mask_old(mask, shiftx, shifty): + """YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel + Input: + mask: int-type array, + shiftx: int scalar, shift value in x direction with unit in pixel + shifty: int scalar, shift value in y direction with unit in pixel + Output: + maskn: int-type array, shifted mask + + """ + qind, pixelist = roi.extract_label_indices(mask) + dims = mask.shape + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + pixely = pixelist % imgwidthy + pixelx = pixelist // imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy) & (pixelyn >= 0) & (pixelxn < imgwidthx) & (pixelxn >= 0) + pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] + maskn = np.zeros_like(mask) + maskn.ravel()[pixelist_new] = qind[w] + return maskn + + +def get_current_time(): + """get current time in a fomart of year/month/date/hour(24)/min/sec/, + e.g. 
2009-01-05 22:14:39 + """ + loc_dt = datetime.now(pytz.timezone("US/Eastern")) + fmt = "%Y-%m-%d %H:%M:%S" + return loc_dt.strftime(fmt) + + +def evalue_array(array, verbose=True): + """Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array""" + _min, _max, avg, std = np.min(array), np.max(array), np.average(array), np.std(array) + if verbose: + print( + "The min, max, avg, std of this array are: %s %s %s %s, respectively." % (_min, _max, avg, std) + ) + return _min, _max, avg, std + + +def find_good_xpcs_uids(fuids, Nlim=100, det=["4m", "1m", "500"]): + """Y.G., Dev Nov 1, 2018 Find the good xpcs series + Input: + fuids: list, a list of full uids + Nlim: integer, the smallest number of images to be considered as XCPS sereis + det: list, a list of detector (can be short string of the full name of the detector) + Return: + the xpcs uids list + + """ + guids = [] + for i, uid in enumerate(fuids): + if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": + head = db[uid]["start"] + for dec in head["detectors"]: + for dt in det: + if dt in dec: + if "number of images" in head: + if float(head["number of images"]) >= Nlim: + # print(i, uid) + guids.append(uid) + G = np.unique(guids) + print("Found %s uids for XPCS series." % len(G)) + return G + + +def create_fullImg_with_box( + shape, + box_nx=9, + box_ny=8, +): + """Y.G. 2018/10/26 Divide image with multi touched boxes + Input + shape: the shape of image + box_nx: the number of box in x + box_ny: the number width of box in y + Return: + roi_mask, (* mask ) + """ + + # shape = mask.shape + Wrow, Wcol = int(np.ceil(shape[0] / box_nx)), int(np.ceil(shape[1] / box_ny)) + # print(Wrow, Wcol) + roi_mask = np.zeros(shape, dtype=np.int32) + for i in range(box_nx): + for j in range(box_ny): + roi_mask[i * Wrow : (i + 1) * Wrow, j * Wcol : (j + 1) * Wcol] = i * box_ny + j + 1 + # roi_mask *= mask + return roi_mask + + +def get_refl_y0( + inc_ang, + inc_y0, + Ldet, + pixel_size, +): + """Get reflection beam center y + Input: + inc_ang: incident angle in degree + inc_y0: incident beam y center in pixel + Ldet: sample to detector distance in meter + pixel_size: pixel size in meter + Return: reflection beam center y in pixel + """ + return Ldet * np.tan(np.radians(inc_ang)) * 2 / pixel_size + inc_y0 + + +def lin2log_g2(lin_tau, lin_g2, num_points=False): + """ + Lutz developed at Aug,2018 + function to resample g2 with linear time steps into logarithmics + g2 values between consecutive logarthmic time steps are averaged to increase statistics + calling sequence: lin2log_g2(lin_tau,lin_g2,num_points=False) + num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade) + num_points=18 -> use 18 logarithmically spaced time points + """ + # prep taus and g2s: remove nan and first data point at tau=0 + rem = lin_tau == 0 + # print('lin_tau: '+str(lin_tau.size)) + # print('lin_g2: '+str(lin_g2.size)) + lin_tau[rem] = np.nan + # lin_tau[0]=np.nan;#lin_g2[0]=np.nan + lin_g2 = lin_g2[np.isfinite(lin_tau)] + lin_tau = lin_tau[np.isfinite(lin_tau)] + # print('from lin-to-log-g2_sampling: ',lin_tau) + if num_points == False: + # automatically decide how many log-points (8/decade) + dec = int(np.ceil((np.log10(lin_tau.max()) - np.log10(lin_tau.min())) * 8)) + else: + dec = int(num_points) + log_tau = np.logspace(np.log10(lin_tau[0]), np.log10(lin_tau.max()), dec) + # re-sample correlation function: + log_g2 = [] + for i in range(log_tau.size - 1): + y = [i, log_tau[i] - (log_tau[i + 
1] - log_tau[i]) / 2, log_tau[i] + (log_tau[i + 1] - log_tau[i]) / 2] + # x=lin_tau[lin_tau>y[1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + # print(np.average(lin_g2[x])) + if np.isfinite(np.average(lin_g2[x])): + log_g2.append(np.average(lin_g2[x])) + else: + log_g2.append(np.interp(log_tau[i], lin_tau, lin_g2)) + if i == log_tau.size - 2: + # print(log_tau[i+1]) + y = [i + 1, log_tau[i + 1] - (log_tau[i + 1] - log_tau[i]) / 2, log_tau[i + 1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + log_g2.append(np.average(lin_g2[x])) + return [log_tau, log_g2] + + +def get_eigerImage_per_file(data_fullpath): + f = h5py.File(data_fullpath) + dset_keys = list(f["/entry/data"].keys()) + dset_keys.sort() + dset_root = "/entry/data" + dset_keys = [dset_root + "/" + dset_key for dset_key in dset_keys] + dset = f[dset_keys[0]] + return len(dset) + + +def copy_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX + Copy Eiger file containing master and data files to a new path + old_path: the full path of the Eiger master file + new_path: the new path + + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") + for fp in tqdm(fps): + if not os.path.exists(new_path + os.path.basename(fp)): + shutil.copy(fp, new_path) + print("The files %s are copied: %s." % (old_path[:-10] + "*", new_path + os.path.basename(fp))) + + +def delete_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX + Delete copied Eiger file containing master and data in a new path + old_path: the full path of the Eiger master file + new_path: the new path + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") + for fp in tqdm(fps): + nfp = new_path + os.path.basename(fp) + if os.path.exists(nfp): + os.remove(nfp) + + +def show_tif_series( + tif_series, Nx=None, center=None, w=50, vmin=None, vmax=None, cmap=cmap_vge_hdr, logs=False, figsize=[10, 16] +): + """ + tif_series: list of 2D tiff images + Nx: the number in the row for dispalying + center: the center of iamge (or direct beam pixel) + w: the ROI half size in pixel + vmin: the min intensity value for plot + vmax: if None, will be max intensity value of the ROI + figsize: size of the plot (in inch) + + """ + + if center != None: + cy, cx = center + # infs = sorted(sample_list) + N = len(tif_series) + if Nx == None: + sy = int(np.sqrt(N)) + else: + sy = Nx + sx = int(np.ceil(N / sy)) + fig = plt.figure(figsize=figsize) + for i in range(N): + # print(i) + ax = fig.add_subplot(sx, sy, i + 1) + # d = (np.array( PIL.Image.open( infs[i] ).convert('I') ))[ cy-w:cy+w, cx-w:cx+w ] + d = tif_series[i][::-1] + # vmax= np.max(d) + # pritn(vmax) + # vmin= 10#np.min(d) + show_img( + d, + logs=logs, + show_colorbar=False, + show_ticks=False, + ax=[fig, ax], + image_name="%02d" % (i + 1), + cmap=cmap, + vmin=vmin, + vmax=vmax, + aspect=1, + save=False, + path=None, + ) + return fig, ax + + +from scipy.special import erf + + +def ps(y, shift=0.5, replot=True, logplot="off", x=None): + """ + Dev 16, 2018 + Modified ps() function in 95-utilities.py + function to determine statistic on line profile (assumes either peak or erf-profile) + Input: + y: 1D array, the data for analysis + shift: scale for peak presence (0.5 -> peak has to be taller factor 2 above background) + replot: if True, will plot data (if error func) with the fit and peak/cen/com position + logplot: if on, will plot in log 
scale + x: if not None, give x-data + + + """ + if x == None: + x = np.arange(len(y)) + x = np.array(x) + y = np.array(y) + + PEAK = x[np.argmax(y)] + PEAK_y = np.max(y) + COM = np.sum(x * y) / np.sum(y) + + ### from Maksim: assume this is a peak profile: + def is_positive(num): + return True if num > 0 else False + + # Normalize values first: + ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 + positive = is_positive(ym[0]) + list_of_roots = [] + for i in range(len(y)): + current_positive = is_positive(ym[i]) + if current_positive != positive: + list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) + positive = not positive + if len(list_of_roots) >= 2: + FWHM = abs(list_of_roots[-1] - list_of_roots[0]) + CEN = list_of_roots[0] + 0.5 * (list_of_roots[1] - list_of_roots[0]) + ps.fwhm = FWHM + ps.cen = CEN + yf = ym + # return { + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, + # } + else: # ok, maybe it's a step function.. + # print('no peak...trying step function...') + ym = ym + shift + + def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + return base - A * erf(k * (x - x0)) + + mod = Model(err_func) + ### estimate starting values: + x0 = np.mean(x) + # k=0.1*(np.max(x)-np.min(x)) + pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) + result = mod.fit(ym, pars, x=x) + CEN = result.best_values["x0"] + FWHM = result.best_values["k"] + A = result.best_values["A"] + b = result.best_values["base"] + yf_ = err_func(x, CEN, k=FWHM, A=A, base=b) # result.best_fit + yf = (yf_) * (np.max(y) - np.min(y)) + np.min(y) + + # (y - np.min(y)) / (np.max(y) - np.min(y)) - shift + + ps.cen = CEN + ps.fwhm = FWHM + + if replot: + ### re-plot results: + if logplot == "on": + fig, ax = plt.subplots() # plt.figure() + ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") + ax.hold(True) + ax.semilogy([CEN, CEN], [np.min(y), np.max(y)], "r-.", label="CEN") + ax.semilogy([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.semilogy(x, y, "bo-") + # plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() + else: + # plt.close(999) + fig, ax = plt.subplots() # plt.figure() + ax.plot([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") + + # ax.hold(True) + ax.plot([CEN, CEN], [np.min(y), np.max(y)], "m-.", label="CEN") + ax.plot([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.plot(x, y, "bo--") + ax.plot(x, yf, "r-", label="Fit") + + # plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() + + ### assign values of interest as function attributes: + ps.peak = PEAK + ps.com = COM + return ps.cen + + +def create_seg_ring(ring_edges, ang_edges, mask, setup_pargs): + """YG Dev April 6, 2018 + Create segment ring mask + Input: + ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ] + ang_edges: edges of angles, e.g., [ [20,40], [50, 60], ] + mask: bool type 2D array + set_pargs: dict, should at least contains, center + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 
0.99999702,
+             'lambda_': 0.9686265, #essential
+             'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/',
+             'timeperframe': 1.0,
+             'uid': 'uid=b85dad'}
+    Return:
+        roi_mask: segmented ring mask: two-D array
+        qval_dict: dict, key as q-number, val: q val
+
+    """
+
+    roi_mask_qr, qr, qr_edge = get_ring_mask(
+        mask,
+        inner_radius=None,
+        outer_radius=None,
+        width=None,
+        num_rings=None,
+        edges=np.array(ring_edges),
+        unit="pixel",
+        pargs=setup_pargs,
+    )
+
+    roi_mask_ang, ang_center, ang_edge = get_angular_mask(
+        mask,
+        inner_angle=None,
+        outer_angle=None,
+        width=None,
+        edges=np.array(ang_edges),
+        num_angles=None,
+        center=setup_pargs["center"],
+        flow_geometry=False,
+    )
+
+    roi_mask, good_ind = combine_two_roi_mask(roi_mask_qr, roi_mask_ang, pixel_num_thres=100)
+    qval_dict_ = get_qval_dict(qr_center=qr, qz_center=ang_center, one_qz_multi_qr=False)
+    qval_dict = {i: qval_dict_[k] for (i, k) in enumerate(good_ind)}
+    return roi_mask, qval_dict
+
+
+def find_bad_pixels_FD(bad_frame_list, FD, img_shape=[514, 1030], threshold=15, show_progress=True):
+    """Designed to find the bad pixel list in 500K
+    threshold: the max intensity in 5K
+    """
+    bad = np.zeros(img_shape, dtype=bool)
+    if show_progress:
+        for i in tqdm(bad_frame_list[bad_frame_list >= FD.beg]):
+            p, v = FD.rdrawframe(i)
+            w = np.where(v > threshold)[0]
+            bad.ravel()[p[w]] = 1
+            # x,y = np.where( imgsa[i] > threshold)
+            # bad[x[0],y[0]] = 1
+    else:
+        for i in bad_frame_list[bad_frame_list >= FD.beg]:
+            p, v = FD.rdrawframe(i)
+            w = np.where(v > threshold)[0]
+            bad.ravel()[p[w]] = 1
+
+    return ~bad
+
+
+def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=15):
+    """DEV by Yugang@CHX, June 6, 2019
+    Get circular average of a time series using a dynamic mask, in which pixel values
+    above a threshold are set to zero.
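+    A hypothetical usage sketch (assuming FD, mask and setup_pargs are already prepared
+    by the usual pipeline setup steps, i.e. a compressed-file handler, a bool mask and a
+    setup_pargs dict with 'dpix', 'Ldet', 'lambda_' and 'center'):
+        qp_saxs, iq_saxs, q_saxs = get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=10, threshold=15)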
+ Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number + Input: + FD: the multifile handler for the time series + mask: a two-d bool type array + setup_pargs: dict, parameters of setup for calculate q-Iq + should have keys as + 'dpix', 'Ldet','lambda_', 'center' + bin_number: bin number of the frame + threshold: define the dynamics mask, which pixel values are defined as + zeors if above this threshold + Output: + qp_saxs: q in pixel + iq_saxs: intenstity + q_saxs: q in A-1 + """ + beg = FD.beg + end = FD.end + shape = FD.rdframe(beg).shape + Nimg_ = FD.end - FD.beg + # Nimg_ = 100 + Nimg = Nimg_ // bin_number + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bin_number)) + beg + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + # print(t1,t2) + if bin_number == 1: + avg_imgi = FD.rdframe(t1) + else: + avg_imgi = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + badpi = find_bad_pixels_FD( + np.arange(t1, t2), FD, img_shape=avg_imgi.shape, threshold=threshold, show_progress=False + ) + img = avg_imgi * mask * badpi + qp_saxsi, iq_saxsi, q_saxsi = get_circular_average(img, mask * badpi, save=False, pargs=setup_pargs) + # print( img.max()) + if t1 == FD.beg: + qp_saxs, iq_saxs, q_saxs = np.zeros_like(qp_saxsi), np.zeros_like(iq_saxsi), np.zeros_like(q_saxsi) + qp_saxs += qp_saxsi + iq_saxs += iq_saxsi + q_saxs += q_saxsi + qp_saxs /= Nimg + iq_saxs /= Nimg + q_saxs /= Nimg + + return qp_saxs, iq_saxs, q_saxs + + +def get_waxs_beam_center(gamma, origin=[432, 363], Ldet=1495, pixel_size=75 * 1e-3): + """YG Feb 10, 2018 + Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma + Input: + gamma: angle in degree + Ldet: sample to detector distance, 1495 mm for CHX WAXS + origin: beam center for gamma = 0, (python x,y coordinate in pixel) + pxiel size: 75 * 1e-3 mm for Eiger 1M + output: + beam center: for the target gamma, in pixel + """ + return [int(origin[0] + np.tan(np.radians(gamma)) * Ldet / pixel_size), origin[1]] + + +def get_img_from_iq(qp, iq, img_shape, center): + """YG Jan 24, 2018 + Get image from circular average + Input: + qp: q in pixel unit + iq: circular average + image_shape, e.g., [256,256] + center: [center_y, center_x] e.g., [120, 200] + Output: + img: recovered image + """ + pixelist = np.arange(img_shape[0] * img_shape[1]) + pixely = pixelist % img_shape[1] - center[1] + pixelx = pixelist // img_shape[1] - center[0] + r = np.hypot(pixelx, pixely) # leave as float. + # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return (np.interp(r, qp, iq)).reshape(img_shape) + + +def average_array_withNan(array, axis=0, mask=None): + """YG. Jan 23, 2018 + Average array invovling np.nan along axis + + Input: + array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + avg: averaged array along axis + """ + shape = array.shape + if mask == None: + mask = np.isnan(array) + # mask = np.ma.masked_invalid(array).mask + array_ = np.ma.masked_array(array, mask=mask) + try: + sums = np.array(np.ma.sum(array_[:, :], axis=axis)) + except: + sums = np.array(np.ma.sum(array_[:], axis=axis)) + + cts = np.sum(~mask, axis=axis) + # print(cts) + return sums / cts + + +def deviation_array_withNan(array, axis=0, mask=None): + """YG. 
Jan 23, 2018 + Get the deviation of array invovling np.nan along axis + + Input: + array: ND array + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + dev: the deviation of array along axis + """ + avg2 = average_array_withNan(array**2, axis=axis, mask=mask) + avg = average_array_withNan(array, axis=axis, mask=mask) + return np.sqrt(avg2 - avg**2) + + +def refine_roi_mask(roi_mask, pixel_num_thres=10): + """YG Dev Jan20,2018 + remove bad roi which pixel numbe is lower pixel_num_thres + roi_mask: array, + pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, + i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres, + that roi will be considered as bad one and be removed. + """ + new_mask = np.zeros_like(roi_mask) + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + good_ind = np.where(nopr >= pixel_num_thres)[0] + 1 + l = len(good_ind) + new_ind = np.arange(1, l + 1) + for i, gi in enumerate(good_ind): + new_mask.ravel()[np.where(roi_mask.ravel() == gi)[0]] = new_ind[i] + return new_mask, good_ind - 1 + + +def shrink_image_stack(imgs, bins): + """shrink imgs by bins + imgs: shape as [Nimg, imx, imy]""" + Nimg, imx, imy = imgs.shape + bx, by = bins + imgsk = np.zeros([Nimg, imx // bx, imy // by]) + N = len(imgs) + for i in range(N): + imgsk[i] = shrink_image(imgs[i], bins) + return imgsk + + +def shrink_image(img, bins): + """YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y + input: + img: 2d array, + bins: integer list, eg. [2,2] + output: + imgb: binned img + """ + m, n = img.shape + bx, by = bins + Nx, Ny = m // bx, n // by + # print(Nx*bx, Ny*by) + return img[: Nx * bx, : Ny * by].reshape(Nx, bx, Ny, by).mean(axis=(1, 3)) + + +def get_diff_fv(g2_fit_paras, qval_dict, ang_init=137.2): + """YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras""" + g2_fit_para_ = g2_fit_paras.copy() + qr = np.array([qval_dict[k][0] for k in sorted(qval_dict.keys())]) + qang = np.array([qval_dict[k][1] for k in sorted(qval_dict.keys())]) + # x=g2_fit_para_.pop( 'relaxation_rate' ) + # x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_["diff"] = g2_fit_paras["relaxation_rate"] / qr**2 + cos_part = np.abs(np.cos(np.radians(qang - ang_init))) + g2_fit_para_["fv"] = g2_fit_paras["flow_velocity"] / cos_part / qr + return g2_fit_para_ + + +# function to get indices of local extrema (=indices of speckle echo maximum amplitudes): +def get_echos(dat_arr, min_distance=10): + """ + getting local maxima and minima from 1D data -> e.g. speckle echos + strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima + using np.argmin to find absolute minima between relative maxima + returns [max_ind,min_ind] -> lists of indices corresponding to local maxima/minima + by LW 10/23/2018 + """ + from skimage.feature import peak_local_max + + max_ind = peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) 
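+    # A hypothetical usage sketch (not part of the original routine), e.g. for a toy
+    # decaying "echo"-like signal with maxima roughly every 50 points:
+    #     t = np.arange(1000)
+    #     dat = 1.0 + 0.5 * np.exp(-t / 800.0) * np.cos(2 * np.pi * t / 100.0) ** 2
+    #     max_ind, min_ind = get_echos(dat, min_distance=10)
+    # max_ind / min_ind are lists of indices of the local maxima / minima of dat.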
+ min_ind = [] + for i in range(len(max_ind[:-1])): + min_ind.append(max_ind[i + 1][0] + np.argmin(dat_arr[max_ind[i + 1][0] : max_ind[i][0]])) + # unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind = [] + for l in max_ind: + mmax_ind.append(l[0]) + # return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)), list(reversed(min_ind))] + + +def pad_length(arr, pad_val=np.nan): + """ + arr: 2D matrix + pad_val: values being padded + adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix + -> used to convert python generic data object to HDF5 native format + function fixes python bug in padding (np.pad) integer array with np.nan + update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogenous size + by LW 12/30/2017 + """ + max_len = [] + for i in range(len(arr)): + max_len.append([len(arr[i])]) + max_len = np.max(max_len) + for l in range(len(arr)): + arr[l] = np.pad(arr[l] * 1.0, (0, max_len - np.size(arr[l])), mode="constant", constant_values=pad_val) + return arr + + +def save_array_to_tiff(array, output, verbose=True): + """Y.G. Nov 1, 2017 + Save array to a tif file + """ + img = PIL.Image.fromarray(array) + img.save(output) + if verbose: + print("The data is save to: %s." % (output)) + + +def load_pilatus(filename): + """Y.G. Nov 1, 2017 + Load a pilatus 2D image + """ + return np.array(PIL.Image.open(filename).convert("I")) + + +def ls_dir(inDir, have_list=[], exclude_list=[]): + """Y.G. Aug 1, 2019 + List all filenames in a filefolder + inDir: fullpath of the inDir + have_string: only retrun filename containing the string + exclude_string: only retrun filename not containing the string + + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifs_ = [] + for tif in tifs: + flag = 1 + for string in have_list: + if string not in tif: + flag *= 0 + for string in exclude_list: + if string in tif: + flag *= 0 + if flag: + tifs_.append(tif) + + return np.array(tifs_) + + +def ls_dir2(inDir, string=None): + """Y.G. Nov 1, 2017 + List all filenames in a filefolder (not include hidden files and subfolders) + inDir: fullpath of the inDir + string: if not None, only retrun filename containing the string + """ + from os import listdir + from os.path import isfile, join + + if string == None: + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + else: + tifs = np.array([f for f in listdir(inDir) if (isfile(join(inDir, f))) & (string in f)]) + return tifs + + +def re_filename(old_filename, new_filename, inDir=None, verbose=True): + """Y.G. Nov 28, 2017 + Rename old_filename with new_filename in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_filename/ new_filename: string + an example: + re_filename( 'uid=run20_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' + ) + """ + if inDir != None: + os.rename(inDir + old_filename, inDir + new_filename) + else: + os.rename(old_filename, new_filename) + print("The file: %s is changed to: %s." % (old_filename, new_filename)) + + +def re_filename_dir(old_pattern, new_pattern, inDir, verbose=True): + """Y.G. 
Nov 28, 2017 + Rename all filenames with old_pattern with new_pattern in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_pattern, new_pattern + an example, + re_filename_dir('20_', '17_', inDir ) + """ + fps = ls_dir(inDir) + for fp in fps: + if old_pattern in fp: + old_filename = fp + new_filename = fp.replace(old_pattern, new_pattern) + re_filename(old_filename, new_filename, inDir, verbose=verbose) + + +def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, silent=True, qprecision=5): + """ + function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis + [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) + calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True) + qdict: qval_dict from analysis pipeline/hdf5 result file + q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True) + q_thresh: threshold for comparing Q-values, set to 0 for exact comparison + phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True) + p_thresh: threshold for comparing phi values, set to 0 for exact comparison + silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest + by LW 10/21/2017 + update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) + update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environments >= 2019-3.0.1 + """ + import collections + from collections import OrderedDict + + qdict = collections.OrderedDict(sorted(qdict.items())) + qs = [] + phis = [] + for i in qdict.keys(): + qs.append(qdict[i][0]) + phis.append(qdict[i][1]) + qslist = list(OrderedDict.fromkeys(qs)) + qslist = np.unique(np.round(qslist, qprecision)) + phislist = list(OrderedDict.fromkeys(phis)) + qslist = list(np.sort(qslist)) + phislist = list(np.sort(phislist)) + if q_nr: + qinterest = qslist[q] + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] + else: + qinterest = q + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] # new + if phi_nr: + phiinterest = phislist[phi] + phiindices = [i for i, x in enumerate(phis) if x == phiinterest] + else: + phiinterest = phi + phiindices = [i for i, x in enumerate(phis) if np.abs(x - phiinterest) < p_thresh] # new + ret_list = [ + list(set(qindices).intersection(phiindices))[0], + qinterest, + phiinterest, + qslist, + phislist, + ] # -> this is the original + if silent == False: + print("list of available Qs:") + print(qslist) + print("list of available phis:") + print(phislist) + print("Roi number for Q= " + str(ret_list[1]) + " and phi= " + str(ret_list[2]) + ": " + str(ret_list[0])) + return ret_list + + +def get_fit_by_two_linear( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, + namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) + Input: + x: 1D np.array + y: 1D np.array + mid_xpoint: float, the middle point of x + xrange: [x1,x2] + Return: + D1, gmfit1, D2, gmfit2 : + fit parameter (slope, background) of linear fit1 + convinent fit class, gmfit1(x) gives yvale + fit parameter (slope, background) of linear fit2 + convinent fit class, gmfit2(x) gives yvale + + """ + if xrange == None: + x1, x2 = min(x), max(x) + x1, x2 = xrange + if mid_xpoint2 == 
None: + mid_xpoint2 = mid_xpoint1 + D1, gmfit1 = linear_fit(x, y, xrange=[x1, mid_xpoint1]) + D2, gmfit2 = linear_fit(x, y, xrange=[mid_xpoint2, x2]) + return D1, gmfit1, D2, gmfit2 + + +def get_cross_point(x, gmfit1, gmfit2): + """YG Octo 16,2017 + Get croess point of two curve + """ + y1 = gmfit1(x) + y2 = gmfit2(x) + return x[np.argmin(np.abs(y1 - y2))] + + +def get_curve_turning_points( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 + Get a turning point of a curve by doing a two-linear fit + """ + D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x, y, mid_xpoint1, mid_xpoint2, xrange) + return get_cross_point(x, gmfit1, gmfit2) + + +def plot_fit_two_linear_fit(x, y, gmfit1, gmfit2, ax=None): + """YG Octo 16,2017 Plot data with two fitted linear func""" + if ax == None: + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, c="k", legend="data", m="o", ls="") # logx=True, logy=True ) + plot1D(x=x, y=gmfit1(x), ax=ax, c="r", m="", ls="-", legend="fit1") + plot1D(x=x, y=gmfit2(x), ax=ax, c="b", m="", ls="-", legend="fit2") + return ax + + +def linear_fit(x, y, xrange=None): + """YG Octo 16,2017 copied from XPCS_SAXS + a linear fit + """ + if xrange != None: + xmin, xmax = xrange + x1, x2 = find_index(x, xmin, tolerance=None), find_index(x, xmax, tolerance=None) + x_ = x[x1:x2] + y_ = y[x1:x2] + else: + x_ = x + y_ = y + D0 = np.polyfit(x_, y_, 1) + gmfit = np.poly1d(D0) + return D0, gmfit + + +def find_index(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + """ + + N = len(x) + i = 0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + position = np.argmin(np.abs(x - x0)) + return position + + +def find_index_old(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + """ + + N = len(x) + i = 0 + position = None + if tolerance == None: + tolerance = (x[1] - x[0]) / 2.0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + for item in x: + if abs(item - x0) <= tolerance: + position = i + # print 'Found Index!!!' + break + i += 1 + + return position + + +def sgolay2d(z, window_size, order, derivative=None): + """YG Octo 16, 2017 + Modified from http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html + Procedure for sg2D: + https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter#Two-dimensional_convolution_coefficients + + Two-dimensional smoothing and differentiation can also be applied to tables of data values, such as intensity + values in a photographic image which is composed of a rectangular grid of pixels.[16] [17] The trick is to transform + part of the table into a row by a simple ordering of the indices of the pixels. Whereas the one-dimensional filter + coefficients are found by fitting a polynomial in the subsidiary variable, z to a set of m data points, the + two-dimensional coefficients are found by fitting a polynomial in subsidiary variables v and w to a set of m x m + data points. The following example, for a bicubic polynomial and m = 5, illustrates the process, which parallels the + process for the one dimensional case, above.[18] + + The square of 25 data values, d1 - d25 + becomes a vector when the rows are placed one after another. + The Jacobian has 10 columns, one for each of the parameters a00 - a03 and 25 rows, one for each pair of v and w values. 
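+    (The coefficient matrix is C = (J^T J)^{-1} J^T, i.e. the pseudo-inverse of the Jacobian J.
+    In the implementation below, J corresponds to the matrix A, built as A[:, k] = dx**a_k * dy**b_k
+    on the window_size x window_size grid of (dx, dy) offsets, and the smoothing kernel is
+    row 0 of np.linalg.pinv(A).)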
+ The convolution coefficients are calculated as + The first row of C contains 25 convolution coefficients which can be multiplied with the 25 data values to provide a + smoothed value for the central data point (13) of the 25. + + """ + # number of terms in the polynomial expression + n_terms = (order + 1) * (order + 2) / 2.0 + + if window_size % 2 == 0: + raise ValueError("window_size must be odd") + + if window_size**2 < n_terms: + raise ValueError("order is too high for the window size") + + half_size = window_size // 2 + + # exponents of the polynomial. + # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ... + # this line gives a list of two item tuple. Each tuple contains + # the exponents of the k-th term. First element of tuple is for x + # second element for y. + # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...] + exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)] + + # coordinates of points + ind = np.arange(-half_size, half_size + 1, dtype=np.float64) + dx = np.repeat(ind, window_size) + dy = np.tile(ind, [window_size, 1]).reshape( + window_size**2, + ) + + # build matrix of system of equation + A = np.empty((window_size**2, len(exps))) + for i, exp in enumerate(exps): + A[:, i] = (dx ** exp[0]) * (dy ** exp[1]) + + # pad input array with appropriate values at the four borders + new_shape = z.shape[0] + 2 * half_size, z.shape[1] + 2 * half_size + Z = np.zeros((new_shape)) + # top band + band = z[0, :] + Z[:half_size, half_size:-half_size] = band - np.abs(np.flipud(z[1 : half_size + 1, :]) - band) + # bottom band + band = z[-1, :] + Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size - 1 : -1, :]) - band) + # left band + band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size]) + Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1 : half_size + 1]) - band) + # right band + band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size]) + Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size - 1 : -1]) - band) + # central band + Z[half_size:-half_size, half_size:-half_size] = z + + # top left corner + band = z[0, 0] + Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1 : half_size + 1, 1 : half_size + 1])) - band) + # bottom right corner + band = z[-1, -1] + Z[-half_size:, -half_size:] = band + np.abs( + np.flipud(np.fliplr(z[-half_size - 1 : -1, -half_size - 1 : -1])) - band + ) + + # top right corner + band = Z[half_size, -half_size:] + Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size + 1 : 2 * half_size + 1, -half_size:]) - band) + # bottom left corner + band = Z[-half_size:, half_size].reshape(-1, 1) + Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band) + + # solve system and convolve + if derivative == None: + m = np.linalg.pinv(A)[0].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, m, mode="valid") + elif derivative == "col": + c = np.linalg.pinv(A)[1].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -c, mode="valid") + elif derivative == "row": + r = np.linalg.pinv(A)[2].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -r, mode="valid") + elif derivative == "both": + c = np.linalg.pinv(A)[1].reshape((window_size, -1)) + r = np.linalg.pinv(A)[2].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -r, mode="valid"), scipy.signal.fftconvolve(Z, -c, mode="valid") + + +def load_filelines(fullpath): + """YG Develop March 10, 2018 + Load all 
content from a file + basepath, fname = os.path.split(os.path.abspath( fullpath )) + Input: + fullpath: str, full path of the file + Return: + list: str + """ + with open(fullpath, "r") as fin: + p = fin.readlines() + return p + + +def extract_data_from_file( + filename, + filepath, + good_line_pattern=None, + start_row=None, + good_cols=None, + labels=None, +): + """YG Develop Octo 17, 2017 + Add start_row option at March 5, 2018 + + Extract data from a file + Input: + filename: str, filename of the data + filepath: str, path of the data + good_line_pattern: str, data will be extract below this good_line_pattern + Or giving start_row: int + good_cols: list of integer, good index of cols + lables: the label of the good_cols + #save: False, if True will save the data into a csv file with filename appending csv ?? + Return: + a pds.dataframe + Example: + filepath = '/XF11ID/analysis/2017_3/lwiegart/Link_files/Exports/' + filename = 'ANPES2 15-10-17 16-31-11-84Exported.txt' + good_cols = [ 1,2,4,6,8,10 ] + labels = [ 'time', 'temperature', 'force', 'distance', 'stress', 'strain' ] + good_line_pattern = "Index\tX\tY\tX\tY\tX\tY" + df = extract_data_from_file( filename, filepath, good_line_pattern, good_cols, labels) + """ + import pandas as pds + + with open(filepath + filename, "r") as fin: + p = fin.readlines() + di = 1e20 + for i, line in enumerate(p): + if start_row != None: + di = start_row + elif good_line_pattern != None: + if good_line_pattern in line: + di = i + else: + di = 0 + if i == di + 1: + els = line.split() + if good_cols == None: + data = np.array(els, dtype=float) + else: + data = np.array([els[j] for j in good_cols], dtype=float) + elif i > di: + try: + els = line.split() + if good_cols == None: + temp = np.array(els, dtype=float) + else: + temp = np.array([els[j] for j in good_cols], dtype=float) + data = np.vstack((data, temp)) + except: + pass + if labels == None: + labels = np.arange(data.shape[1]) + df = pds.DataFrame(data, index=np.arange(data.shape[0]), columns=labels) + return df + + +def get_print_uids(start_time, stop_time, return_all_info=False): + """Update Feb 20, 2018 also return full uids + YG. 
Octo 3, 2017@CHX + Get full uids and print uid plus Measurement contents by giving start_time, stop_time + + """ + hdrs = list(db(start_time=start_time, stop_time=stop_time)) + fuids = np.zeros(len(hdrs), dtype=object) + uids = np.zeros(len(hdrs), dtype=object) + sids = np.zeros(len(hdrs), dtype=object) + n = 0 + all_info = np.zeros(len(hdrs), dtype=object) + for i in range(len(hdrs)): + fuid = hdrs[-i - 1]["start"]["uid"] # reverse order + uid = fuid[:6] # reverse order + sid = hdrs[-i - 1]["start"]["scan_id"] + fuids[n] = fuid + uids[n] = uid + sids[n] = sid + date = time.ctime(hdrs[-i - 1]["start"]["time"]) + try: + m = hdrs[-i - 1]["start"]["Measurement"] + except: + m = "" + info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + print(info) + if return_all_info: + all_info[n] = info + n += 1 + if not return_all_info: + return fuids, uids, sids + else: + return fuids, uids, sids, all_info + + +def get_last_uids(n=-1): + """YG Sep 26, 2017 + A Convinient function to copy uid to jupyter for analysis""" + uid = db[n]["start"]["uid"][:8] + sid = db[n]["start"]["scan_id"] + m = db[n]["start"]["Measurement"] + return " uid = '%s' #(scan num: %s (Measurement: %s " % (uid, sid, m) + + +def get_base_all_filenames(inDir, base_filename_cut_length=-7): + """YG Sep 26, 2017 + Get base filenames and their related all filenames + Input: + inDir, str, input data dir + base_filename_cut_length: to which length the base name is unique + Output: + dict: keys, base filename + vales, all realted filename + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifsc = list(tifs.copy()) + utifs = np.sort(np.unique(np.array([f[:base_filename_cut_length] for f in tifs])))[::-1] + files = {} + for uf in utifs: + files[uf] = [] + i = 0 + reName = [] + for i in range(len(tifsc)): + if uf in tifsc[i]: + files[uf].append(tifsc[i]) + reName.append(tifsc[i]) + for fn in reName: + tifsc.remove(fn) + return files + + +def create_ring_mask(shape, r1, r2, center, mask=None): + """YG. Sep 20, 2017 Develop@CHX + Create 2D ring mask + input: + shape: two integer number list, mask shape, e.g., [100,100] + r1: the inner radius + r2: the outer radius + center: two integer number list, [cx,cy], ring center, e.g., [30,50] + output: + 2D numpy array, 0,1 type + """ + + m = np.zeros(shape, dtype=bool) + rr, cc = disk((center[1], center[0]), r2, shape=shape) + m[rr, cc] = 1 + rr, cc = disk((center[1], center[0]), r1, shape=shape) + m[rr, cc] = 0 + if mask != None: + m += mask + return m + + +def get_image_edge(img): + """ + Y.G. Developed at Sep 8, 2017 @CHX + Get sharp edges of an image + img: two-D array, e.g., a roi mask + """ + edg_ = prewitt(img / 1.0) + edg = np.zeros_like(edg_) + w = np.where(edg_ > 1e-10) + edg[w] = img[w] + edg[np.where(edg == 0)] = 1 + return edg + + +def get_image_with_roi(img, roi_mask, scale_factor=2): + """ + Y.G. 
Developed at Sep 8, 2017 @CHX + Get image with edges of roi_mask by doing + i) get edges of roi_mask by function get_image_edge + ii) scale img at region of interest (ROI) by scale_factor + img: two-D array for image + roi_mask: two-D array for ROI + scale_factor: scaling factor of ROI in image + """ + edg = get_image_edge(roi_mask) + img_ = img.copy() + w = np.where(roi_mask) + img_[w] = img[w] * scale_factor + return img_ * edg + + +def get_today_date(): + from time import gmtime, strftime + + return strftime("%m-%d-%Y", gmtime()) + + +def move_beamstop(mask, xshift, yshift): + """Y.G. Developed at July 18, 2017 @CHX + Create new mask by shift the old one with xshift, yshift + Input + --- + mask: 2D numpy array, 0 for bad pixels, 1 for good pixels + xshift, integer, shift value along x direction + yshift, integer, shift value along y direction + + Output + --- + mask, 2D numpy array, + """ + m = np.ones_like(mask) + W, H = mask.shape + w = np.where(mask == 0) + nx, ny = w[0] + int(yshift), w[1] + int(xshift) + gw = np.where((nx >= 0) & (nx < W) & (ny >= 0) & (ny < H)) + nx = nx[gw] + ny = ny[gw] + m[nx, ny] = 0 + return m + + +def validate_uid(uid): + """check uid whether be able to load data""" + try: + sud = get_sid_filenames(db[uid]) + print(sud) + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + print(imgs) + return 1 + except: + print("Can't load this uid=%s!" % uid) + return 0 + + +def validate_uid_dict(uid_dict): + """Y.G. developed July 17, 2017 @CHX + Check each uid in a dict can load data or not + uids: dict, val: meaningful decription, key: a list of uids + + """ + badn = 0 + badlist = [] + for k in list(uids.keys()): + for uid in uids[k]: + flag = validate_uid(uid) + if not flag: + badn += 1 + badlist.append(uid) + print("There are %s bad uids:%s in this uid_dict." % (badn, badlist)) + + +def get_mass_center_one_roi(FD, roi_mask, roi_ind): + """Get the mass center (in pixel unit) of one roi in a time series FD + FD: handler for a compressed time series + roi_mask: the roi array + roi_ind: the interest index of the roi + + """ + import scipy + + m = roi_mask == roi_ind + cx, cy = np.zeros(int((FD.end - FD.beg) / 1)), np.zeros(int((FD.end - FD.beg) / 1)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get mass center of one ROI of each frame"): + img = FD.rdframe(i) * m + c = scipy.ndimage.measurements.center_of_mass(img) + cx[n], cy[n] = int(c[0]), int(c[1]) + n += 1 + return cx, cy + + +def get_current_pipeline_filename(NOTEBOOK_FULL_PATH): + """Y.G. April 25, 2017 + Get the current running pipeline filename and path + Assume the piple is located in /XF11ID/ + Return, path and filename + """ + from IPython.core.magics.display import Javascript + + if False: + Javascript( + """ + var nb = IPython.notebook; + var kernel = IPython.notebook.kernel; + var command = "NOTEBOOK_FULL_PATH = '" + nb.base_url + nb.notebook_path + "'"; + kernel.execute(command); + """ + ) + print(NOTEBOOK_FULL_PATH) + filename = NOTEBOOK_FULL_PATH.split("/")[-1] + path = "/XF11ID/" + for s in NOTEBOOK_FULL_PATH.split("/")[3:-1]: + path += s + "/" + return path, filename + + +def get_current_pipeline_fullpath(NOTEBOOK_FULL_PATH): + """Y.G. April 25, 2017 + Get the current running pipeline full filepath + Assume the piple is located in /XF11ID/ + Return, the fullpath (path + filename) + """ + p, f = get_current_pipeline_filename(NOTEBOOK_FULL_PATH) + return p + f + + +def save_current_pipeline(NOTEBOOK_FULL_PATH, outDir): + """Y.G. 
April 25, 2017 + Save the current running pipeline to outDir + The save pipeline should be the snapshot of the current state. + """ + + import shutil + + path, fp = get_current_pipeline_filename(NOTEBOOK_FULL_PATH) + shutil.copyfile(path + fp, outDir + fp) + + print("This pipeline: %s is saved in %s." % (fp, outDir)) + + +def plot_g1(taus, g2, g2_fit_paras, qr=None, ylim=[0, 1], title=""): + """Dev Apr 19, 2017, + Plot one-time correlation, giving taus, g2, g2_fit""" + noqs = g2.shape[1] + fig, ax = plt.subplots() + if qr == None: + qr = np.arange(noqs) + for i in range(noqs): + b = g2_fit_paras["baseline"][i] + beta = g2_fit_paras["beta"][i] + y = np.sqrt(np.abs(g2[1:, i] - b) / beta) + plot1D( + x=taus[1:], + y=y, + ax=ax, + legend="q=%s" % qr[i], + ls="-", + lw=2, + m=markers[i], + c=colors[i], + title=title, + ylim=ylim, + logx=True, + legend_size=8, + ) + ax.set_ylabel(r"$g_1$" + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + return ax + + +def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): + """Remove bad pixels in roi_mask. The bad pixel is defined by the filter_dict, + if filter_type ='ylim', the filter_dict wit key as q and each value gives a high and low limit thresholds. The value of the pixels in avg_img above or below the limit are considered as bad pixels. + if filter_type='badpix': the filter_dict wit key as q and each value gives a list of bad pixel. + + avg_img, the averaged image + roi_mask: two-d array, the same shape as image, the roi mask, value is integer, e.g., 1 ,2 ,... + filter_dict: keys, as roi_mask integer, value, by default is [None,None], is the limit, + example, {2:[4,5], 10:[0.1,1.1]} + NOTE: first q = 1 (not 0) + """ + rm = roi_mask.copy() + rf = np.ravel(rm) + for k in list(filter_dict.keys()): + pixel = roi.roi_pixel_values(avg_img, roi_mask, [k])[0][0] + # print( np.max(pixel), np.min(pixel) ) + if filter_type == "ylim": + xmin, xmax = filter_dict[k] + badp = np.where((pixel >= xmax) | (pixel <= xmin))[0] + else: + badp = filter_dict[k] + if len(badp) != 0: + pls = np.where([rf == k])[1] + rf[pls[badp]] = 0 + return rm + + +## +# Dev at March 31 for create Eiger chip mask +def create_chip_edges_mask(det="1M"): + """Create a chip edge mask for Eiger detector""" + if det == "1M": + shape = [1065, 1030] + w = 4 + mask = np.ones(shape, dtype=np.int32) + cx = [1030 // 4 * i for i in range(1, 4)] + # cy = [ 1065//4 *i for i in range(1,4) ] + cy = [808, 257] + # print (cx, cy ) + for c in cx: + mask[:, c - w // 2 : c + w // 2] = 0 + for c in cy: + mask[c - w // 2 : c + w // 2, :] = 0 + + return mask + + +def create_ellipse_donut(cx, cy, wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): + Nmax = np.max(np.unique(roi_mask)) + rr1, cc1 = ellipse(cy, cx, wy_inner, wx_inner) + rr2, cc2 = ellipse(cy, cx, wy_inner + gap, wx_inner + gap) + rr3, cc3 = ellipse(cy, cx, wy_outer, wx_outer) + roi_mask[rr3, cc3] = 2 + Nmax + roi_mask[rr2, cc2] = 0 + roi_mask[rr1, cc1] = 1 + Nmax + return roi_mask + + +def create_box(cx, cy, wx, wy, roi_mask): + Nmax = np.max(np.unique(roi_mask)) + for i, [cx_, cy_] in enumerate(list(zip(cx, cy))): # create boxes + x = np.array([cx_ - wx, cx_ + wx, cx_ + wx, cx_ - wx]) + y = np.array([cy_ - wy, cy_ - wy, cy_ + wy, cy_ + wy]) + rr, cc = polygon(y, x) + roi_mask[rr, cc] = i + 1 + Nmax + return roi_mask + + +def create_folder(base_folder, sub_folder): + """ + Crate a subfolder under base folder + Input: + base_folder: full path of the base folder + sub_folder: sub folder name to be created + 
Return: + Created full path of the created folder + """ + + data_dir0 = os.path.join(base_folder, sub_folder) + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + return data_dir0 + + +def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): + """ + Crate a folder for saving user data analysis result + Input: + CYCLE: run cycle + username: if None, get username from the jupyter username + Return: + Created folder name + """ + if username != "Default": + if username == None: + username = getpass.getuser() + data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") + else: + data_dir0 = os.path.join(default_dir, CYCLE + "/") + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + return data_dir0 + + +################################## +# ########For dose analysis ####### +# ################################# +def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): + """ + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + """ + return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att) + + +def get_multi_tau_lag_steps(fra_max, num_bufs=8): + """ + Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max + Parameters: + fra_max: integer, the maximun frame number + buf_num (default=8), + Return: + taus_in_log, a list + + e.g., + get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) + + """ + num_levels = int(np.log(fra_max / (num_bufs - 1)) / np.log(2) + 1) + 1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + return lag_steps[lag_steps < fra_max] + + +def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True, num_bufs=8): + """ + Get taus for dose dependent analysis + Parameters: + fra_max_list: a list, a lsit of largest available frame number + acq_time: acquistion time for each frame + log_taus: if true, will use the multi-tau defined taus bu using buf_num (default=8), + otherwise, use deltau =1 + Return: + tausd, a dict, with keys as taus_max_list items + e.g., + get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True, num_bufs = 8) + --> + {20: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]), + 30: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28]), + 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) + } + + """ + tausd = {} + for n in fra_max_list: + if max_fra_num != None: + L = max_fra_num + else: + L = np.infty + if n > L: + warnings.warn( + "Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." 
+ "We will use the maxium dose of the data." + ) + n = L + if log_taus: + lag_steps = get_multi_tau_lag_steps(n, num_bufs) + else: + lag_steps = np.arange(n) + tausd[n] = lag_steps * acq_time + return tausd + + +def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * 10 * (-5)): + """Y.G. Dec 31, 2016, check lost metadata + + Parameter: + md: dict, meta data dictionay + Nimg: number of frames for this uid metadata + inc_x0/y0: incident beam center x0/y0, if None, will over-write the md['beam_center_x/y'] + pixelsize: if md don't have ['x_pixel_size'], the pixelsize will add it + Return: + dpix: pixelsize, in mm + lambda_: wavelegth of the X-rays in Angstroms + exposuretime: exposure time in sec + timeperframe: acquisition time is sec + center: list, [x,y], incident beam center in pixel + Will also update md + """ + mdn = md.copy() + if "number of images" not in list(md.keys()): + md["number of images"] = Nimg + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 7.5000004e-05 + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + try: + lambda_ = md["wavelength"] + except: + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + try: + Ldet = md["det_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["det_distance"] = Ldet + except: + Ldet = md["detector_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["detector_distance"] = Ldet + + try: # try exp time from detector + exposuretime = md["count_time"] # exposure time in sec + except: + exposuretime = md["cam_acquire_time"] # exposure time in sec + try: # try acq time from detector + acquisition_period = md["frame_time"] + except: + try: + acquisition_period = md["acquire period"] + except: + uid = md["uid"] + acquisition_period = float(db[uid]["start"]["acquire period"]) + timeperframe = acquisition_period + if inc_x0 != None: + mdn["beam_center_x"] = inc_y0 + print("Beam_center_x has been changed to %s. (no change in raw metadata): " % inc_y0) + if inc_y0 != None: + mdn["beam_center_y"] = inc_x0 + print("Beam_center_y has been changed to %s. (no change in raw metadata): " % inc_x0) + center = [int(mdn["beam_center_x"]), int(mdn["beam_center_y"])] # beam center [y,x] for python image + center = [center[1], center[0]] + + return dpix, lambda_, Ldet, exposuretime, timeperframe, center + + +def combine_images(filenames, outputfile, outsize=(2000, 2400)): + """Y.G. 
Dec 31, 2016 + Combine images together to one image using PIL.Image + Input: + filenames: list, the images names to be combined + outputfile: str, the filename to generate + outsize: the combined image size + Output: + save a combined image file + """ + N = len(filenames) + # nx = np.int( np.ceil( np.sqrt(N)) ) + # ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int(np.ceil(np.sqrt(N))) + nx = int(np.ceil(N / float(ny))) + + # print(nx,ny) + result = Image.new("RGB", outsize, color=(255, 255, 255, 0)) + basewidth = int(outsize[0] / nx) + hsize = int(outsize[1] / ny) + for index, file in enumerate(filenames): + path = os.path.expanduser(file) + img = Image.open(path) + bands = img.split() + ratio = img.size[1] / img.size[0] # h/w + if hsize > basewidth * ratio: + basewidth_ = basewidth + hsize_ = int(basewidth * ratio) + else: + basewidth_ = int(hsize / ratio) + hsize_ = hsize + # print( index, file, basewidth, hsize ) + size = (basewidth_, hsize_) + bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] + img = Image.merge("RGBA", bands) + x = index % nx * basewidth + y = index // nx * hsize + w, h = img.size + # print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h)) + result.save(outputfile, quality=100, optimize=True) + print("The combined image is saved as: %s" % outputfile) + + +def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz=True, one_qz_multi_qr=True): + """Y.G. Dec 27, 2016 + Map the roi label array with qr or (qr,qz) or (q//, q|-) values + Parameters: + qr_center: list, a list of qr + qz_center: list, a list of qz, + multi_qr_for_one_qz: by default=True, + if one_qz_multi_qr: + one qz_center corresponds to all qr_center, in other words, there are totally, len(qr_center)* len(qz) qs + else: + one qr_center corresponds to all qz_center, + else: one qr with one qz + qval_dict: if not None, will append the new dict to the qval_dict + Return: + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + + """ + + if qval_dict == None: + qval_dict = {} + maxN = 0 + else: + maxN = np.max(list(qval_dict.keys())) + 1 + + if qz_center != None: + if multi_qr_for_one_qz: + if one_qz_multi_qr: + for qzind in range(len(qz_center)): + for qrind in range(len(qr_center)): + qval_dict[maxN + qzind * len(qr_center) + qrind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) + else: + for qrind in range(len(qr_center)): + for qzind in range(len(qz_center)): + qval_dict[maxN + qrind * len(qz_center) + qzind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) + + else: + for i, [qr, qz] in enumerate(zip(qr_center, qz_center)): + qval_dict[maxN + i] = np.array([qr, qz]) + else: + for qrind in range(len(qr_center)): + qval_dict[maxN + qrind] = np.array([qr_center[qrind]]) + return qval_dict + + +def update_qval_dict(qval_dict1, qval_dict2): + """Y.G. Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + Output: + qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) + """ + maxN = np.max(list(qval_dict1.keys())) + 1 + qval_dict = {} + qval_dict.update(qval_dict1) + for k in list(qval_dict2.keys()): + qval_dict[k + maxN] = qval_dict2[k] + return qval_dict + + +def update_roi_mask(roi_mask1, roi_mask2): + """Y.G. 
Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + roi_mask1, 2d-array, label array, same shape as xpcs frame, + roi_mask2, 2d-array, label array, same shape as xpcs frame, + Output: + roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 + """ + roi_mask = roi_mask1.copy() + w = np.where(roi_mask2) + roi_mask[w] = roi_mask2[w] + np.max(roi_mask) + return roi_mask + + +def check_bad_uids(uids, mask, img_choice_N=10, bad_uids_index=None): + """Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + """ + import random + + buids = [] + guids = list(uids) + # print( guids ) + if bad_uids_index == None: + bad_uids_index = [] + for i, uid in enumerate(uids): + # print( i, uid ) + if i not in bad_uids_index: + detector = get_detector(db[uid]) + imgs = load_data(uid, detector) + img_samp_index = random.sample(range(len(imgs)), img_choice_N) + imgsa = apply_mask(imgs, mask) + avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uid) + if avg_img.max() == 0: + buids.append(uid) + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + print("The bad uid is: %s" % uid) + else: + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + buids.append(uid) + print("The bad uid is: %s" % uid) + print("The total and bad uids number are %s and %s, repsectively." % (len(uids), len(buids))) + return guids, buids + + +def find_uids(start_time, stop_time): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = db(start_time=start_time, stop_time=stop_time) + try: + print("Totally %s uids are found." 
% (len(list(hdrs)))) + except: + pass + sids = [] + uids = [] + fuids = [] + for hdr in hdrs: + s = get_sid_filenames(hdr) + # print (s[1][:8]) + sids.append(s[0]) + uids.append(s[1][:8]) + fuids.append(s[1]) + sids = sids[::-1] + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(sids), np.array(uids), np.array(fuids) + + +def ployfit(y, x=None, order=20): + """ + fit data (one-d array) by a ploynominal function + return the fitted one-d array + """ + if x == None: + x = range(len(y)) + pol = np.polyfit(x, y, order) + return np.polyval(pol, x) + + +def check_bad_data_points( + data, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + path=None, + return_ylim=False, +): + """ + data: 1D array + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + """ + if good_start == None: + good_start = 0 + if good_end == None: + good_end = len(data) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end, len(data))] + + d_ = data[good_start:good_end] + + if fit: + pfit = ployfit(d_, order=polyfit_order) + d = d_ - pfit + else: + d = d_ + pfit = np.ones_like(d) * data.mean() + + ymin = d.mean() - scale * d.std() + ymax = d.mean() + scale * d.std() + + if plot: + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(d_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title="Find Bad Points", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + d, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(d_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(d_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="", + legend_size=legend_size, + ) + + if path != None: + fp = path + "%s" % (uid) + "_find_bad_points" + ".png" + plt.savefig(fp, dpi=fig.dpi) + bd2 = list(np.where(np.abs(d - d.mean()) > scale * d.std())[0] + good_start) + + if return_ylim: + return np.array(bd1 + bd2 + bd3), ymin, ymax, pfit + else: + return np.array(bd1 + bd2 + bd3), pfit + + +def get_bad_frame_list( + imgsum, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + uid="uid", + path=None, + return_ylim=False, +): + """ + imgsum: the sum intensity of a time series + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + """ + if good_start == None: + good_start = 0 + if good_end == None: + good_end = len(imgsum) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end, len(imgsum))] + + imgsum_ = imgsum[good_start:good_end] + + if fit: + pfit = ployfit(imgsum_, order=polyfit_order) + data = imgsum_ - pfit + else: + data = 
imgsum_ + pfit = np.ones_like(data) * data.mean() + + ymin = data.mean() - scale * data.std() + ymax = data.mean() + scale * data.std() + + if plot: + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(imgsum_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title=uid + "_imgsum", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + data, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(imgsum_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(imgsum_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="imgsum_to_find_bad_frame", + legend_size=legend_size, + ) + + if path != None: + fp = path + "%s" % (uid) + "_imgsum_analysis" + ".png" + plt.savefig(fp, dpi=fig.dpi) + + bd2 = list(np.where(np.abs(data - data.mean()) > scale * data.std())[0] + good_start) + + if return_ylim: + return np.array(bd1 + bd2 + bd3), ymin, ymax + else: + return np.array(bd1 + bd2 + bd3) + + +def save_dict_csv(mydict, filename, mode="w"): + import csv + + with open(filename, mode) as csv_file: + spamwriter = csv.writer(csv_file) + for key, value in mydict.items(): + spamwriter.writerow([key, value]) + + +def read_dict_csv(filename): + import csv + + with open(filename, "r") as csv_file: + reader = csv.reader(csv_file) + mydict = dict(reader) + return mydict + + +def find_bad_pixels(FD, bad_frame_list, uid="uid"): + bpx = [] + bpy = [] + for n in bad_frame_list: + if n >= FD.beg and n <= FD.end: + f = FD.rdframe(n) + w = np.where(f == f.max()) + if len(w[0]) == 1: + bpx.append(w[0][0]) + bpy.append(w[1][0]) + + return trans_data_to_pd([bpx, bpy], label=[uid + "_x", uid + "_y"], dtype="list") + + +def mask_exclude_badpixel(bp, mask, uid): + + for i in range(len(bp)): + mask[int(bp[bp.columns[0]][i]), int(bp[bp.columns[1]][i])] = 0 + return mask + + +def print_dict(dicts, keys=None): + """ + print keys: values in a dicts + if keys is None: print all the keys + """ + if keys == None: + keys = list(dicts.keys()) + for k in keys: + try: + print("%s--> %s" % (k, dicts[k])) + except: + pass + + +def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): + """ + Jan 25, 2018 add default_dec opt + + Y.G. Dev Dec 8, 2016 + + Get metadata from a uid + + - Adds detector key with detector name + + Parameters: + uid: the unique data acquisition id + kwargs: overwrite the meta data, for example + get_meta_data( uid = uid, sample = 'test') --> will overwrtie the meta's sample to test + return: + meta data of the uid: a dictionay + with keys: + detector + suid: the simple given uid + uid: full uid + filename: the full path of the data + start_time: the data acquisition starting time in a human readable manner + And all the input metadata + """ + + if "verbose" in kwargs.keys(): # added: option to suppress output + verbose = kwargs["verbose"] + else: + verbose = True + + import time + + header = db[uid] + md = {} + + md["suid"] = uid # short uid + try: + md["filename"] = get_sid_filenames(header)[2][0] + except: + md["filename"] = "N.A." + + devices = sorted(list(header.devices())) + if len(devices) > 1: + if verbose: # added: mute output + print( + "More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'." 
+ % default_dec + ) + # raise ValueError("More than one device. This would have unintented consequences.") + dec = devices[0] + for dec_ in devices: + if default_dec in dec_: + dec = dec_ + + # print(dec) + # detector_names = sorted( header.start['detectors'] ) + detector_names = sorted(get_detectors(db[uid])) + # if len(detector_names) > 1: + # raise ValueError("More than one det. This would have unintented consequences.") + detector_name = detector_names[0] + # md['detector'] = detector_name + md["detector"] = get_detector(header) + # print( md['detector'] ) + new_dict = header.config_data(dec)["primary"][0] + for key, val in new_dict.items(): + newkey = key.replace(detector_name + "_", "") + md[newkey] = val + + # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): + # md[ k[len(dec)+1:] ]= v + + try: + md.update(header.start["plan_args"].items()) + md.pop("plan_args") + except: + pass + md.update(header.start.items()) + + # print(header.start.time) + md["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.start["time"])) + md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"])) + try: # added: try to handle runs that don't contain image data + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + md["img_shape"] = descriptor["data_keys"][md["detector"]]["shape"][:2][::-1] + except: + if verbose: + print("couldn't find image shape...skip!") + else: + pass + md.update(kwargs) + + # for k, v in sorted(md.items()): + # ... + # print(f'{k}: {v}') + + return md + + +def get_max_countc(FD, labeled_array): + """YG. 2016, Nov 18 + Compute the max intensity of ROIs in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. 
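# Usage sketch (requires a live CHX databroker session; the uid below is a placeholder,
# not real data): a typical call of get_meta_data defined above and a few of the keys it fills in.
# md = get_meta_data('<uid>', default_dec='eiger', verbose=False)
# print(md['suid'], md['detector'], md['start_time'], md.get('img_shape'))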
If None, this function will extract averages for all + ROIs + + Returns + ------- + max_intensity : a float + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + + if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (FD.md["ncols"], FD.md["nrows"], labeled_array.shape[0], labeled_array.shape[1]) + ) + + max_inten = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get max intensity of ROIs in all frames"): + try: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + max_inten = max(max_inten, np.max(v[w])) + except: + pass + return max_inten + + +def create_polygon_mask(image, xcorners, ycorners): + """ + Give image and x/y coners to create a polygon mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask + return bst_mask + + +def create_rectangle_mask(image, xcorners, ycorners): + """ + Give image and x/y coners to create a rectangle mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask + return bst_mask + + +def create_multi_rotated_rectangle_mask(image, center=None, length=100, width=50, angles=[0]): + """Developed at July 10, 2017 by Y.G.@CHX, NSLS2 + Create multi rectangle-shaped mask by rotating a rectangle with a list of angles + The original rectangle is defined by four corners, i.e., + [ (center[1] - width//2, center[0]), + (center[1] + width//2, center[0]), + (center[1] + width//2, center[0] + length), + (center[1] - width//2, center[0] + length) + ] + + Parameters: + image: 2D numpy array, to give mask shape + center: integer list, if None, will be the center of the image + length: integer, the length of the non-ratoted rectangle + width: integer, the width of the non-ratoted rectangle + angles: integer list, a list of rotated angles + + Return: + mask: 2D bool-type numpy array + """ + + from skimage.draw import polygon + from skimage.transform import rotate + + cx, cy = center + imy, imx = image.shape + mask = np.zeros(image.shape, dtype=bool) + wy = length + wx = width + x = np.array([max(0, cx - wx // 2), min(imx, cx + wx // 2), min(imx, cx + wx // 2), max(0, cx - wx // 2)]) + y = np.array([cy, cy, min(imy, cy + wy), min(imy, cy + wy)]) + rr, cc = polygon(y, x, shape=image.shape) + mask[rr, cc] = 1 + mask_rot = np.zeros(image.shape, dtype=bool) + for angle in angles: + mask_rot += np.array(rotate(mask, angle, center=center), dtype=bool) # , preserve_range=True) + return ~mask_rot + + +def create_wedge(image, center, radius, wcors, acute_angle=True): + """YG develop at June 18, 2017, @CHX 
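# Usage sketch (editor's illustration with a synthetic image): the polygon / rotated-rectangle
# helpers defined above. create_polygon_mask returns True inside the polygon, while
# create_multi_rotated_rectangle_mask returns the complement (the rotated rectangles masked out).
import numpy as np
from pyCHX.chx_generic_functions import create_polygon_mask, create_multi_rotated_rectangle_mask

img = np.zeros((100, 120))
tri = create_polygon_mask(img, xcorners=[10, 60, 10], ycorners=[10, 10, 80])
bars = create_multi_rotated_rectangle_mask(img, center=[60, 40], length=30, width=8, angles=[0, 45, 90])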
+ Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + cy, cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like(image, dtype=bool) + rr, cc = disk((cy, cx), radius, shape=image.shape) + maskc[rr, cc] = 1 + + maskp = np.zeros_like(image, dtype=bool) + x = np.array(x) + y = np.array(y) + print(x, y) + rr, cc = polygon(y, x, shape=image.shape) + maskp[rr, cc] = 1 + if acute_angle: + return maskc * maskp + else: + return maskc * ~maskp + + +def create_cross_mask( + image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, center_disk=True, center_radius=10 +): + """ + Give image and the beam center to create a cross-shaped mask + wy_left: the width of left h-line + wy_right: the width of rigth h-line + wx_up: the width of up v-line + wx_down: the width of down v-line + center_disk: if True, create a disk with center and center_radius + + Return: + the cross mask + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + cx, cy = center + bst_mask = np.zeros_like(image, dtype=bool) + ### + # for right part + wy = wy_right + x = np.array([cx, imx, imx, cx]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for left part + wy = wy_left + x = np.array([0, cx, cx, 0]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for up part + wx = wx_up + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([cy, cy, imy, imy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for low part + wx = wx_down + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([0, 0, cy, cy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + if center_radius != 0: + rr, cc = disk((cy, cx), center_radius, shape=bst_mask.shape) + bst_mask[rr, cc] = 1 + + full_mask = ~bst_mask + + return full_mask + + +def generate_edge(centers, width): + """YG. 10/14/2016 + give centers and width (number or list) to get edges""" + edges = np.zeros([len(centers), 2]) + edges[:, 0] = centers - width + edges[:, 1] = centers + width + return edges + + +def export_scan_scalar( + uid, x="dcm_b", y=["xray_eye1_stats1_total"], path="/XF11ID/analysis/2016_3/commissioning/Results/" +): + """YG. 
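# Usage sketch (synthetic frame, made-up beam center): create_cross_mask defined above
# masks out a cross through the beam center plus an optional central disk (0 = masked, 1 = kept).
import numpy as np
from pyCHX.chx_generic_functions import create_cross_mask

frame = np.ones((200, 240))
cross = create_cross_mask(frame, center=[120, 100], wy_left=5, wy_right=5, wx_up=5, wx_down=5, center_radius=15)
masked = frame * cross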
10/17/2016 + export uid data to a txt file + uid: unique scan id + x: the x-col + y: the y-cols + path: save path + Example: + data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' ) + A plot for the data: + d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') + + """ + from databroker import DataBroker as db + + from pyCHX.chx_generic_functions import trans_data_to_pd + + hdr = db[uid] + print(hdr.fields()) + data = db[uid].table() + xp = data[x] + datap = np.zeros([len(xp), len(y) + 1]) + datap[:, 0] = xp + for i, yi in enumerate(y): + datap[:, i + 1] = data[yi] + + datap = trans_data_to_pd(datap, label=[x] + [yi for yi in y]) + datap.to_csv(path + "uid=%s.csv" % uid) + return datap + + +##### +# load data by databroker + + +def get_flatfield(uid, reverse=False): + import h5py + + detector = get_detector(db[uid]) + sud = get_sid_filenames(db[uid]) + master_path = "%s_master.h5" % (sud[2][0]) + print(master_path) + f = h5py.File(master_path, "r") + k = "entry/instrument/detector/detectorSpecific/" # data_collection_date' + d = np.array(f[k]["flatfield"]) + f.close() + if reverse: + d = reverse_updown(d) + + return d + + +def get_detector(header): + """Get the first detector image string by giving header""" + keys = get_detectors(header) + for k in keys: + if "eiger" in k: + return k + + +def get_detectors(header): + """Get all the detector image strings by giving header""" + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + keys = [k for k, v in descriptor["data_keys"].items() if "external" in v] + return sorted(set(keys)) + return [] + + +def get_full_data_path(uid): + """A dirty way to get full data path""" + header = db[uid] + d = header.db + s = list(d.get_documents(db[uid])) + # print(s[2]) + p = s[2][1]["resource_path"] + p2 = s[3][1]["datum_kwargs"]["seq_id"] + # print(p,p2) + return p + "_" + str(p2) + "_master.h5" + + +def get_sid_filenames(hdr, verbose=False): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(hdr,verbose=False) + hdr = db[uid] + returns (scan_id, uid, filepath) + LW 04/30/2024 + """ + import glob + from time import localtime, strftime + + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5"), + ) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2]) == 0: + if verbose: + print('could not find detector filename from "data_path" in metadata: %s' % start_doc["data path"]) + else: + if verbose: + print('Found detector filename from "data_path" in metadata!') + success = True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path = start_doc["data path"][:-11] + strftime("%Y/%m/%d/", localtime(start_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("could not find detector filename in %s" % data_path) + else: + if verbose: + print("Found detector filename in %s" % data_path) + success = True + + if ( + not success + ): # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path = start_doc["data path"][:-11] + 
strftime("%Y/%m/%d/", localtime(stop_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("Sorry, could not find detector filename....") + else: + if verbose: + print("Found detector filename in %s" % data_path) + success = True + return ret + + +# def get_sid_filenames(header): +# """YG. Dev Jan, 2016 +# Get a bluesky scan_id, unique_id, filename by giveing uid + +# Parameters +# ---------- +# header: a header of a bluesky scan, e.g. db[-1] + +# Returns +# ------- +# scan_id: integer +# unique_id: string, a full string of a uid +# filename: sring + +# Usuage: +# sid,uid, filenames = get_sid_filenames(db[uid]) + +# """ +# from collections import defaultdict +# from glob import glob +# from pathlib import Path + +# filepaths = [] +# resources = {} # uid: document +# datums = defaultdict(list) # uid: List(document) +# for name, doc in header.documents(): +# if name == "resource": +# resources[doc["uid"]] = doc +# elif name == "datum": +# datums[doc["resource"]].append(doc) +# elif name == "datum_page": +# for datum in event_model.unpack_datum_page(doc): +# datums[datum["resource"]].append(datum) +# for resource_uid, resource in resources.items(): +# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) +# if 'eiger' not in resource['spec'].lower(): +# continue +# for datum in datums[resource_uid]: +# dm_kw = datum["datum_kwargs"] +# seq_id = dm_kw['seq_id'] +# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') +# filepaths.extend(new_filepaths) +# return header.start['scan_id'], header.start['uid'], filepaths + + +def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): + """ + load data as dask-array + get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) + get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...) 
+ load_dask_data(uid,detector,reverse=False,rot90=False) + uid: uid (str) + detector: md['detector'] + mask_path_full: current standard would be _mask_path_+'pixel_masks/' + returns detector_images(dask-array), image_md + LW 04/26/2024 + """ + import json + + import dask + + hdr = db[uid] + det = detector.split("_image")[0] + # collect image metadata from loading single image + img_md_dict = { + "detector_distance": "det_distance", + "incident_wavelength": "wavelength", + "frame_time": "cam_acquire_period", + "count_time": "cam_acquire_time", + "num_images": "cam_num_images", + "beam_center_x": "beam_center_x", + "beam_center_y": "beam_center_y", + } + img_md = {} + for k in list(img_md_dict.keys()): + img_md[k] = hdr.config_data(det)["primary"][0]["%s_%s" % (det, img_md_dict[k])] + if detector in ["eiger4m_single_image", "eiger1m_single_image", "eiger500K_single_image"]: + img_md.update({"y_pixel_size": 7.5e-05, "x_pixel_size": 7.5e-05}) + got_pixel_mask = True + else: + img_md.update({"y_pixel_size": None, "x_pixel_size": None}) + got_pixel_mask = False + # load pixel mask from static location + if got_pixel_mask: + # json_open = open(_mask_path_ + "pixel_masks/pixel_mask_compression_%s.json" % detector.split("_")[0]) + json_open = open(mask_path_full + "pixel_mask_compression_%s.json" % detector.split("_")[0]) + mask_dict = json.load(json_open) + img_md["pixel_mask"] = np.array(mask_dict["pixel_mask"]) + img_md["binary_mask"] = np.array(mask_dict["binary_mask"]) + del mask_dict + + # load image data as dask-arry: + dimg = hdr.xarray_dask()[detector][0] + if reverse: + dimg = dask.array.flip(dimg, axis=(1, 1)) + if rot90: + dimg = dask.array.rot90(dimg, axes=(1, 2)) + return dimg, img_md + + +def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, rot90=False): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + fill: True to fill data + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + + if False: + ATTEMPTS = 0 + for attempt in range(ATTEMPTS): + try: + (ev,) = hdr.events(fields=[detector], fill=fill) + break + + except Exception: + print("Trying again ...!") + if attempt == ATTEMPTS - 1: + # We're out of attempts. Raise the exception to help with debugging. + raise + else: + # We didn't succeed + raise Exception("Failed after {} repeated attempts".format(ATTEMPTS)) + + # TODO(mrakitin): replace with the lazy loader (when it's implemented): + imgs = list(hdr.data(detector)) + + if len(imgs[0]) >= 1: + md = imgs[0].md + imgs = pims.pipeline(lambda img: img)(imgs[0]) + imgs.md = md + + if reverse: + md = imgs.md + imgs = reverse_updown(imgs) # Why not np.flipud? + imgs.md = md + + if rot90: + md = imgs.md + imgs = rot90_clockwise(imgs) # Why not np.flipud? 
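        # reverse_updown / rot90_clockwise are lazy pims pipelines, so the frame metadata
        # is stashed beforehand and re-attached to the transformed sequence here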
+ imgs.md = md + + return imgs + + +def mask_badpixels(mask, detector): + """ + Mask known bad pixel from the giveing mask + + """ + if detector == "eiger1m_single_image": + # to be determined + mask = mask + elif detector == "eiger4m_single_image" or detector == "image": + mask[513:552, :] = 0 + mask[1064:1103, :] = 0 + mask[1615:1654, :] = 0 + mask[:, 1029:1041] = 0 + mask[:, 0] = 0 + mask[0:, 2069] = 0 + mask[0] = 0 + mask[2166] = 0 + + elif detector == "eiger500K_single_image": + # to be determined + mask = mask + else: + mask = mask + return mask + + +def load_data2(uid, detector="eiger4m_single_image"): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + flag = 1 + while flag < 4 and flag != 0: + try: + (ev,) = hdr.events(fields=[detector]) + flag = 0 + except: + flag += 1 + print("Trying again ...!") + + if flag: + print("Can't Load Data!") + uid = "00000" # in case of failling load data + imgs = 0 + else: + imgs = ev["data"][detector] + + # print (imgs) + return imgs + + +def psave_obj(obj, filename): + """save an object with filename by pickle.dump method + This function automatically add '.pkl' as filename extension + Input: + obj: the object to be saved + filename: filename (with full path) to be saved + Return: + None + """ + with open(filename + ".pkl", "wb") as f: + pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) + + +def pload_obj(filename): + """load a pickled filename + This function automatically add '.pkl' to filename extension + Input: + filename: filename (with full path) to be saved + Return: + load the object by pickle.load method + """ + with open(filename + ".pkl", "rb") as f: + return pickle.load(f) + + +def load_mask(path, mask_name, plot_=False, reverse=False, rot90=False, *argv, **kwargs): + """load a mask file + the mask is a numpy binary file (.npy) + + Parameters + ---------- + path: the path of the mask file + mask_name: the name of the mask file + plot_: a boolen type + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + Returns + ------- + mask: array + if plot_ =True, will show the mask + + Usuage: + mask = load_mask( path, mask_name, plot_ = True ) + """ + + mask = np.load(path + mask_name) + mask = np.array(mask, dtype=np.int32) + if reverse: + mask = mask[::-1, :] + if rot90: + mask = np.rot90(mask) + if plot_: + show_img(mask, *argv, **kwargs) + return mask + + +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0): + """create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded + + """ + bst_mask = np.ones_like(img, dtype=bool) + if center != None: + from skimage.draw import disk + + imy, imx = img.shape + cy, cx = center + rr, cc = disk((cy, cx), center_radius, shape=img.shape) + bst_mask[rr, cc] = 0 + if outer_radius: + bst_mask = 
np.zeros_like(img, dtype=bool) + rr2, cc2 = disk((cy, cx), outer_radius, shape=img.shape) + bst_mask[rr2, cc2] = 1 + bst_mask[rr, cc] = 0 + hmask = np.ones_like(img) + hmask[np.where(img * bst_mask > threshold)] = 0 + return hmask + + +def apply_mask(imgs, mask): + """apply mask to imgs to produce a generator + + Usuages: + imgsa = apply_mask( imgs, mask ) + good_series = apply_mask( imgs[good_start:], mask ) + + """ + return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask + + +def reverse_updown(imgs): + """reverse imgs upside down to produce a generator + + Usuages: + imgsr = reverse_updown( imgs) + + + """ + return pims.pipeline(lambda img: img[::-1, :])(imgs) # lazily apply mask + + +def rot90_clockwise(imgs): + """reverse imgs upside down to produce a generator + + Usuages: + imgsr = rot90_clockwise( imgs) + + """ + return pims.pipeline(lambda img: np.rot90(img))(imgs) # lazily apply mask + + +def RemoveHot(img, threshold=1e7, plot_=True): + """Remove hot pixel from img""" + + mask = np.ones_like(np.array(img)) + badp = np.where(np.array(img) >= threshold) + if len(badp[0]) != 0: + mask[badp] = 0 + if plot_: + show_img(mask) + return mask + + +############ +###plot data + + +def show_img( + image, + ax=None, + label_array=None, + alpha=0.5, + interpolation="nearest", + xlim=None, + ylim=None, + save=False, + image_name=None, + path=None, + aspect=None, + logs=False, + vmin=None, + vmax=None, + return_fig=False, + cmap="viridis", + show_time=False, + file_name=None, + ylabel=None, + xlabel=None, + extent=None, + show_colorbar=True, + tight=True, + show_ticks=True, + save_format="png", + dpi=None, + center=None, + origin="lower", + lab_fontsize=16, + tick_size=12, + colorbar_fontsize=8, + use_mat_imshow=False, + *argv, + **kwargs, +): + """YG. 
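# Usage sketch (synthetic frame, made-up threshold and center): create_hot_pixel_mask defined
# above flags pixels above `threshold` (0 = hot, 1 = good), optionally sparing a disk around
# the beam center; the result is usually combined with the detector mask and applied lazily
# with apply_mask(imgs, mask).
import numpy as np
from pyCHX.chx_generic_functions import create_hot_pixel_mask

frame = np.random.poisson(5, size=(100, 100)).astype(float)
frame[40, 60] = 1e9                         # plant one artificial hot pixel
hmask = create_hot_pixel_mask(frame, threshold=1e6, center=[50, 50], center_radius=10)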
Sep26, 2017 Add label_array/alpha option to show a mask on top of image + + a simple function to show image by using matplotlib.plt imshow + pass *argv,**kwargs to imshow + + Parameters + ---------- + image : array + Image to show + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax = ax + + if center != None: + plot1D(center[1], center[0], ax=ax, c="b", m="o", legend="") + if not logs: + if not use_mat_imshow: + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + vmin=vmin, + vmax=vmax, + extent=extent, + ) # vmin=0,vmax=1, + else: + im = ax.imshow( + image, origin=origin, cmap=cmap, interpolation=interpolation, vmin=vmin, vmax=vmax, extent=extent + ) # vmin=0,vmax=1, + else: + if not use_mat_imshow: + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) + else: + im = ax.imshow( + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) + if label_array != None: + im2 = show_label_array(ax, label_array, alpha=alpha, cmap=cmap, interpolation=interpolation) + + ax.set_title(image_name) + if xlim != None: + ax.set_xlim(xlim) + if ylim != None: + ax.set_ylim(ylim) + + if not show_ticks: + ax.set_yticks([]) + ax.set_xticks([]) + else: + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + # mpl.rcParams['xtick.labelsize'] = tick_size + # mpl.rcParams['ytick.labelsize'] = tick_size + # print(tick_size) + + if ylabel != None: + # ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel(ylabel, fontsize=lab_fontsize) + if xlabel != None: + ax.set_xlabel(xlabel, fontsize=lab_fontsize) + + if aspect != None: + # aspect = image.shape[1]/float( image.shape[0] ) + ax.set_aspect(aspect) + else: + ax.set_aspect(aspect="auto") + + if show_colorbar: + cbar = fig.colorbar(im, extend="neither", spacing="proportional", orientation="vertical") + cbar.ax.tick_params(labelsize=colorbar_fontsize) + fig.set_tight_layout(tight) + if save: + if show_time: + dt = datetime.now() + CurTime = "_%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + fp = path + "%s" % (file_name) + CurTime + "." + save_format + else: + fp = path + "%s" % (image_name) + "." 
+ save_format + if dpi == None: + dpi = fig.dpi + plt.savefig(fp, dpi=dpi) + # fig.set_tight_layout(tight) + if return_fig: + return im # fig + + +def plot1D( + y, + x=None, + yerr=None, + ax=None, + return_fig=False, + ls="-", + figsize=None, + legend=None, + legend_size=None, + lw=None, + markersize=None, + tick_size=8, + *argv, + **kwargs, +): + """a simple function to plot two-column data by using matplotlib.plot + pass *argv,**kwargs to plot + + Parameters + ---------- + y: column-y + x: column-x, by default x=None, the plot will use index of y as x-axis + the other paramaters are defined same as plt.plot + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + if figsize != None: + fig, ax = plt.subplots(figsize=figsize) + else: + fig, ax = plt.subplots() + + if legend == None: + legend = " " + try: + logx = kwargs["logx"] + except: + logx = False + try: + logy = kwargs["logy"] + except: + logy = False + + try: + logxy = kwargs["logxy"] + except: + logxy = False + + if logx == True and logy == True: + logxy = True + + try: + marker = kwargs["marker"] + except: + try: + marker = kwargs["m"] + except: + marker = next(markers_) + try: + color = kwargs["color"] + except: + try: + color = kwargs["c"] + except: + color = next(colors_) + + if x == None: + x = range(len(y)) + if yerr == None: + ax.plot( + x, + y, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) + else: + ax.errorbar( + x, + y, + yerr, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) + if logx: + ax.set_xscale("log") + if logy: + ax.set_yscale("log") + if logxy: + ax.set_xscale("log") + ax.set_yscale("log") + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + if "xlabel" in kwargs.keys(): + ax.set_xlabel(kwargs["xlabel"]) + if "ylabel" in kwargs.keys(): + ax.set_ylabel(kwargs["ylabel"]) + + if "title" in kwargs.keys(): + title = kwargs["title"] + else: + title = "plot" + ax.set_title(title) + # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend != "") and (legend != None): + ax.legend(loc="best", fontsize=legend_size) + if "save" in kwargs.keys(): + if kwargs["save"]: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + # fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs["path"] + "%s" % (title) + ".png" + plt.savefig(fp, dpi=fig.dpi) + if return_fig: + return fig + + +### + + +def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): + """Check the first frame with shutter open + + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined as shtter close + time_edge: the searching frame number range + + return: + shutter_open_frame: a integer, the first frame number with open shutter + + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + + """ + imgsum = np.array([np.sum(img) for img in data_series[time_edge[0] : time_edge[1] : 1]]) + if plot_: + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid=%s--imgsum" % uid) + ax.set_xlabel("Frame") + ax.set_ylabel("Total_Intensity") + # plt.show() + 
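    # the first frame whose summed intensity exceeds min_inten is reported as the
    # first frame with the shutter open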
shutter_open_frame = np.where(np.array(imgsum) > min_inten)[0][0] + print("The first frame with open shutter is : %s" % shutter_open_frame) + return shutter_open_frame + + +def get_each_frame_intensity( + data_series, sampling=50, bad_pixel_threshold=1e10, plot_=False, save=False, *argv, **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + imgsum = np.array([np.sum(img) for img in tqdm(data_series[::sampling], leave=True)]) + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + bad_frame_list = np.where(np.array(imgsum) > bad_pixel_threshold)[0] + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list + + +def create_time_slice(N, slice_num, slice_width, edges=None): + """create a ROI time regions""" + if edges != None: + time_edge = edges + else: + if slice_num == 1: + time_edge = [[0, N]] + else: + tstep = N // slice_num + te = np.arange(0, slice_num + 1) * tstep + tc = np.int_((te[:-1] + te[1:]) / 2)[1:-1] + if slice_width % 2: + sw = slice_width // 2 + 1 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw + 1, s + sw] for s in tc] + + [[N - slice_width, N]] + ) + else: + sw = slice_width // 2 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw, s + sw] for s in tc] + + [[N - slice_width, N]] + ) + + return np.array(time_edge) + + +def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nearest", **kwargs): + """ + YG. Sep 26, 2017 + Modified show_label_array(ax, label_array, cmap=None, **kwargs) + from https://github.com/Nikea/xray-vision/blob/master/xray_vision/mpl_plotting/roi.py + Display a labeled array nicely + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + label_array: ndarray + Expected to be an unsigned integer array. 
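# Usage sketch (editor's illustration, frame counts are made up): create_time_slice defined
# above returns [start, stop] frame-index windows; here three windows of width 100 spread
# over 1000 frames.
from pyCHX.chx_generic_functions import create_time_slice

edges = create_time_slice(N=1000, slice_num=3, slice_width=100)
# -> array([[   0,  100], [ 449,  549], [ 900, 1000]])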
0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use, defaults to 'Paired' + Returns + ------- + img : AxesImage + The artist added to the axes + """ + if cmap == None: + cmap = "viridis" + # print(cmap) + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under("w", 0) + vmin = max(0.5, kwargs.pop("vmin", 0.5)) + im = ax.imshow(label_array, cmap=cmap, interpolation=interpolation, vmin=vmin, **kwargs) + if aspect == None: + ax.set_aspect(aspect="auto") + # ax.set_aspect('equal') + return im + + +def show_label_array_on_image( + ax, + image, + label_array, + cmap=None, + norm=None, + log_img=True, + alpha=0.3, + vmin=0.1, + vmax=5, + imshow_cmap="gray", + **kwargs, +): # norm=LogNorm(), + """ + This will plot the required ROI's(labeled array) on the image + + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + image : array + The image array + label_array : array + Expected to be an unsigned integer array. 0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use for plotting the label_array, defaults to 'None' + imshow_cmap : str or colormap, optional + Color map to use for plotting the image, defaults to 'gray' + norm : str, optional + Normalize scale data, defaults to 'Lognorm()' + Returns + ------- + im : AxesImage + The artist added to the axes + im_label : AxesImage + The artist added to the axes + """ + ax.set_aspect("equal") + + # print (vmin, vmax ) + if log_img: + im = ax.imshow( + image, cmap=imshow_cmap, interpolation="none", norm=LogNorm(vmin, vmax), **kwargs + ) # norm=norm, + else: + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", vmin=vmin, vmax=vmax, **kwargs) # norm=norm, + + im_label = mpl_plot.show_label_array( + ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, **kwargs + ) # norm=norm, + + return im, im_label + + +def show_ROI_on_image( + image, + ROI, + center=None, + rwidth=400, + alpha=0.3, + label_on=True, + save=False, + return_fig=False, + rect_reqion=None, + log_img=True, + vmin=0.01, + vmax=5, + show_ang_cor=False, + cmap=cmap_albula, + fig_ax=None, + uid="uid", + path="", + aspect=1, + show_colorbar=True, + show_roi_edge=False, + *argv, + **kwargs, +): + """show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center + + """ + + if RUN_GUI: + fig = Figure(figsize=(8, 8)) + axes = fig.add_subplot(111) + elif fig_ax != None: + fig, axes = fig_ax + else: + fig, axes = plt.subplots() # plt.subplots(figsize=(8,8)) + + # print( vmin, vmax) + # norm=LogNorm(vmin, vmax) + + axes.set_title("%s_ROI_on_Image" % uid) + if log_img: + if vmin == 0: + vmin += 1e-10 + + vmax = max(1, vmax) + if not show_roi_edge: + # print('here') + im, im_label = show_label_array_on_image( + axes, + image, + ROI, + imshow_cmap="viridis", + cmap=cmap, + alpha=alpha, + log_img=log_img, + vmin=vmin, + vmax=vmax, + origin="lower", + ) + else: + edg = get_image_edge(ROI) + image_ = get_image_with_roi(image, ROI, scale_factor=2) + # fig, axes = plt.subplots( ) + show_img( + image_, + ax=[fig, axes], + vmin=vmin, + vmax=vmax, + logs=log_img, + image_name="%s_ROI_on_Image" % uid, + cmap=cmap, + ) + + if rect_reqion == None: + if center != None: + x1, x2 = [center[1] - rwidth, center[1] + rwidth] + y1, y2 = [center[0] - rwidth, 
center[0] + rwidth] + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) + else: + x1, x2, y1, y2 = rect_reqion + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) + + if label_on: + num_qzr = len(np.unique(ROI)) - 1 + for i in range(1, num_qzr + 1): + ind = np.where(ROI == i)[1] + indz = np.where(ROI == i)[0] + c = "%i" % i + y_val = int(indz.mean()) + x_val = int(ind.mean()) + # print (xval, y) + axes.text(x_val, y_val, c, color="b", va="center", ha="center") + if show_ang_cor: + axes.text(-0.0, 0.5, "-/+180" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(1.0, 0.5, "0" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, -0.0, "-90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, 1.0, "90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + + axes.set_aspect(aspect) + # fig.colorbar(im_label) + if show_colorbar: + if not show_roi_edge: + fig.colorbar(im) + if save: + fp = path + "%s_ROI_on_Image" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() + if return_fig: + return fig, axes, im + + +def crop_image(image, crop_mask): + """Crop the non_zeros pixels of an image to a new image""" + from skimage.util import crop, pad + + pxlst = np.where(crop_mask.ravel())[0] + dims = crop_mask.shape + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + # x and y are flipped??? + # matrix notation!!! + pixely = pxlst % imgwidthy + pixelx = pxlst // imgwidthy + + minpixelx = np.min(pixelx) + minpixely = np.min(pixely) + maxpixelx = np.max(pixelx) + maxpixely = np.max(pixely) + crops = crop_mask * image + img_crop = crop(crops, ((minpixelx, imgwidthx - maxpixelx - 1), (minpixely, imgwidthy - maxpixely - 1))) + return img_crop + + +def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs): + """Get average imagef from a data_series by every sampling number to save time""" + if img_samp_index == None: + avg_img = np.average(data_series[::sampling], axis=0) + else: + avg_img = np.zeros_like(data_series[0]) + n = 0 + for i in img_samp_index: + avg_img += data_series[i] + n += 1 + avg_img = np.array(avg_img) / n + + if plot_: + fig, ax = plt.subplots() + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked Averaged Image" % uid) + fig.colorbar(im) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + return avg_img + + +def check_ROI_intensity(avg_img, ring_mask, ring_number=3, save=False, plot=True, *argv, **kwargs): + """plot intensity versus pixel of a ring + Parameters + ---------- + avg_img: 2D-array, the image + ring_mask: 2D-array + ring_number: which ring to plot + + Returns + ------- + + + """ + # print('here') + + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number]) + + if plot: + fig, ax = plt.subplots() + ax.set_title("%s--check-RIO-%s-intensity" % (uid, ring_number)) + 
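        # pixel[0][0] holds the intensity of every pixel belonging to ring `ring_number`,
        # as returned by skbeam's roi.roi_pixel_values above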
ax.plot(pixel[0][0], "bo", ls="-") + ax.set_ylabel("Intensity") + ax.set_xlabel("pixel") + if save: + path = kwargs["path"] + fp = path + "%s_Mean_intensity_of_one_ROI" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + if save: + path = kwargs["path"] + save_lists( + [range(len(pixel[0][0])), pixel[0][0]], + label=["pixel_list", "roi_intensity"], + filename="%s_Mean_intensity_of_one_ROI" % uid, + path=path, + ) + # plt.show() + return pixel[0][0] + + +# from tqdm import tqdm + + +def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None): + """calculation g2 by using a multi-tau algorithm""" + + noframes = len(image_series) # number of frames, not "no frames" + # num_buf = 8 # number of buffers + + if bad_image_process: + import skbeam.core.mask as mask_image + + bad_img_list = np.array(bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen(image_series, bad_img_list) + + if num_lev == None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + print("Bad Frames involved!") + + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(new_imgs)) + print("G2 calculation DONE!") + + else: + + if num_lev == None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series)) + print("G2 calculation DONE!") + + return g2, lag_steps + + +def run_time(t0): + """Calculate running time of a program + Parameters + ---------- + t0: time_string, t0=time.time() + The start time + Returns + ------- + Print the running time + + One usage + --------- + t0=time.time() + .....(the running code) + run_time(t0) + """ + + elapsed_time = time.time() - t0 + if elapsed_time < 60: + print("Total time: %.3f sec" % (elapsed_time)) + else: + print("Total time: %.3f min" % (elapsed_time / 60.0)) + + +def trans_data_to_pd(data, label=None, dtype="array"): + """ + convert data into pandas.DataFrame + Input: + data: list or np.array + label: the coloum label of the data + dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] + Output: + a pandas.DataFrame + """ + # lists a [ list1, list2...] all the list have the same length + import sys + + import pandas as pd + from numpy import arange, array + + if dtype == "list": + data = array(data).T + N, M = data.shape + elif dtype == "array": + data = array(data) + N, M = data.shape + else: + print("Wrong data type! 
Now only support 'list' and 'array' tpye") + + index = arange(N) + if label == None: + label = ["data%s" % i for i in range(M)] + # print label + df = pd.DataFrame(data, index=index, columns=label) + return df + + +def save_lists(data, label=None, filename=None, path=None, return_res=False, verbose=False): + """ + save_lists( data, label=None, filename=None, path=None) + + save lists to a CSV file with filename in path + Parameters + ---------- + data: list + label: the column name, the length should be equal to the column number of list + filename: the filename to be saved + path: the filepath to be saved + + Example: + save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) + """ + + M, N = len(data[0]), len(data) + d = np.zeros([N, M]) + for i in range(N): + d[i] = data[i] + + df = trans_data_to_pd(d.T, label, "array") + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = "data" + filename = os.path.join(path, filename) # +'.csv') + df.to_csv(filename) + if verbose: + print("The data was saved in: %s." % filename) + if return_res: + return df + + +def get_pos_val_overlap(p1, v1, p2, v2, Nl): + """get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + + get_pos_val_overlap( p1, v1, p2,v2, Nl) + + """ + ind = np.zeros(Nl, dtype=np.int32) + ind[p1] = np.arange(len(p1)) + 1 + w2 = np.where(ind[p2])[0] + w1 = ind[p2[w2]] - 1 + return v1[w1], v2[w2] + + +def save_arrays(data, label=None, dtype="array", filename=None, path=None, return_res=False, verbose=False): + """ + July 10, 2016, Y.G.@CHX + save_arrays( data, label=None, dtype='array', filename=None, path=None): + save data to a CSV file with filename in path + Parameters + ---------- + data: arrays + label: the column name, the length should be equal to the column number of data + dtype: array or list + filename: the filename to be saved + path: the filepath to be saved + + Example: + + save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir ) + + + """ + df = trans_data_to_pd(data, label, dtype) + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = "data" + filename_ = os.path.join(path, filename) # +'.csv') + df.to_csv(filename_) + if verbose: + print("The file: %s is saved in %s" % (filename, path)) + # print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) + if return_res: + return df + + +def cal_particle_g2(radius, viscosity, qr, taus, beta=0.2, T=298): + """YG Dev Nov 20, 2017@CHX + calculate particle g2 fucntion by giving particle radius, Q , and solution viscosity using a simple + exponetional model + Input: + radius: m + qr, list, in A-1 + visocity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + T: temperture, in K + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(-123)*298 / ( 6*np.pi * 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10**5 A2/s + 
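# Usage sketch (made-up values): trans_data_to_pd / save_lists defined above turn column
# lists into a labeled pandas DataFrame (and, for save_lists, a CSV file on disk).
import numpy as np
from pyCHX.chx_generic_functions import trans_data_to_pd, save_lists

q = np.linspace(0.01, 0.1, 5)
iq = 1.0 / q**2
df = trans_data_to_pd([q, iq], label=['q_A-1', 'Iq'], dtype='list')
# save_lists([q, iq], label=['q_A-1', 'Iq'], filename='q_Iq.csv', path='./')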
taus: time + beta: contrast + + cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4) + + """ + D0 = get_diffusion_coefficient(viscosity, radius, T=T) + g2_q1 = np.zeros(len(qr), dtype=object) + for i, q1 in enumerate(qr): + relaxation_rate = D0 * q1**2 + g2_q1[i] = simple_exponential(taus, beta=beta, relaxation_rate=relaxation_rate, baseline=1) + return g2_q1 + + +def get_Reynolds_number(flow_rate, flow_radius, fluid_density, fluid_viscosity): + """May 10, 2019, Y.G.@CHX + get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta + Reynolds_number << 1000 gives a laminar flow + flow_rate: ul/s + flow_radius: mm + fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) + fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) + + return Reynolds_number + """ + return flow_rate * 1e-6 * flow_radius * 1e-3 * 2 * fluid_density / fluid_viscosity + + +def get_Deborah_number(flow_rate, beam_size, q_vector, diffusion_coefficient): + """May 10, 2019, Y.G.@CHX + get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) + flow_rate: ul/s + beam_size: ul + q_vector: A-1 + diffusion_coefficient: A^2/s + + return Deborah_number + """ + return (flow_rate / beam_size) / (diffusion_coefficient * q_vector**2) + + +def get_viscosity(diffusion_coefficient, radius, T=298): + """May 10, 2019, Y.G.@CHX + get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient + diffusion_coefficient in unit of A^2/s + radius: m + T: K + k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant + + return visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * diffusion_coefficient * radius) * 10**20 + + +def get_diffusion_coefficient(viscosity, radius, T=298): + """July 10, 2016, Y.G.@CHX + get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity + viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + radius: m + T: K + k: 1.38064852(79)×10−23 J/T, Boltzmann constant + + return diffusion_coefficient in unit of A^2/s + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s + + get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) + + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * viscosity * radius) * 10**20 + + +def ring_edges(inner_radius, width, spacing=0, num_rings=None): + """ + Aug 02, 2016, Y.G.@CHX + ring_edges(inner_radius, width, spacing=0, num_rings=None) + + Calculate the inner and outer radius of a set of rings. + + The number of rings, their widths, and any spacing between rings can be + specified. They can be uniform or varied. + + LW 04/02/2024: fixed checking whether width and spacing are iterable + + Parameters + ---------- + inner_radius : float + inner radius of the inner-most ring + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + spacing : float or list of floats, optional + margin between rings, 0 by default + If a float, all rings will have the same spacing. If a list, + the length of the list must be one less than the number of + rings. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
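# Usage sketch: the Stokes-Einstein helpers defined above, reusing the numbers quoted in
# their docstrings (a 250 nm-radius sphere in 90:10 glycerol/water at 298 K).
from pyCHX.chx_generic_functions import get_diffusion_coefficient, get_viscosity

D0 = get_diffusion_coefficient(viscosity=0.20871, radius=250e-9, T=298)   # ~4.18e5 A^2/s
eta = get_viscosity(diffusion_coefficient=D0, radius=250e-9, T=298)       # recovers ~0.20871 N*s/m^2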
+ + Returns + ------- + edges : array + inner and outer radius for each ring + + Example + ------- + # Make two rings starting at r=1px, each 5px wide + >>> ring_edges(inner_radius=1, width=5, num_rings=2) + [(1, 6), (6, 11)] + # Make three rings of different widths and spacings. + # Since the width and spacings are given individually, the number of + # rings here is simply inferred. + >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2)) + [(1, 6), (7, 11), (13, 16)] + + """ + # All of this input validation merely checks that width, spacing, and + # num_rings are self-consistent and complete. + try: + iter(width) + width_is_list = True + except: + width_is_list = False + try: + iter(spacing) + spacing_is_list = True + except: + spacing_is_list = False + + # width_is_list = isinstance(width, collections.Iterable) + # spacing_is_list = isinstance(spacing, collections.Iterable) + if width_is_list and spacing_is_list: + if len(width) != len(spacing) + 1: + raise ValueError("List of spacings must be one less than list " "of widths.") + if num_rings == None: + try: + num_rings = len(width) + except TypeError: + try: + num_rings = len(spacing) + 1 + except TypeError: + raise ValueError( + "Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified." + ) + else: + if width_is_list: + if num_rings != len(width): + raise ValueError("num_rings does not match width list") + if spacing_is_list: + if num_rings - 1 != len(spacing): + raise ValueError("num_rings does not match spacing list") + # Now regularlize the input. + if not width_is_list: + width = np.ones(num_rings) * width + + if spacing == None: + spacing = [] + else: + if not spacing_is_list: + spacing = np.ones(num_rings - 1) * spacing + # The inner radius is the first "spacing." + all_spacings = np.insert(spacing, 0, inner_radius) + steps = np.array([all_spacings, width]).T.ravel() + edges = np.cumsum(steps).reshape(-1, 2) + return edges + + +def get_non_uniform_edges( + centers, + width=4, + number_rings=1, + spacing=0, +): + """ + YG CHX Spe 6 + get_non_uniform_edges( centers, width = 4, number_rings=3 ) + + Calculate the inner and outer radius of a set of non uniform distributed + rings by giving ring centers + For each center, there are number_rings with each of width + + LW 04/02/2024: fixed checking whether 'width' is iterable + + Parameters + ---------- + centers : float + the center of the rings + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
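# Usage sketch (pixel units are arbitrary): ring_edges defined above with a uniform width
# and spacing, letting num_rings set the count.
from pyCHX.chx_generic_functions import ring_edges

edges = ring_edges(inner_radius=50, width=10, spacing=5, num_rings=4)
# -> array([[ 50.,  60.], [ 65.,  75.], [ 80.,  90.], [ 95., 105.]])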
+ + Returns + ------- + edges : array + inner and outer radius for each ring + """ + + if number_rings == None: + number_rings = 1 + edges = np.zeros([len(centers) * number_rings, 2]) + + try: + iter(width) + except: + width = np.ones_like(centers) * width + for i, c in enumerate(centers): + edges[i * number_rings : (i + 1) * number_rings, :] = ring_edges( + inner_radius=c - width[i] * number_rings / 2, width=width[i], spacing=spacing, num_rings=number_rings + ) + return edges + + +def trans_tf_to_td(tf, dtype="dframe"): + """July 02, 2015, Y.G.@CHX + Translate epoch time to string + """ + from datetime import datetime + + import numpy as np + import pandas as pd + + """translate time.float to time.date, + td.type dframe: a dataframe + td.type list, a list + """ + if dtype == "dframe": + ind = tf.index + else: + ind = range(len(tf)) + td = np.array([datetime.fromtimestamp(tf[i]) for i in ind]) + return td + + +def trans_td_to_tf(td, dtype="dframe"): + """July 02, 2015, Y.G.@CHX + Translate string to epoch time + + """ + import time + + import numpy as np + + """translate time.date to time.float, + td.type dframe: a dataframe + td.type list, a list + """ + if dtype == "dframe": + ind = td.index + else: + ind = range(len(td)) + # tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([time.mktime(td[i].timetuple()) for i in ind]) + return tf + + +def get_averaged_data_from_multi_res( + multi_res, keystr="g2", different_length=True, verbose=False, cal_errorbar=False +): + """Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. + keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results + + """ + maxM = 0 + mkeys = multi_res.keys() + if not different_length: + n = 0 + for i, key in enumerate(list(mkeys)): + keystri = multi_res[key][keystr] + if i == 0: + keystr_average = keystri + else: + keystr_average += keystri + n += 1 + keystr_average /= n + + else: + length_dict = {} + D = 1 + for i, key in enumerate(list(mkeys)): + if verbose: + print(i, key) + shapes = multi_res[key][keystr].shape + M = shapes[0] + if i == 0: + if len(shapes) == 2: + D = 2 + maxN = shapes[1] + elif len(shapes) == 3: + D = 3 + maxN = shapes[2] # in case of two-time correlation + if (M) not in length_dict: + length_dict[(M)] = 1 + else: + length_dict[(M)] += 1 + maxM = max(maxM, M) + # print( length_dict ) + avg_count = {} + sk = np.array(sorted(length_dict)) + for i, k in enumerate(sk): + avg_count[k] = np.sum(np.array([length_dict[k] for k in sk[i:]])) + # print(length_dict, avg_count) + if D == 2: + # print('here') + keystr_average = np.zeros([maxM, maxN]) + elif D == 3: + keystr_average = np.zeros([maxM, maxM, maxN]) + else: + keystr_average = np.zeros([maxM]) + for i, key in enumerate(list(mkeys)): + keystri = multi_res[key][keystr] + Mi = keystri.shape[0] + if D != 3: + keystr_average[:Mi] += keystri + else: + keystr_average[:Mi, :Mi, :] += keystri + if D != 3: + keystr_average[: sk[0]] /= avg_count[sk[0]] + else: + keystr_average[: sk[0], : sk[0], :] /= avg_count[sk[0]] + for i in range(0, len(sk) - 1): + if D != 3: + keystr_average[sk[i] : sk[i + 1]] /= avg_count[sk[i + 1]] + else: + keystr_average[sk[i] : sk[i + 1], sk[i] : sk[i + 1], :] /= avg_count[sk[i + 1]] + + return keystr_average + + +def save_g2_general(g2, 
taus, qr=None, qz=None, uid="uid", path=None, return_res=False): + """Y.G. Dec 29, 2016 + + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: + + """ + + df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) + t, qs = g2.shape + if qr is None: + qr = range(qs) + if qz is None: + df.columns = ["tau"] + [str(qr_) for qr_ in qr] + else: + df.columns = ["tau"] + [str(qr_) + "_" + str(qz_) for (qr_, qz_) in zip(qr, qz)] + + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + + # if filename is None: + + filename = uid + # filename = 'uid=%s--g2.csv' % (uid) + # filename += '-uid=%s-%s.csv' % (uid,CurTime) + # filename += '-uid=%s.csv' % (uid) + filename1 = os.path.join(path, filename) + df.to_csv(filename1) + print("The correlation function is saved in %s with filename as %s" % (path, filename)) + if return_res: + return df + + +########### +# *for g2 fit and plot + + +def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): + return beta * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + + +def simple_exponential(x, beta, relaxation_rate, baseline=1): + """relation_rate: unit 1/s""" + return beta * np.exp(-2 * relaxation_rate * x) + baseline + + +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * relaxation_rate * x) + baseline + + +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + + +def flow_para_function_with_vibration(x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = 1 + amp * np.cos(2 * np.pi * freq * x) + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * vibration_part * Diff_part * Flow_part + baseline + + +def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): + """flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )""" + + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): + """Nov 9, 2017 Basically, make q vector to (qr, angle), + ###relaxation_rate is actually a diffusion rate + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + Diffusion part: np.exp( -2*D q^2 *tau ) + q_ang: would be np.radians( ang - 90 ) + + """ + + Diff_part = np.exp(-2 * (diffusion * qr**2 * x) ** alpha) + if flow_velocity != 0: + if np.cos(q_ang) >= 1e-8: + Flow_part = ( + np.pi**2 + / (16 * x * flow_velocity * qr * abs(np.cos(q_ang))) + * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity * qr * abs(np.cos(q_ang))))) ** 2 + ) + else: + Flow_part = 1 + else: + Flow_part = 1 + return beta * Diff_part * Flow_part + baseline + + +def get_flow_velocity(average_velocity, shape_factor): + + return average_velocity * (1 - shape_factor) / (1 + shape_factor) + + +def stretched_flow_para_function(x, beta, relaxation_rate, alpha, flow_velocity, 
baseline=1): + """ + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + """ + Diff_part = np.exp(-2 * (relaxation_rate * x) ** alpha) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def get_g2_fit_general_two_steps( + g2, taus, function="simple_exponential", second_fit_range=[0, 20], sequential_fit=False, *argv, **kwargs +): + """ + Fit g2 in two steps, + i) Using the "function" to fit whole g2 to get baseline and beta (contrast) + ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function + """ + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(g2, taus, function, sequential_fit, *argv, **kwargs) + guess_values = {} + for k in list(g2_fit_result[0].params.keys()): + guess_values[k] = np.array([g2_fit_result[i].params[k].value for i in range(g2.shape[1])]) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + else: + guess_limits = dict(baseline=[1, 1.8], alpha=[0, 2], beta=[0.0, 1], relaxation_rate=[0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function="simple_exponential", + sequential_fit=sequential_fit, + fit_range=second_fit_range, + fit_variables={"baseline": False, "beta": False, "alpha": False, "relaxation_rate": True}, + guess_values=guess_values, + guess_limits=guess_limits, + ) + + return g2_fit_result, taus_fit, g2_fit + + +def get_g2_fit_general( + g2, taus, function="simple_exponential", sequential_fit=False, qval_dict=None, ang_init=90, *argv, **kwargs +): + """ + Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq + qval_dict: a dict with qr and ang (in unit of degrees).") + + + Dec 29,2016, Y.G.@CHX + + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + taus: the time delay + sequential_fit: if True, will use the low-q fit result as initial value to fit the higher Qs + function: + supported function include: + 'simple_exponential' (or 'simple'): fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential'(or 'streched'): fit by a streched exponential function, defined as + beta * ( np.exp( -2 * ( relaxation_rate * tau )**alpha ) + baseline + 'stretched_vibration': fit by a streched exponential function with vibration, defined as + beta * (1 + amp*np.cos( 2*np.pi*60* x) )* np.exp(-2 * (relaxation_rate * x)**alpha) + baseline + 'flow_para_function' (or flow): fit by a flow function + + + kwargs: + could contains: + 'fit_variables': a dict, for vary or not, + keys are fitting para, including + beta, relaxation_rate , alpha ,baseline + values: a False or True, False for not vary + 'guess_values': a dict, for initial value of the fitting para, + the defalut values are + dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + + 'guess_limits': a dict, for the limits of the fittting para, for example: + dict( beta=[0, 10],, alpha=[0,100] ) + the default is: + dict( baseline =[0.5, 2.5], alpha=[0, inf] ,beta = [0, 1], relaxation_rate= [0.0,1000] ) + Returns + ------- + fit resutls: a instance in limfit + tau_fit + fit_data by the model, it has the q number of g2 + + an example: + fit_g2_func = 'stretched' + g2_fit_result, taus_fit, g2_fit = 
get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':True,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + + """ + + if "fit_range" in kwargs.keys(): + fit_range = kwargs["fit_range"] + else: + fit_range = None + + num_rings = g2.shape[1] + if "fit_variables" in kwargs: + additional_var = kwargs["fit_variables"] + _vars = [k for k in list(additional_var.keys()) if additional_var[k] == False] + else: + _vars = [] + if function == "simple_exponential" or function == "simple": + _vars = np.unique(_vars + ["alpha"]) + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) + elif function == "stretched_exponential" or function == "stretched": + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) + elif function == "stretched_vibration": + mod = Model(stretched_auto_corr_scat_factor_with_vibration) # , independent_vars= _vars) + elif function == "flow_para_function" or function == "flow_para": + mod = Model(flow_para_function) # , independent_vars= _vars) + elif function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod = Model(flow_para_function_explicitq) # , independent_vars= _vars) + elif function == "flow_para_function_with_vibration" or function == "flow_vibration": + mod = Model(flow_para_function_with_vibration) + + else: + print( + "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" + % function + ) + + mod.set_param_hint("baseline", min=0.5, max=2.5) + mod.set_param_hint("beta", min=0.0, max=1.0) + mod.set_param_hint("alpha", min=0.0) + mod.set_param_hint("relaxation_rate", min=0.0, max=1000) + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + for k in list(guess_limits.keys()): + mod.set_param_hint(k, min=guess_limits[k][0], max=guess_limits[k][1]) + + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + mod.set_param_hint("flow_velocity", min=0) + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + if function == "stretched_vibration" or function == "flow_vibration": + mod.set_param_hint("freq", min=0) + mod.set_param_hint("amp", min=0) + + _guess_val = dict(beta=0.1, alpha=1.0, relaxation_rate=0.005, baseline=1.0) + if "guess_values" in kwargs: + guess_values = kwargs["guess_values"] + _guess_val.update(guess_values) + + _beta = _guess_val["beta"] + _alpha = _guess_val["alpha"] + _relaxation_rate = _guess_val["relaxation_rate"] + _baseline = _guess_val["baseline"] + if isinstance(_beta, (np.ndarray, list)): + _beta_ = _beta[0] + else: + _beta_ = _beta + if isinstance(_baseline, (np.ndarray, list)): + _baseline_ = _baseline[0] + else: + _baseline_ = _baseline + if isinstance(_relaxation_rate, (np.ndarray, list)): + _relaxation_rate_ = _relaxation_rate[0] + else: + _relaxation_rate_ = _relaxation_rate + if isinstance(_alpha, (np.ndarray, list)): + _alpha_ = _alpha[0] + else: + _alpha_ = _alpha + pars = mod.make_params(beta=_beta_, alpha=_alpha_, relaxation_rate=_relaxation_rate_, baseline=_baseline_) + + if function == 
"flow_para_function" or function == "flow_para": + _flow_velocity = _guess_val["flow_velocity"] + if isinstance(_flow_velocity, (np.ndarray, list)): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + relaxation_rate=_relaxation_rate_, + baseline=_baseline_, + ) + + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + _flow_velocity = _guess_val["flow_velocity"] + _diffusion = _guess_val["diffusion"] + _guess_val["qr"] = 1 + _guess_val["q_ang"] = 0 + if isinstance(_flow_velocity, (np.ndarray, list)): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + if isinstance(_diffusion, (np.ndarray, list)): + _diffusion_ = _diffusion[0] + else: + _diffusion_ = _diffusion + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=1, + q_ang=0, + ) + + if function == "stretched_vibration": + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, alpha=_alpha, freq=_freq, amp=_amp, relaxation_rate=_relaxation_rate, baseline=_baseline + ) + + if function == "flow_vibration": + _flow_velocity = _guess_val["flow_velocity"] + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, + freq=_freq, + amp=_amp, + flow_velocity=_flow_velocity, + relaxation_rate=_relaxation_rate, + baseline=_baseline, + ) + for v in _vars: + pars["%s" % v].vary = False + # print( pars ) + fit_res = [] + model_data = [] + for i in range(num_rings): + if fit_range != None: + y_ = g2[1:, i][fit_range[0] : fit_range[1]] + lags_ = taus[1:][fit_range[0] : fit_range[1]] + else: + y_ = g2[1:, i] + lags_ = taus[1:] + + mm = ~np.isnan(y_) + y = y_[mm] + lags = lags_[mm] + # print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + # y=y_ + # lags=lags_ + # print( _relaxation_rate ) + for k in list(pars.keys()): + # print(k, _guess_val[k] ) + try: + if isinstance(_guess_val[k], (np.ndarray, list)): + pars[k].value = _guess_val[k][i] + except: + pass + + if True: + if isinstance(_beta, (np.ndarray, list)): + # pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val["beta"][i] + if isinstance(_baseline, (np.ndarray, list)): + # pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val["baseline"][i] + if isinstance(_relaxation_rate, (np.ndarray, list)): + # pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val["relaxation_rate"][i] + if isinstance(_alpha, (np.ndarray, list)): + # pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val["alpha"][i] + # for k in list(pars.keys()): + # print(k, _guess_val[k] ) + # pars[k].value = _guess_val[k][i] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + if qval_dict == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=qval_dict[i][0], + q_ang=abs(np.radians(qval_dict[i][1] - ang_init)), + ) + + pars["qr"].vary = False + pars["q_ang"].vary = False + for v in _vars: + pars["%s" % v].vary = False + + # if i==20: + # print(pars) + # print( pars ) + result1 = mod.fit(y, pars, x=lags) + # print(qval_dict[i][0], qval_dict[i][1], y) + if sequential_fit: + for k in 
list(pars.keys()): + # print( pars ) + if k in list(result1.best_values.keys()): + pars[k].value = result1.best_values[k] + fit_res.append(result1) + # model_data.append( result1.best_fit ) + yf = result1.model.eval(params=result1.params, x=lags_) + model_data.append(yf) + return fit_res, lags_, np.array(model_data).T + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): + """Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + """ + + Nqs = len(qval_dict.keys()) + len_qrz = len(list(qval_dict.values())[0]) + # qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array(list(qval_dict.values()))[:, 0] + if geometry == "gi_saxs" or geometry == "ang_saxs": # or geometry=='gi_waxs': + if len_qrz < 2: + print("please give qz or qang for the q-label") + else: + # qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array(list(qval_dict.values()))[:, 1] + else: + qz_label = np.array([0]) + + uqz_label = np.unique(qz_label) + num_qz = len(uqz_label) + + uqr_label = np.unique(qr_label) + num_qr = len(uqr_label) + + # print( uqr_label, uqz_label ) + if len(uqr_label) >= len(uqz_label): + master_plot = "qz" # one qz for many sub plots of each qr + else: + master_plot = "qr" + + mastp = master_plot + if geometry == "ang_saxs": + mastp = "ang" + num_short = min(num_qz, num_qr) + num_long = max(num_qz, num_qr) + + # print( mastp, num_short, num_long) + if num_qz != num_qr: + short_label = [qz_label, qr_label][np.argmin([num_qz, num_qr])] + long_label = [qz_label, qr_label][np.argmax([num_qz, num_qr])] + short_ulabel = [uqz_label, uqr_label][np.argmin([num_qz, num_qr])] + long_ulabel = [uqz_label, uqr_label][np.argmax([num_qz, num_qr])] + else: + short_label = qz_label + long_label = qr_label + short_ulabel = uqz_label + long_ulabel = uqr_label + # print( long_ulabel ) + # print( qz_label,qr_label ) + # print( short_label, long_label ) + + if geometry == "saxs" or geometry == "gi_waxs": + ind_long = [range(num_long)] + else: + ind_long = [np.where(short_label == i)[0] for i in short_ulabel] + + if Nqs == 1: + long_ulabel = list(qval_dict.values())[0] + long_label = list(qval_dict.values())[0] + return ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) + + +############################################ +# #a good func to plot g2 for all types of geogmetries +# ########################################### + + +def plot_g2_general( + g2_dict, + taus_dict, + qval_dict, + g2_err_dict=None, + fit_res=None, + geometry="saxs", + filename="g2", + path=None, + function="simple_exponential", + g2_labels=None, + fig_ysize=12, + qth_interest=None, + ylabel="g2", + return_fig=False, + append_name="", + outsize=(2000, 2400), + max_plotnum_fig=16, + figsize=(10, 12), + show_average_ang_saxs=True, + qphi_analysis=False, + fontsize_sublabel=12, + *argv, + **kwargs, +): + """ + Jan 10, 2018 add g2_err_dict option to plot g2 with error bar + Oct31, 2017 add qth_interest option + + Dec 26,2016, Y.G.@CHX + + Plot one/four-time correlation function (with fit) for 
different geometry + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2_dict: dict, format as {1: g2_1, 2: g2_2, 3: g2_3...} one-time correlation function, g1,g2, g3,...must have the same shape + taus_dict, dict, format {1: tau_1, 2: tau_2, 3: tau_3...}, tau1,tau2, tau3,...must have the same shape + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + + fit_res: give all the fitting parameters for showing in the plot + qth_interest: if not None: should be a list, and will only plot the qth_interest qs + filename: for the title of plot + append_name: if not None, will save as filename + append_name as filename + path: the path to save data + outsize: for gi/ang_saxs, will combine all the different qz images together with outsize + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + + one_plot: if True, plot all images in one pannel + kwargs: + + Returns + ------- + None + + ToDoList: plot an average g2 for ang_saxs for each q + + """ + + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" + + if geometry == "saxs": + if qphi_analysis: + geometry = "ang_saxs" + if qth_interest != None: + if not isinstance(qth_interest, list): + print("Please give a list for qth_interest") + else: + # g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array(qth_interest) - 1 + g2_dict_ = {} + # taus_dict_ = {} + for k in list(g2_dict.keys()): + g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] + # for k in list(taus_dict.keys()): + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + taus_dict_ = taus_dict + qval_dict_ = {k: qval_dict[k] for k in qth_interest} + if fit_res != None: + fit_res_ = [fit_res[k] for k in qth_interest] + else: + fit_res_ = None + else: + g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + # $print( num_short, num_long ) + + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + # if show_average_ang_saxs: + # if geometry=='ang_saxs': + # num_long_i += 1 + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": + fig = plt.figure(figsize=(8, 6)) + else: + if num_short > 1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + # print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) + else: + # print('Here') + if master_plot != "qz": + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == "qz": + if geometry == 
"ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (filename, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + # plt.title( til,fontsize=20, y =1.06) + # print('here') + else: + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + # fig.set_size_inches(10, 12) + # fig.set_size_inches(10, fig_ysize ) + else: + sy = 1 + # fig.set_size_inches(8,6) + # plt.axis('off') + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + + temp = sy + sy = sx + sx = temp + + # print( num_long_i, sx, sy ) + # print( master_plot ) + # print(ind_long_i, len(ind_long_i) ) + + for i, l_ind in enumerate(ind_long_i): + if num_long_i <= max_plotnum_fig: + # if s_ind ==2: + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + ax = fig.add_subplot(sx, sy, i + 1) + if sx == 1: + if sy == 1: + plt.axis("on") + else: + # fig_subnum = l_ind//max_plotnum_fig + # ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i // max_plotnum_fig + # print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx, sy, i + 1 - fig_subnum * max_plotnum_fig) + + ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_plot == "qz" or master_plot == "angle": + if geometry != "gi_waxs": + title_long = r"$Q_r= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + else: + title_long = r"$Q_r= $" + "%i " % (long_label[l_ind]) + # print( title_long,long_label,l_ind ) + else: + if geometry == "ang_saxs": + # title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = "Ang= " + "%.2f" % (long_label[l_ind]) # + r'$^\circ$' + '( %d )'%(l_ind) + elif geometry == "gi_saxs": + title_long = r"$Q_z= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + else: + title_long = "" + # print( master_plot ) + if master_plot != "qz": + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.1, fontsize=12) + else: + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.05, fontsize=fontsize_sublabel) + # print( geometry ) + # print( title_long ) + if qth_interest != None: # it might have a bug here, todolist!!! 
+ lab = sorted(list(qval_dict_.keys())) + # print( lab, l_ind) + ax.set_title(title_long + " (%s )" % (lab[l_ind] + 1), y=1.05, fontsize=12) + for ki, k in enumerate(list(g2_dict_.keys())): + if ki == 0: + c = "b" + if fit_res == None: + m = "-o" + else: + m = "o" + elif ki == 1: + c = "r" + if fit_res == None: + m = "s" + else: + m = "-" + elif ki == 2: + c = "g" + m = "-D" + else: + c = colors[ki + 2] + m = "-%s" % markers[ki + 2] + try: + dumy = g2_dict_[k].shape + # print( 'here is the shape' ) + islist = False + except: + islist_n = len(g2_dict_[k]) + islist = True + # print( 'here is the list' ) + if islist: + for nlst in range(islist_n): + m = "-%s" % markers[nlst] + # print(m) + y = g2_dict_[k][nlst][:, l_ind] + x = taus_dict_[k][nlst] + if ki == 0: + ymin, ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + # print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst == 0: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + else: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + yerr = g2_err_dict[k][nlst][:, l_ind] + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + else: + if nlst == 0: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + ax.set_xscale("log", nonposx="clip") + if nlst == 0: + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + + else: + y = g2_dict_[k][:, l_ind] + x = taus_dict_[k] + if ki == 0: + ymin, ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + else: + yerr = g2_err_dict[k][:, l_ind] + # print(x.shape, y.shape, yerr.shape) + # print(yerr) + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + ax.set_xscale("log", nonposx="clip") + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + + if fit_res_ != None: + result1 = fit_res_[l_ind] + # print (result1.best_values) + + beta = result1.best_values["beta"] + baseline = result1.best_values["baseline"] + if function == "simple_exponential" or function == "simple": + rate = result1.best_values["relaxation_rate"] + alpha = 1.0 + elif function == "stretched_exponential" or function == "stretched": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + elif function == "stretched_vibration": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + freq = result1.best_values["freq"] + elif function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + freq = result1.best_values["freq"] + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + flow = result1.best_values["flow_velocity"] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + diff = result1.best_values["diffusion"] + qrr = short_ulabel[s_ind] + # print(qrr) + rate = diff * qrr**2 + flow = result1.best_values["flow_velocity"] + if qval_dict_ == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + pass + + if rate != 0: + txts = r"$\tau_0$" + r"$ = 
%.3f$" % (1 / rate) + r"$ s$" + else: + txts = r"$\tau_0$" + r"$ = inf$" + r"$ s$" + x = 0.25 + y0 = 0.9 + fontsize = 12 + ax.text(x=x, y=y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + # print(function) + dt = 0 + if ( + function != "flow_para_function" + and function != "flow_para" + and function != "flow_vibration" + and function != "flow_para_qang" + ): + txts = r"$\alpha$" + r"$ = %.3f$" % (alpha) + dt += 0.1 + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$baseline$" + r"$ = %.3f$" % (baseline) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if ( + function == "flow_para_function" + or function == "flow_para" + or function == "flow_vibration" + or function == "flow_para_qang" + ): + txts = r"$flow_v$" + r"$ = %.3f$" % (flow) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function == "stretched_vibration" or function == "flow_vibration": + txts = r"$vibration$" + r"$ = %.1f Hz$" % (freq) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$\beta$" + r"$ = %.3f$" % (beta) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + try: + ax.set_ylim([ymin * vmin, ymax * vmax]) + except: + pass + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if num_short == 1: + fp = path + filename + else: + fp = path + filename + "_%s_%s" % (mastp, s_ind) + + if append_name != "": + fp = fp + append_name + fps.append(fp + ".png") + # if num_long_i <= 16: + if num_long_i <= max_plotnum_fig: + fig.set_tight_layout(True) + # fig.tight_layout() + # print(fig) + try: + plt.savefig(fp + ".png", dpi=fig.dpi) + except: + print("Can not save figure here.") + + else: + fps = [] + for fn, f in enumerate(fig): + f.set_tight_layout(True) + fp = path + filename + "_q_%s_%s" % (fn * 16, (fn + 1) * 16) + if append_name != "": + fp = fp + append_name + fps.append(fp + ".png") + f.savefig(fp + ".png", dpi=f.dpi) + # plt.savefig( fp + '.png', dpi=fig.dpi) + # combine each saved images together + + if (num_short != 1) or (num_long_i > 16): + outputfile = path + filename + ".png" + if append_name != "": + outputfile = path + filename + append_name + "__joint.png" + else: + outputfile = path + filename + "__joint.png" + combine_images(fps, outputfile, outsize=outsize) + if return_fig: + return fig + + +def power_func(x, D0, power=2): + return D0 * x**power + + +def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv, **kwargs): + """ + Dec 26,2016, Y.G.@CHX + + Fit q~rate by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ Return:
+ D0
+ qrate_fit_res
+ """
+
+ power_variable = False
+
+ if "fit_range" in kwargs.keys():
+ fit_range = kwargs["fit_range"]
+ else:
+ fit_range = None
+
+ mod = Model(power_func)
+ # mod.set_param_hint( 'power', min=0.5, max= 10 )
+ # mod.set_param_hint( 'D0', min=0 )
+ pars = mod.make_params(power=2, D0=1e-5) # initial guess for D0; use 1e-5, not 10 ^ (-5), which is bitwise XOR in Python
+ if power_variable:
+ pars["power"].vary = True
+ else:
+ pars["power"].vary = False
+
+ (
+ qr_label,
+ qz_label,
+ num_qz,
+ num_qr,
+ num_short,
+ num_long,
+ short_label,
+ long_label,
+ short_ulabel,
+ long_ulabel,
+ ind_long,
+ master_plot,
+ mastp,
+ ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)
+
+ Nqr = num_long
+ Nqz = num_short
+ D0 = np.zeros(Nqz)
+ power = 2 # np.zeros( Nqz )
+ qrate_fit_res = []
+ # print(Nqz)
+ for i in range(Nqz):
+ ind_long_i = ind_long[i]
+ y = np.array(rate)[ind_long_i]
+ x = long_label[ind_long_i]
+ # print(y,x)
+ if fit_range != None:
+ y = y[fit_range[0] : fit_range[1]]
+ x = x[fit_range[0] : fit_range[1]]
+ # print (i, y,x)
+ _result = mod.fit(y, pars, x=x, weights=weights)
+ qrate_fit_res.append(_result)
+ D0[i] = _result.best_values["D0"]
+ # power[i] = _result.best_values['power']
+ print("The fitted diffusion coefficient D0 is: %.3e A^2/s" % D0[i])
+ return D0, qrate_fit_res
+
+
+def plot_q_rate_fit_general(
+ qval_dict,
+ rate,
+ qrate_fit_res,
+ geometry="saxs",
+ ylim=None,
+ plot_all_range=True,
+ plot_index_range=None,
+ show_text=True,
+ return_fig=False,
+ show_fit=True,
+ *argv,
+ **kwargs,
+):
+ """
+ Dec 26,2016, Y.G.@CHX
+
+ plot q~rate fitted by a power law function; the fit curve passes through (0,0)
+
+ Parameters
+ ----------
+ qval_dict, dict, with key as roi number,
+ format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs
+ format as {1: [qr1], 2: [qr2] ...} for saxs
+ format as {1: [qr1, qa1], 2: [qr2,qa2], ...} for ang-saxs
+ rate: relaxation_rate
+ plot_index_range:
+ Option:
+ if power_variable = False, power =2 to fit q^2~rate,
+ Otherwise, power is variable.
+ show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + power = 2 + fig, ax = plt.subplots() + plt.title(r"$Q^%s$" % (power) + "-Rate-%s_Fit" % (uid), fontsize=20, y=1.06) + Nqz = num_short + if Nqz != 1: + ls = "--" + else: + ls = "" + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values["D0"] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] + else: + label = "" + ax.plot(x**power, y, marker="o", ls=ls, label=label) + yfit = qrate_fit_res[i].best_fit + + if show_fit: + if plot_all_range: + ax.plot(x**power, x**power * D0, "-r") + else: + ax.plot((x**power)[: len(yfit)], yfit, "-r") + + if show_text: + txts = r"$D0: %.3e$" % D0 + r" $A^2$" + r"$s^{-1}$" + dy = 0.1 + ax.text(x=0.15, y=0.65 - dy * i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz != 1: + legend = ax.legend(loc="best") + + if plot_index_range != None: + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) + if ylim != None: + ax.set_ylim(ylim) + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") + ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) + fp = path + "%s_Q_Rate" % (uid) + "_fit.png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig, ax + + +def save_g2_fit_para_tocsv(fit_res, filename, path): + """Y.G. Dec 29, 2016, + save g2 fitted parameter to csv file + """ + col = list(fit_res[0].best_values.keys()) + m, n = len(fit_res), len(col) + data = np.zeros([m, n]) + for i in range(m): + data[i] = list(fit_res[i].best_values.values()) + df = DataFrame(data) + df.columns = col + filename1 = os.path.join(path, filename) # + '.csv') + df.to_csv(filename1) + print("The g2 fitting parameters are saved in %s" % filename1) + return df + + +def R_2(ydata, fit_data): + """Calculates R squared for a particular fit - by L.W. + usage R_2(ydata,fit_data) + returns R2 + by L.W. Feb. 
2019 + """ + y_ave = np.average(ydata) + SS_tot = np.sum((np.array(ydata) - y_ave) ** 2) + # print('SS_tot: %s'%SS_tot) + SS_res = np.sum((np.array(ydata) - np.array(fit_data)) ** 2) + # print('SS_res: %s'%SS_res) + return 1 - SS_res / SS_tot + + +def is_outlier(points, thresh=3.5, verbose=False): + """MAD test""" + points.tolist() + if len(points) == 1: + points = points[:, None] + if verbose: + print("input to is_outlier is a single point...") + median = np.median(points) * np.ones(np.shape(points)) # , axis=0) + + diff = (points - median) ** 2 + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation + return modified_z_score > thresh + + +def outlier_mask( + avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, plot=False +): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. + verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc = 1 + + for rn in np.arange(1, np.max(roi_mask) + 1, 1): + rm = np.zeros(np.shape(roi_mask)) + rm = rm - 1 + rm[np.where(roi_mask == rn)] = 1 + pixel = roi.roi_pixel_values(avg_img * rm, roi_mask, [rn]) + out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) + if np.nanmax(out_l) > 0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) + if verbose: + print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) + if verbose: + print("upper outlier threshold: %s" % upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: + print("no upper outlier threshold found") + ind1 = (out_l * pixel[0][0]) > 0 + ind2 = (out_l * pixel[0][0]) < ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) + except: + lower_outlier_threshold = False + if verbose: + print("no lower outlier threshold found") + else: + if verbose: + print("ROI #%s: no outliers detected" % rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l) / len(pixel[0][0]) + if verbose: + print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: + print( + "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" + % maximum_outlier_fraction + ) + upper_outlier_threshold = False + 
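+ # together with the assignment below: if the flagged fraction exceeds
+ # maximum_outlier_fraction, both thresholds are disabled so that no outlier
+ # masking is applied to this ROI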
lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img * rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img * rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: + fig, ax = plt.subplots(1, 5, figsize=(24, 4)) + plt.subplot(1, 5, pc) + pc += 1 + if pc > 5: + pc = 1 + pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) + plt.plot(pixel[0][0], "bo", markersize=1.5) + if upper_outlier_threshold or lower_outlier_threshold: + x = np.arange(len(out_l)) + plt.plot( + [x[0], x[-1]], + [ave_roi_int, ave_roi_int], + "g--", + label="ROI average: %s" % np.round(ave_roi_int, 4), + ) + if upper_outlier_threshold: + ind = (out_l * pixel[0][0]) > upper_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [upper_outlier_threshold, upper_outlier_threshold], + "r--", + label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + if lower_outlier_threshold: + ind = (out_l * pixel[0][0]) < lower_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [lower_outlier_threshold, lower_outlier_threshold], + "r--", + label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + plt.ylabel("Intensity") + plt.xlabel("pixel") + plt.title("ROI #: %s" % rn) + plt.legend(loc="best", fontsize=8) + + if plot: + fig, ax = plt.subplots() + plt.imshow(hhmask) + hot_dark = np.nonzero(hhmask < 1) + cmap = plt.cm.get_cmap("viridis") + plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) + + return hhmask diff --git a/pyCHX/backups/chx_generic_functions_05012024.py b/pyCHX/backups/chx_generic_functions_05012024.py new file mode 100644 index 0000000..2b780c3 --- /dev/null +++ b/pyCHX/backups/chx_generic_functions_05012024.py @@ -0,0 +1,5809 @@ +from pyCHX.chx_libs import * +#from tqdm import * +from pyCHX.chx_libs import ( colors, markers ) +from scipy.special import erf + +from skimage.filters import prewitt +from skimage.draw import line_aa, line, polygon, ellipse, disk + +from modest_image import imshow +import matplotlib.cm as mcm +from matplotlib import cm +import copy, scipy +import PIL +from shutil import copyfile +import pytz +from datetime import datetime +from skbeam.core.utils import radial_grid, angle_grid, radius_to_twotheta, twotheta_to_q +from os import listdir +import numpy as np + + +markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H', + 'h', '*', 'd', + '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] +markers = np.array( markers *100 ) + + + + +flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +"""a function to flatten a nest list +e.g., flatten( [ ['sg','tt'],'ll' ] ) +gives ['sg', 'tt', 'l', 'l'] +""" + + +def get_frames_from_dscan( uid, detector = 'eiger4m_single_image' ): + '''Get frames from a dscan by giving uid and detector ''' + hdr = db[uid] + return db.get_images(hdr, detector ) + + +def get_roi_intensity( img, roi_mask): + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + avgs = np.zeros(noqs) + for i in tqdm( range(1,1+noqs)): + avgs[i-1] = ( np.average( img[roi_mask==i] ) ) + return avgs + + +def generate_h5_list(inDir, filename): + '''YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + Input: + inDir: the input direction + filename: the filename for output (have to lst as extension) + Output: + 
Save the all h5 filenames in a lst file + ''' + fp_list = listdir( inDir ) + if filename[-4:] !='.lst': + filename += '.lst' + for FP in fp_list: + FP_ = inDir+FP + if os.path.isdir(FP_): + fp = listdir( FP_ ) + for fp_ in fp: + if '.h5' in fp_: + append_txtfile( filename = filename, + data = np.array( [ FP_+'/'+fp_ ])) + print('The full path of all the .h5 in %s has been saved in %s.'%(inDir, filename)) + print( 'You can use ./analysis/run_gui to visualize all the h5 file.') + + +def fit_one_peak_curve( x,y, fit_range=None ): + '''YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape + Parameters: + x: one-d array, x-axis data + y: one-d array, y-axis data + fit_range: [x1, x2], a list of index, to define the x-range for fit + Return: + center: float, center of the peak + center_std: float, error bar of center in the fitting + fwhm: float, full width at half max intensity of the peak, 2*sigma + fwhm_std:float, error bar of the full width at half max intensity of the peak + xf: the x in the fit + out: the fitting class resutled from lmfit + + ''' + from lmfit.models import LinearModel, LorentzianModel + peak = LorentzianModel() + background = LinearModel() + model = peak + background + if fit_range != None: + x1,x2=fit_range + xf= x[x1:x2] + yf = y[x1:x2] + else: + xf = x + yf = y + model.set_param_hint('slope', value=5 ) + model.set_param_hint('intercept', value=0 ) + model.set_param_hint('center', value=0.005 ) + model.set_param_hint('amplitude', value= 0.1 ) + model.set_param_hint('sigma', value=0.003 ) + #out=model.fit(yf, x=xf)#, method='nelder') + out=model.fit(yf, x=xf, method= 'leastsq' ) + cen = out.params['center'].value + cen_std = out.params['center'].stderr + wid = out.params['sigma'].value *2 + wid_std = out.params['sigma'].stderr *2 + return cen, cen_std, wid, wid_std , xf, out + + +def plot_xy_with_fit( x, y, xf, out, + cen, cen_std,wid, wid_std, + xlim=[1e-3,0.01],xlabel= 'q ('r'$\AA^{-1}$)', + ylabel='I(q)', filename=None): + '''YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid ''' + + yf2=out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots( ) + plot1D(x=x,y=y,ax=ax,m='o', ls='',c='k', legend='data') + plot1D(x=xf,y=yf2,ax=ax,m='', ls='-',c='r', legend='fit',logy=True) + ax.set_xlim( xlim ) + #ax.set_ylim( 0.1, 4) + #ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel( xlabel ) + ax.set_ylabel(ylabel ) + txts = r'peak' + r' = %.5f +/- %.5f '%( cen, cen_std ) + ax.text(x =0.02, y=.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r'wid' + r' = %.4f +/- %.4f'%( wid, wid_std) + #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x =0.02, y=.1, s=txts, fontsize=14, transform=ax.transAxes) + plt.tight_layout() + if filename != None: + plt.savefig( filename ) + return ax + + + + + +def get_touched_qwidth( qcenters ): + '''YG Dev@CHX April 2019, get touched qwidth by giving qcenters + ''' + qwX = np.zeros_like(qcenters) + qW= qcenters[1:] - qcenters[:-1] + qwX[0] = qW[0] + for i in range(1,len(qcenters)-1): + #print(i) + qwX[i] = min( qW[i-1], qW[i] ) + qwX[-1] = qW[-1] + qwX *=0.9999 + return qwX + + + +def append_txtfile( filename, data, fmt='%s', *argv,**kwargs ): + '''YG. 
Dev May 10, 2109 append data to a file + Create an empty file if the file dose not exist, otherwise, will append the data to it + Input: + fp: filename + data: the data to be append + fmt: the parameter defined in np.savetxt + + ''' + from numpy import savetxt + exists = os.path.isfile( filename) + if not exists: + np.savetxt( filename, [ ] , fmt='%s', ) + print('create new file') + + f=open( filename, 'a') + savetxt( f, data, fmt = fmt , *argv,**kwargs ) + f.close() + +def get_roi_mask_qval_qwid_by_shift( new_cen, new_mask, old_cen,old_roi_mask, + setup_pargs, geometry, + limit_qnum= None): + '''YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask''' + center=setup_pargs['center'] + roi_mask1 = shift_mask( new_cen=center, new_mask=new_mask, old_cen=old_cen, + old_roi_mask=old_roi_mask, limit_qnum= limit_qnum) + qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( + new_mask=new_mask, setup_pargs=setup_pargs, + old_roi_mask=old_roi_mask, old_cen=old_cen, geometry = geometry ) + w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1,new_mask) + #print(w,w1) + qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k in w1 } + qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k in w1 } + qval_dict={} + qwid_dict={} + for i, k in enumerate( list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return roi_mask1, qval_dict, qwid_dict + + +def get_zero_nozero_qind_from_roi_mask(roi_mask,mask): + '''YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number''' + qind, pixelist = roi.extract_label_indices(roi_mask*mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + w=np.where(nopr==0)[0] + w1=np.where(nopr!=0)[0] + return w, w1 + + + +def get_masked_qval_qwid_dict_using_Rmax( new_mask, setup_pargs, old_roi_mask, old_cen, geometry ): + '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method ''' + cy,cx= setup_pargs['center'] + my,mx=new_mask.shape + Rmax = int(np.ceil(max( np.hypot(cx,cy),np.hypot(cx-mx,cy-my),np.hypot(cx,cy-my),np.hypot(cx-mx,cy) ))) + Fmask = np.zeros([Rmax*2,Rmax*2],dtype=int) + Fmask[ Rmax-cy : Rmax-cy+my, Rmax-cx: Rmax-cx + mx]=new_mask + roi_mask1 = shift_mask( new_cen=[Rmax,Rmax], new_mask=np.ones_like(Fmask), old_cen=old_cen, + old_roi_mask=old_roi_mask, limit_qnum= None) + setup_pargs_={ 'center':[Rmax,Rmax], 'dpix': setup_pargs['dpix'], 'Ldet': setup_pargs['Ldet'], + 'lambda_': setup_pargs['lambda_'], } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict( roi_mask1, Fmask, setup_pargs_, geometry ) + #w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1#,w + + + +def get_masked_qval_qwid_dict( roi_mask, mask, setup_pargs, geometry ): + '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask ''' + + qval_dict_, qwid_dict_ = get_qval_qwid_dict( roi_mask, setup_pargs, geometry= geometry) + w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask,mask) + qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k not in w } + qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k not in w } + qval_dict={} + qwid_dict={} + for i, k in enumerate( list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return qval_dict, qwid_dict + + +def get_qval_qwid_dict( roi_mask, setup_pargs, geometry='saxs'): + '''YG Dev April 6, 2019 + Get qval_dict and qwid_dict by giving roi_mask, setup_pargs + 
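+ (per pixel, q is computed below via skbeam as q = 4*pi/lambda_ * sin(theta), with
+ 2*theta = arctan( r_pixel * dpix / Ldet ), giving q in A^-1 for dpix and Ldet in mm
+ and lambda_ in A)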
Input: + roi_mask: integer type 2D array + setup_pargs: dict, should at least contains, center (direct beam center), dpix (in mm), + lamda_: in A-1, Ldet: in mm + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + geometry: support saxs for isotropic transmission SAXS + ang_saxs for anisotropic transmission SAXS + flow_saxs for anisotropic transmission SAXS under flow (center symetric) + + Return: + qval_dict: dict, key as q-number, val: q val + qwid_dict: dict, key as q-number, val: q width (qmax - qmin) + + TODOLIST: to make GiSAXS work + + ''' + + origin = setup_pargs['center']#[::-1] + shape = roi_mask.shape + qp_map = radial_grid(origin, shape) + phi_map = np.degrees( angle_grid(origin, shape) ) + two_theta = radius_to_twotheta( setup_pargs['Ldet'], setup_pargs['dpix'] * qp_map ) + q_map = utils.twotheta_to_q(two_theta, setup_pargs['lambda_']) + qind, pixelist = roi.extract_label_indices(roi_mask) + Qval = np.unique(qind) + qval_dict_ = {} + qwid_dict_ = {} + for j, i in enumerate( Qval): + qval = q_map[ roi_mask == i ] + #print( qval ) + if geometry=='saxs': + qval_dict_[j] = [( qval.max() + qval.min() )/2] # np.mean(qval) + qwid_dict_[j] = [( qval.max() - qval.min() ) ] + + elif geometry=='ang_saxs': + aval = phi_map[ roi_mask == i ] + #print(j,i,qval, aval) + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + + qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) + qwid_dict_[j][0] = ( qval.max() - qval.min() ) + + if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) + #print('here -- %s'%j) + else: + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() ) + + elif geometry=='flow_saxs': + sx,sy = roi_mask.shape + cx,cy = origin + aval = (phi_map[cx:])[ roi_mask[cx:] == i ] + if len(aval)==0: + aval = (phi_map[:cx])[ roi_mask[:cx] == i ] + 180 + + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) + qwid_dict_[j][0] = ( qval.max() - qval.min() ) + #print(aval) + if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) + #print('here -- %s'%j) + else: + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() ) + + return qval_dict_, qwid_dict_ + + + +def get_SG_norm( FD, pixelist, bins=1, mask=None, window_size= 11, order= 5 ): + '''Get normalization of a time series by SavitzkyGolay filter + Input: + FD: file handler for a compressed data + pixelist: pixel list for a roi_mask + bins: the bin number for the time series, if number = total number of the time frame, + it means SG of the time averaged image + mask: the additional mask + window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details + Return: + norm: shape as ( length of FD, length of pixelist ) + ''' + if mask == None: + mask = 1 + beg = FD.beg + end = FD.end + N = end-beg + BEG = beg + if bins==1: + END = end + NB = N + MOD=0 + else: + END = N//bins + MOD = N%bins + NB = 
END + norm = np.zeros( [ end, len(pixelist) ] ) + for i in tqdm( range( NB ) ): + if bins == 1: + img = FD.rdframe(i + BEG) + else: + for j in range( bins): + ct = i * bins + j + BEG + #print(ct) + if j==0: + img = FD.rdframe( ct ) + n = 1.0 + else: + (p,v) = FD.rdrawframe(ct) + np.ravel( img )[p] += v + #img += FD.rdframe( ct ) + n += 1 + img /= n + avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask + normi = np.ravel(avg_imgf)[pixelist] + if bins==1: + norm[i+beg] = normi + else: + norm[ i*bins+beg: (i+1)*bins+beg ] = normi + if MOD: + for j in range(MOD): + ct = (1+i) * bins + j + BEG + if j==0: + img = FD.rdframe( ct ) + n = 1.0 + else: + (p,v) = FD.rdrawframe(ct) + np.ravel( img )[p] += v + n += 1 + img /= n + #print(ct,n) + img = FD.rdframe( ct ) + avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask + normi = np.ravel(avg_imgf)[pixelist] + norm[ (i+1)*bins + beg: (i+2)*bins + beg ] = normi + return norm + +def shift_mask( new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None ): + '''Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask + Input: + new_cen: [x,y] in uint of pixel + new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask + old_cen: [x,y] in uint of pixel + old_roi_mask: the roi_mask to be shifted + limit_qnum: integer, if not None, defines the max number of unique values of nroi_mask + + Output: + the shifted/croped roi_mask + ''' + nsx,nsy = new_mask.shape + down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] + x1,x2,y1,y2 = [ old_cen[0] - down, old_cen[0] + up , old_cen[1] - left, old_cen[1] + right ] + nroi_mask_ = old_roi_mask[ x1:x2, y1:y2 ] * new_mask + nroi_mask = np.zeros_like( nroi_mask_ ) + qind, pixelist = roi.extract_label_indices(nroi_mask_) + qu = np.unique(qind) + #noqs = len( qu ) + #nopr = np.bincount(qind, minlength=(noqs+1))[1:] + #qm = nopr>0 + for j, qv in enumerate(qu): + nroi_mask[nroi_mask_ == qv] = j +1 + if limit_qnum != None: + nroi_mask[ nroi_mask > limit_qnum ]=0 + return nroi_mask + + +def plot_q_g2fitpara_general( g2_dict, g2_fitpara, geometry ='saxs', ylim = None, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, ylabel='g2', qth_interest = None, max_plotnum_fig=1600,qphi_analysis=False, + *argv,**kwargs): + ''' + Mar 29,2019, Y.G.@CHX + + plot q~fit parameters + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ show_fit:, bool, if False, not show the fit + + ''' + + if 'uid' in kwargs.keys(): + uid_ = kwargs['uid'] + else: + uid_ = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] + else: + path = '' + data_dir = path + if ylabel=='g2': + ylabel='g_2' + if ylabel=='g4': + ylabel='g_4' + + if geometry =='saxs': + if qphi_analysis: + geometry = 'ang_saxs' + + + qval_dict_, fit_res_ = g2_dict, g2_fitpara + + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + #print(qr_label, qz_label, short_ulabel, long_ulabel) + #$print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( g2_fitpara['beta'], + g2_fitpara['relaxation_rate'], + g2_fitpara['baseline'], + g2_fitpara['alpha'] ) + + fps=[] + for s_ind in range( num_short ): + ind_long_i = ind_long[ s_ind ] + num_long_i = len( ind_long_i ) + betai, relaxation_ratei, baselinei, alphai = (beta[ind_long_i], relaxation_rate[ind_long_i], + baseline[ind_long_i], alpha[ind_long_i] ) + qi = long_ulabel + #print(s_ind, qi, np.array( betai) ) + + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + #fig = plt.figure( ) + if num_long_i <=4: + if master_plot != 'qz': + fig = plt.figure(figsize=(8, 6)) + else: + if num_short>1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + #print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 + fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] + #print( figsize ) + else: + #print('Here') + if master_plot != 'qz': + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == 'qz': + if geometry=='ang_saxs': + title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' + elif geometry=='gi_saxs': + title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short = '' + else: #qr + if geometry=='ang_saxs' or geometry=='gi_saxs': + title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short='' + #print(geometry) + #filename ='' + til = '%s:--->%s'%(uid_, title_short ) + if num_long_i <=4: + plt.title( til,fontsize= 14, y =1.15) + else: + plt.title( til,fontsize=20, y =1.06) + #print( num_long ) + if num_long!=1: + #print( 'here') + plt.axis('off') + #sy = min(num_long_i,4) + sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) + + else: + sy =1 + sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) + temp = sy + sy = sx + sx = temp + if sx==1: + if sy==1: + plt.axis('on') + ax1 = fig.add_subplot( 4,1,1 ) + ax2 = fig.add_subplot( 4,1,2 ) + ax3 = fig.add_subplot( 4,1,3 ) + ax4 = fig.add_subplot( 4,1,4 ) + plot1D(x=qi, y=betai, m='o', ls='--', c='k', ax=ax1, legend=r'$\beta$', title='') + plot1D(x=qi, y=alphai, m='o', ls='--',c='r', ax=ax2, legend=r'$\alpha$', title='') + plot1D(x=qi, y=baselinei, m='o', ls='--', c='g', ax=ax3, legend=r'$baseline$', title='') + plot1D(x=qi, y=relaxation_ratei, m='o', c='b', ls='--', ax=ax4, legend= r'$\gamma$ $(s^{-1})$' , title='') + + ax4.set_ylabel( r'$\gamma$ $(s^{-1})$' ) + ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) + ax3.set_ylabel( r'$baseline' ) + ax2.set_ylabel( r'$\alpha$' ) + ax1.set_ylabel( r'$\beta$' ) + fig.tight_layout() + fp = data_dir + uid_ + 'g2_q_fit_para_%s.png'%short_ulabel[s_ind] + fig.savefig( fp , dpi=fig.dpi) + 
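+        # note (added comment): the per-panel figure path is collected in 'fps' so that all
+        # short-label (qz/angle) figures can be stitched into one overview image by
+        # combine_images() after the loop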
fps.append(fp) + outputfile = data_dir + '%s_g2_q_fitpara_plot'%uid_ + '.png' + #print(uid) + combine_images( fps, outputfile, outsize= [ 2000,2400 ] ) + + + + + +def plot_q_rate_general( qval_dict, rate, geometry ='saxs', ylim = None, logq=True, lograte=True, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, + *argv,**kwargs): + ''' + Mar 29,2019, Y.G.@CHX + + plot q~rate in log-log scale + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. + show_fit:, bool, if False, not show the fit + + ''' + + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + else: + uid = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] + else: + path = '' + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig,ax = plt.subplots() + plt.title(r'$Q$''-Rate-%s'%(uid),fontsize=20, y =1.06) + Nqz = num_short + if Nqz!=1: + ls = '--' + else: + ls='' + #print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + #print(i, x, y, D0 ) + if Nqz!=1: + label=r'$q_z=%.5f$'%short_ulabel[i] + else: + label='' + ax.loglog(x, y, marker = 'o', ls =ls, label=label) + if Nqz!=1:legend = ax.legend(loc='best') + + if plot_index_range != None: + d1,d2 = plot_index_range + d2 = min( len(x)-1, d2 ) + ax.set_xlim( (x**power)[d1], (x**power)[d2] ) + ax.set_ylim( y[d1],y[d2]) + + if ylim != None: + ax.set_ylim( ylim ) + + ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$) (log)") + ax.set_xlabel("$q$"r'($\AA$) (log)') + fp = path + '%s_Q_Rate_loglog'%(uid) + '.png' + fig.savefig( fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig,ax + + + +def plot_xy_x2( x, y, x2=None, pargs=None, loglog=False, logy=True, fig_ax=None, + xlabel= 'q ('r'$\AA^{-1}$)', xlabel2='q (pixel)', title= '_q_Iq', + ylabel = 'I(q)',save=True, *argv,**kwargs): + '''YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + This funciton is primary for plot q-Iq + + Input: + x: one-d array, x in one unit + y: one-d array, + x2:one-d array, x in anoter unit + pargs: dict, could include 'uid', 'path' + loglog: if True, if plot x and y in log, by default plot in y-log + save: if True, save the plot in the path defined in pargs + kwargs: could include xlim (in unit of index), ylim (in unit of real value) + + ''' + if fig_ax == None: + fig, ax1 = plt.subplots() + else: + fig,ax1=fig_ax + if pargs != None: + uid = pargs['uid'] + path = pargs['path'] + else: + uid='XXX' + path='' + if loglog: + ax1.loglog( x,y, '-o') + elif logy: + ax1.semilogy( x,y, '-o') + else: + ax1.plot( x,y, '-o') + ax1.set_xlabel( xlabel ) + ax1.set_ylabel( ylabel ) + title = ax1.set_title( '%s--'%uid + title) + Nx= len(x) + if 'xlim' in kwargs.keys(): + xlim = kwargs['xlim'] + if xlim[1]>Nx: + xlim[1]=Nx-1 + else: + xlim=[ 0, Nx] + if 'ylim' in kwargs.keys(): + ylim = kwargs['ylim'] + else: + ylim=[y.min(), y.max()] + lx1,lx2=xlim + ax1.set_xlim( [ x[lx1], x[lx2] ] ) + ax1.set_ylim( ylim ) + if x2 != None: + ax2 = ax1.twiny() + ax2.set_xlabel( xlabel2 ) 
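+        # note (added comment): ax2 is a twiny() axis, so it shares the y-data with ax1 and
+        # only re-labels and re-limits x in the second unit (x2, e.g. q in pixels)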
+ ax2.set_ylabel( ylabel ) + ax2.set_xlim( [ x2[lx1], x2[lx2] ] ) + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + if save: + path = pargs['path'] + fp = path + '%s_q_Iq'%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + + + + +def save_oavs_tifs( uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1,threshold = 0 ): + '''save oavs as png''' + tifs = list( db[uid].data( 'OAV_image') )[0] + try: + pixel_scalebar=np.ceil(scalebar_size/md['OAV resolution um_pixel']) + except: + pixel_scalebar=None + print('No OAVS resolution is available.') + + text_string='%s $\mu$m'%scalebar_size + h = db[uid] + oavs=tifs + + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list + detectors = sorted(get_detectors(h)) + for d in range(len(detectors)): + try: + oav_period=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_period'] + oav_expt=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_time'] + except: + pass + oav_times=[] + for i in range(len(oavs)): + oav_times.append(oav_expt+i*oav_period) + fig=plt.subplots(int(np.ceil(len(oavs)/3)),3,figsize=(3*5.08,int(np.ceil(len(oavs)/3))*4)) + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs)/3)),3,m+1) + #plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img*scale)[:,:,2] < threshold + except: + ind = np.flipud(img*scale) < threshold + rgb_cont_img=np.copy(np.flipud(img)) + #rgb_cont_img[ind,0]=1000 + if brightness_scale !=1: + rgb_cont_img=scale_rgb(rgb_cont_img,scale=brightness_scale) + + plt.imshow(rgb_cont_img,interpolation='none',resample=True, cmap = 'gray') + plt.axis('equal') + cross=[685,440,50] # definintion of direct beam: x, y, size + plt.plot([cross[0]-cross[2]/2,cross[0]+cross[2]/2],[cross[1],cross[1]],'r-') + plt.plot([cross[0],cross[0]],[cross[1]-cross[2]/2,cross[1]+cross[2]/2],'r-') + if pixel_scalebar != None: + plt.plot([1100,1100+pixel_scalebar],[150,150],'r-',Linewidth=5) # scale bar. + plt.text(1000,50,text_string,fontsize=14,color='r') + plt.text(600,50,str(oav_times[m])[:5]+' [s]',fontsize=14,color='r') + plt.axis('off') + plt.savefig( data_dir + 'uid=%s_OVA_images.png'%uid) + + + + + +def shift_mask_old( mask, shiftx, shifty): + '''YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel + Input: + mask: int-type array, + shiftx: int scalar, shift value in x direction with unit in pixel + shifty: int scalar, shift value in y direction with unit in pixel + Output: + maskn: int-type array, shifted mask + + ''' + qind, pixelist = roi.extract_label_indices( mask ) + dims = mask.shape + imgwidthy = dims[1] #dimension in y, but in plot being x + imgwidthx = dims[0] #dimension in x, but in plot being y + pixely = pixelist%imgwidthy + pixelx = pixelist//imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy ) & (pixelyn >= 0 ) & (pixelxn < imgwidthx ) & (pixelxn >= 0 ) + pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] + maskn = np.zeros_like( mask ) + maskn.ravel()[pixelist_new] = qind[w] + return maskn + + +def get_current_time(): + '''get current time in a fomart of year/month/date/hour(24)/min/sec/, + e.g. 
2009-01-05 22:14:39 + ''' + loc_dt = datetime.now(pytz.timezone('US/Eastern')) + fmt = "%Y-%m-%d %H:%M:%S" + return loc_dt.strftime(fmt) + + + +def evalue_array( array, verbose = True ): + '''Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array ''' + _min, _max, avg, std = np.min( array), np.max( array), np.average( array ), np.std( array ) + if verbose: + print( 'The min, max, avg, std of this array are: %s %s %s %s, respectively.'%(_min, _max, avg, std ) ) + return _min, _max, avg, std + + + +def find_good_xpcs_uids( fuids, Nlim=100, det = [ '4m', '1m', '500'] ): + '''Y.G., Dev Nov 1, 2018 Find the good xpcs series + Input: + fuids: list, a list of full uids + Nlim: integer, the smallest number of images to be considered as XCPS sereis + det: list, a list of detector (can be short string of the full name of the detector) + Return: + the xpcs uids list + + ''' + guids = [] + for i, uid in enumerate(fuids): + if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': + head = db[uid]['start'] + for dec in head['detectors']: + for dt in det: + if dt in dec: + if 'number of images' in head: + if float(head['number of images'] ) >= Nlim: + #print(i, uid) + guids.append(uid) + G = np.unique( guids ) + print('Found %s uids for XPCS series.'%len(G) ) + return G + + +def create_fullImg_with_box( shape, box_nx = 9 , box_ny = 8, ): + '''Y.G. 2018/10/26 Divide image with multi touched boxes + Input + shape: the shape of image + box_nx: the number of box in x + box_ny: the number width of box in y + Return: + roi_mask, (* mask ) + ''' + + #shape = mask.shape + Wrow, Wcol = int( np.ceil( shape[0]/box_nx )), int(np.ceil(shape[1]/box_ny) ) + #print(Wrow, Wcol) + roi_mask = np.zeros( shape, dtype=np.int32 ) + for i in range( box_nx ): + for j in range(box_ny): + roi_mask[ i*Wrow: (i+1)*Wrow , j*Wcol: (j+1)*Wcol ] = i * box_ny + j + 1 + #roi_mask *= mask + return roi_mask + + + +def get_refl_y0( inc_ang, inc_y0, Ldet, pixel_size, ): + ''' Get reflection beam center y + Input: + inc_ang: incident angle in degree + inc_y0: incident beam y center in pixel + Ldet: sample to detector distance in meter + pixel_size: pixel size in meter + Return: reflection beam center y in pixel + ''' + return Ldet * np.tan( np.radians(inc_ang)) * 2 / pixel_size + inc_y0 + + +def lin2log_g2(lin_tau,lin_g2,num_points=False): + """ + Lutz developed at Aug,2018 + function to resample g2 with linear time steps into logarithmics + g2 values between consecutive logarthmic time steps are averaged to increase statistics + calling sequence: lin2log_g2(lin_tau,lin_g2,num_points=False) + num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade) + num_points=18 -> use 18 logarithmically spaced time points + """ + #prep taus and g2s: remove nan and first data point at tau=0 + rem = lin_tau==0 + #print('lin_tau: '+str(lin_tau.size)) + #print('lin_g2: '+str(lin_g2.size)) + lin_tau[rem]=np.nan + #lin_tau[0]=np.nan;#lin_g2[0]=np.nan + lin_g2 = lin_g2[np.isfinite(lin_tau)] + lin_tau = lin_tau[np.isfinite(lin_tau)] + #print('from lin-to-log-g2_sampling: ',lin_tau) + if num_points == False: + # automatically decide how many log-points (8/decade) + dec=int(np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8)) + else: + dec=int(num_points) + log_tau=np.logspace(np.log10(lin_tau[0]),np.log10(lin_tau.max()),dec) + # re-sample correlation function: + log_g2=[] + for i in range(log_tau.size-1): + 
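+        # note (added comment): for each logarithmic grid point, average all linear-tau g2
+        # values that fall inside the window centered on log_tau[i], with half-widths taken
+        # from the spacing to the neighbouring log grid points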
y=[i,log_tau[i]-(log_tau[i+1]-log_tau[i])/2,log_tau[i]+(log_tau[i+1]-log_tau[i])/2] + #x=lin_tau[lin_tau>y[1]] + x1=lin_tau>y[1]; x2=lin_tauy[1]; x2=lin_tau peak has to be taller factor 2 above background) + replot: if True, will plot data (if error func) with the fit and peak/cen/com position + logplot: if on, will plot in log scale + x: if not None, give x-data + + + ''' + if x == None: + x = np.arange( len(y) ) + x=np.array(x) + y=np.array(y) + + PEAK=x[np.argmax(y)] + PEAK_y=np.max(y) + COM=np.sum(x * y) / np.sum(y) + ### from Maksim: assume this is a peak profile: + def is_positive(num): + return True if num > 0 else False + # Normalize values first: + ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 + positive = is_positive(ym[0]) + list_of_roots = [] + for i in range(len(y)): + current_positive = is_positive(ym[i]) + if current_positive != positive: + list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) + positive = not positive + if len(list_of_roots) >= 2: + FWHM=abs(list_of_roots[-1] - list_of_roots[0]) + CEN=list_of_roots[0]+0.5*(list_of_roots[1]-list_of_roots[0]) + ps.fwhm=FWHM + ps.cen=CEN + yf=ym + #return { + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, + #} + else: # ok, maybe it's a step function.. + #print('no peak...trying step function...') + ym = ym + shift + def err_func(x, x0, k=2, A=1, base=0 ): #### erf fit from Yugang + return base - A * erf(k*(x-x0)) + mod = Model( err_func ) + ### estimate starting values: + x0=np.mean(x) + #k=0.1*(np.max(x)-np.min(x)) + pars = mod.make_params( x0=x0, k=2, A = 1., base = 0. ) + result = mod.fit(ym, pars, x = x ) + CEN=result.best_values['x0'] + FWHM = result.best_values['k'] + A = result.best_values['A'] + b = result.best_values['base'] + yf_ = err_func(x, CEN, k=FWHM, A=A, base=b ) #result.best_fit + yf = (yf_ ) * (np.max(y) - np.min(y)) + np.min(y) + + #(y - np.min(y)) / (np.max(y) - np.min(y)) - shift + + + ps.cen = CEN + ps.fwhm = FWHM + + if replot: + ### re-plot results: + if logplot=='on': + fig, ax = plt.subplots() #plt.figure() + ax.semilogy([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK') + ax.hold(True) + ax.semilogy([CEN,CEN],[np.min(y),np.max(y)],'r-.',label='CEN') + ax.semilogy([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM') + ax.semilogy(x,y,'bo-') + #plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + #plt.show() + else: + #plt.close(999) + fig, ax = plt.subplots() #plt.figure() + ax.plot([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK') + + #ax.hold(True) + ax.plot([CEN,CEN],[np.min(y),np.max(y)],'m-.',label='CEN') + ax.plot([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM') + ax.plot(x,y,'bo--') + ax.plot(x,yf,'r-', label='Fit') + + #plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + #plt.show() + + ### assign values of interest as function attributes: + ps.peak=PEAK + ps.com=COM + return ps.cen + + + + + + + + + +def create_seg_ring( ring_edges, ang_edges, mask, setup_pargs ): + '''YG Dev April 6, 2018 + Create segment ring mask + Input: + ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ] + ang_edges: edges 
of angles, e.g., [ [20,40], [50, 60], ] + mask: bool type 2D array + set_pargs: dict, should at least contains, center + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + Return: + roi_mask: segmented ring mask: two-D array + qval_dict: dict, key as q-number, val: q val + + ''' + + roi_mask_qr, qr, qr_edge = get_ring_mask(mask, inner_radius= None, outer_radius = None, + width = None, num_rings = None, edges= np.array( ring_edges), unit='pixel', + pargs= setup_pargs) + + roi_mask_ang, ang_center, ang_edge = get_angular_mask( mask, inner_angle= None, + outer_angle = None, width = None, edges = np.array( ang_edges ), + num_angles = None, center = center, flow_geometry= False ) + + + roi_mask, good_ind = combine_two_roi_mask( roi_mask_qr, roi_mask_ang,pixel_num_thres=100) + qval_dict_ = get_qval_dict( qr_center = qr, qz_center = ang_center,one_qz_multi_qr=False) + qval_dict = { i:qval_dict_[k] for (i,k) in enumerate( good_ind) } + return roi_mask, qval_dict + + + + +def find_bad_pixels_FD( bad_frame_list, FD, img_shape = [514, 1030], + threshold= 15, show_progress=True): + '''Designed to find bad pixel list in 500K + threshold: the max intensity in 5K + ''' + bad = np.zeros( img_shape, dtype=bool ) + if show_progress: + for i in tqdm(bad_frame_list[ bad_frame_list>=FD.beg]): + p,v = FD.rdrawframe(i) + w = np.where( v > threshold)[0] + bad.ravel()[ p[w] ] = 1 + # x,y = np.where( imgsa[i] > threshold) + # bad[x[0],y[0]] = 1 + else: + for i in bad_frame_list[ bad_frame_list>=FD.beg]: + p,v = FD.rdrawframe(i) + w = np.where( v > threshold)[0] + bad.ravel()[ p[w] ] = 1 + + return ~bad + + +def get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold=15 ): + '''DEV by Yugang@CHX, June 6, 2019 + Get circular average of a time series using a dynamics mask, which pixel values are defined as + zeors if above a threshold. 
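+ e.g. (illustrative call; FD, mask and setup_pargs as described below):
+ qp_saxs, iq_saxs, q_saxs = get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold=15 )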
+ Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number + Input: + FD: the multifile handler for the time series + mask: a two-d bool type array + setup_pargs: dict, parameters of setup for calculate q-Iq + should have keys as + 'dpix', 'Ldet','lambda_', 'center' + bin_number: bin number of the frame + threshold: define the dynamics mask, which pixel values are defined as + zeors if above this threshold + Output: + qp_saxs: q in pixel + iq_saxs: intenstity + q_saxs: q in A-1 + ''' + beg = FD.beg + end = FD.end + shape = FD.rdframe(beg).shape + Nimg_ = FD.end-FD.beg + #Nimg_ = 100 + Nimg = Nimg_//bin_number + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bin_number )) + beg + for n in tqdm( range(Nimg) ): + t1,t2 = time_edge[n] + #print(t1,t2) + if bin_number==1: + avg_imgi = FD.rdframe(t1) + else: + avg_imgi = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, + plot_ = False,show_progress= False) + badpi = find_bad_pixels_FD( np.arange(t1,t2) , FD, + img_shape = avg_imgi.shape, threshold= threshold, show_progress=False ) + img = avg_imgi* mask * badpi + qp_saxsi, iq_saxsi, q_saxsi = get_circular_average( img, + mask * badpi, save= False, + pargs=setup_pargs ) + #print( img.max()) + if t1==FD.beg: + qp_saxs, iq_saxs, q_saxs = np.zeros_like( qp_saxsi ), np.zeros_like( iq_saxsi ), np.zeros_like( q_saxsi ) + qp_saxs += qp_saxsi + iq_saxs += iq_saxsi + q_saxs += q_saxsi + qp_saxs /= Nimg + iq_saxs /= Nimg + q_saxs /= Nimg + + return qp_saxs, iq_saxs, q_saxs + +def get_waxs_beam_center( gamma, origin = [432, 363], Ldet = 1495, pixel_size = 75 * 1e-3 ): + '''YG Feb 10, 2018 + Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma + Input: + gamma: angle in degree + Ldet: sample to detector distance, 1495 mm for CHX WAXS + origin: beam center for gamma = 0, (python x,y coordinate in pixel) + pxiel size: 75 * 1e-3 mm for Eiger 1M + output: + beam center: for the target gamma, in pixel + ''' + return [ int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ] + + + +def get_img_from_iq( qp, iq, img_shape, center): + '''YG Jan 24, 2018 + Get image from circular average + Input: + qp: q in pixel unit + iq: circular average + image_shape, e.g., [256,256] + center: [center_y, center_x] e.g., [120, 200] + Output: + img: recovered image + ''' + pixelist = np.arange( img_shape[0] * img_shape[1] ) + pixely = pixelist%img_shape[1] -center[1] + pixelx = pixelist//img_shape[1] - center[0] + r= np.hypot(pixelx, pixely) #leave as float. + #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return (np.interp( r, qp, iq )).reshape( img_shape ) + + +def average_array_withNan( array, axis=0, mask=None): + '''YG. Jan 23, 2018 + Average array invovling np.nan along axis + + Input: + array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + avg: averaged array along axis + ''' + shape = array.shape + if mask == None: + mask = np.isnan(array) + #mask = np.ma.masked_invalid(array).mask + array_ = np.ma.masked_array(array, mask=mask) + try: + sums = np.array( np.ma.sum( array_[:,:], axis= axis ) ) + except: + sums = np.array( np.ma.sum( array_[:], axis= axis ) ) + + cts = np.sum(~mask,axis=axis) + #print(cts) + return sums/cts + +def deviation_array_withNan( array, axis=0, mask=None): + '''YG. 
Jan 23, 2018 + Get the deviation of array invovling np.nan along axis + + Input: + array: ND array + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + dev: the deviation of array along axis + ''' + avg2 = average_array_withNan( array**2, axis = axis, mask = mask ) + avg = average_array_withNan( array, axis = axis, mask = mask ) + return np.sqrt( avg2 - avg**2 ) + + + +def refine_roi_mask( roi_mask, pixel_num_thres=10): + '''YG Dev Jan20,2018 + remove bad roi which pixel numbe is lower pixel_num_thres + roi_mask: array, + pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, + i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres, + that roi will be considered as bad one and be removed. + ''' + new_mask = np.zeros_like( roi_mask ) + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + good_ind = np.where( nopr >= pixel_num_thres)[0] +1 + l = len(good_ind) + new_ind = np.arange( 1, l+1 ) + for i, gi in enumerate( good_ind ): + new_mask.ravel()[ + np.where( roi_mask.ravel() == gi)[0] ] = new_ind[i] + return new_mask, good_ind -1 + +def shrink_image_stack( imgs, bins): + '''shrink imgs by bins + imgs: shape as [Nimg, imx, imy] ''' + Nimg, imx, imy = imgs.shape + bx, by = bins + imgsk = np.zeros( [Nimg, imx//bx, imy//by] ) + N = len(imgs) + for i in range(N): + imgsk[i] = shrink_image(imgs[i], bins ) + return imgsk + +def shrink_image(img, bins ): + '''YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y + input: + img: 2d array, + bins: integer list, eg. [2,2] + output: + imgb: binned img + ''' + m,n = img.shape + bx, by = bins + Nx, Ny = m//bx, n//by + #print(Nx*bx, Ny*by) + return img[:Nx*bx, :Ny*by].reshape( Nx,bx, Ny, by).mean(axis=(1,3) ) + + +def get_diff_fv( g2_fit_paras, qval_dict, ang_init=137.2): + '''YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras ''' + g2_fit_para_ = g2_fit_paras.copy() + qr = np.array( [qval_dict[k][0] for k in sorted( qval_dict.keys())] ) + qang = np.array( [qval_dict[k][1] for k in sorted( qval_dict.keys())] ) + #x=g2_fit_para_.pop( 'relaxation_rate' ) + #x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_['diff'] = g2_fit_paras[ 'relaxation_rate' ]/qr**2 + cos_part = np.abs( np.cos( np.radians( qang - ang_init)) ) + g2_fit_para_['fv'] = g2_fit_paras[ 'flow_velocity' ]/cos_part/qr + return g2_fit_para_ + + + + +# function to get indices of local extrema (=indices of speckle echo maximum amplitudes): +def get_echos(dat_arr,min_distance=10): + """ + getting local maxima and minima from 1D data -> e.g. speckle echos + strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima + using np.argmin to find absolute minima between relative maxima + returns [max_ind,min_ind] -> lists of indices corresponding to local maxima/minima + by LW 10/23/2018 + """ + from skimage.feature import peak_local_max + max_ind=peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) 
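+    # note (added comment): between each pair of consecutive maxima, the absolute minimum is
+    # located with np.argmin; both index lists are reversed at the end to undo the ordering
+    # returned by peak_local_max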
+ min_ind=[] + for i in range(len(max_ind[:-1])): + min_ind.append(max_ind[i+1][0]+np.argmin(dat_arr[max_ind[i+1][0]:max_ind[i][0]])) + #unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind=[] + for l in max_ind: + mmax_ind.append(l[0]) + #return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)),list(reversed(min_ind))] + + +def pad_length(arr,pad_val=np.nan): + """ + arr: 2D matrix + pad_val: values being padded + adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix + -> used to convert python generic data object to HDF5 native format + function fixes python bug in padding (np.pad) integer array with np.nan + update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogenous size + by LW 12/30/2017 + """ + max_len=[] + for i in range(len(arr)): + max_len.append([len(arr[i])]) + max_len=np.max(max_len) + for l in range(len(arr)): + arr[l]=np.pad(arr[l]*1.,(0,max_len-np.size(arr[l])),mode='constant',constant_values=pad_val) + return arr + + + +def save_array_to_tiff(array, output, verbose=True): + '''Y.G. Nov 1, 2017 + Save array to a tif file + ''' + img = PIL.Image.fromarray(array) + img.save( output ) + if verbose: + print( 'The data is save to: %s.'%( output )) + + + +def load_pilatus(filename): + '''Y.G. Nov 1, 2017 + Load a pilatus 2D image + ''' + return np.array( PIL.Image.open(filename).convert('I') ) + +def ls_dir(inDir, have_list=[], exclude_list=[] ): + '''Y.G. Aug 1, 2019 + List all filenames in a filefolder + inDir: fullpath of the inDir + have_string: only retrun filename containing the string + exclude_string: only retrun filename not containing the string + + ''' + from os import listdir + from os.path import isfile, join + + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + tifs_ = [] + for tif in tifs: + flag=1 + for string in have_list: + if string not in tif: + flag *=0 + for string in exclude_list: + if string in tif: + flag *=0 + if flag: + tifs_.append( tif ) + + return np.array( tifs_ ) + + +def ls_dir2(inDir, string=None): + '''Y.G. Nov 1, 2017 + List all filenames in a filefolder (not include hidden files and subfolders) + inDir: fullpath of the inDir + string: if not None, only retrun filename containing the string + ''' + from os import listdir + from os.path import isfile, join + if string == None: + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + else: + tifs = np.array( [f for f in listdir(inDir) if (isfile(join(inDir, f)))&(string in f) ] ) + return tifs + +def re_filename( old_filename, new_filename, inDir=None, verbose=True ): + '''Y.G. Nov 28, 2017 + Rename old_filename with new_filename in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_filename/ new_filename: string + an example: + re_filename( 'uid=run20_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' + ) + ''' + if inDir != None: + os.rename(inDir + old_filename, inDir+new_filename) + else: + os.rename( old_filename, new_filename) + print('The file: %s is changed to: %s.'%(old_filename, new_filename)) + + +def re_filename_dir( old_pattern, new_pattern, inDir,verbose=True ): + '''Y.G. 
Nov 28, 2017 + Rename all filenames with old_pattern with new_pattern in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_pattern, new_pattern + an example, + re_filename_dir('20_', '17_', inDir ) + ''' + fps = ls_dir(inDir) + for fp in fps: + if old_pattern in fp: + old_filename = fp + new_filename = fp.replace(old_pattern, new_pattern) + re_filename( old_filename, new_filename, inDir,verbose= verbose ) + +def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent=True, qprecision=5): + """ + function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis + [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) + calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True) + qdict: qval_dict from analysis pipeline/hdf5 result file + q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True) + q_thresh: threshold for comparing Q-values, set to 0 for exact comparison + phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True) + p_thresh: threshold for comparing phi values, set to 0 for exact comparison + silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest + by LW 10/21/2017 + update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) + update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environments >= 2019-3.0.1 + """ + import collections + from collections import OrderedDict + qdict = collections.OrderedDict(sorted(qdict.items())) + qs=[] + phis=[] + for i in qdict.keys(): + qs.append(qdict[i][0]) + phis.append(qdict[i][1]) + qslist=list(OrderedDict.fromkeys(qs)) + qslist = np.unique( np.round(qslist, qprecision ) ) + phislist=list(OrderedDict.fromkeys(phis)) + qslist=list(np.sort(qslist)) + phislist=list(np.sort(phislist)) + if q_nr: + qinterest=qslist[q] + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] + else: + qinterest=q + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new + if phi_nr: + phiinterest=phislist[phi] + phiindices = [i for i,x in enumerate(phis) if x == phiinterest] + else: + phiinterest=phi + phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new + ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] #-> this is the original + if silent == False: + print('list of available Qs:') + print(qslist) + print('list of available phis:') + print(phislist) + print('Roi number for Q= '+str(ret_list[1])+' and phi= '+str(ret_list[2])+': '+str(ret_list[0])) + return ret_list + +def get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): + '''YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, + namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) + Input: + x: 1D np.array + y: 1D np.array + mid_xpoint: float, the middle point of x + xrange: [x1,x2] + Return: + D1, gmfit1, D2, gmfit2 : + fit parameter (slope, background) of linear fit1 + convinent fit class, gmfit1(x) gives yvale + fit parameter (slope, background) of linear fit2 + convinent fit class, gmfit2(x) gives yvale + + ''' + if xrange == None: + x1,x2 = min(x), max(x) + x1,x2=xrange + if mid_xpoint2 == None: + mid_xpoint2= mid_xpoint1 + D1, gmfit1 = linear_fit( x,y, xrange= [ x1,mid_xpoint1 
]) + D2, gmfit2 = linear_fit( x,y, xrange= [mid_xpoint2, x2 ]) + return D1, gmfit1, D2, gmfit2 + +def get_cross_point( x, gmfit1, gmfit2 ): + '''YG Octo 16,2017 + Get croess point of two curve + ''' + y1 = gmfit1(x) + y2 = gmfit2(x) + return x[np.argmin( np.abs(y1-y2) )] + +def get_curve_turning_points( x, y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): + '''YG Octo 16,2017 + Get a turning point of a curve by doing a two-linear fit + ''' + D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2, xrange ) + return get_cross_point( x, gmfit1, gmfit2 ) + + +def plot_fit_two_linear_fit(x,y, gmfit1, gmfit2, ax=None ): + '''YG Octo 16,2017 Plot data with two fitted linear func + ''' + if ax == None: + fig, ax =plt.subplots() + plot1D( x = x, y = y, ax =ax, c='k', legend='data', m='o', ls='')#logx=True, logy=True ) + plot1D( x = x, y = gmfit1(x), ax =ax, c='r', m='', ls='-',legend='fit1' ) + plot1D( x = x, y = gmfit2(x), ax =ax, c='b', m='', ls='-',legend='fit2' ) + return ax + + +def linear_fit( x,y, xrange=None): + '''YG Octo 16,2017 copied from XPCS_SAXS + a linear fit + ''' + if xrange != None: + xmin, xmax = xrange + x1,x2 = find_index( x,xmin,tolerance= None),find_index( x,xmax,tolerance= None) + x_ = x[x1:x2] + y_ = y[x1:x2] + else: + x_=x + y_=y + D0 = np.polyfit(x_, y_, 1) + gmfit = np.poly1d(D0) + return D0, gmfit + + +def find_index( x,x0,tolerance= None): + '''YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + ''' + + N=len(x) + i=0 + if x0 > max(x): + position= len(x) -1 + elif x0 max(x): + position= len(x) -1 + elif x0 di: + try: + els = line.split() + if good_cols == None: + temp = np.array( els, dtype=float ) + else: + temp= np.array( [els[j] for j in good_cols], dtype=float ) + data=np.vstack( (data,temp)) + except: + pass + if labels == None: + labels = np.arange(data.shape[1]) + df = pds.DataFrame( data, index= np.arange(data.shape[0]), columns= labels ) + return df + + + +def get_print_uids( start_time, stop_time, return_all_info=False): + '''Update Feb 20, 2018 also return full uids + YG. 
Octo 3, 2017@CHX + Get full uids and print uid plus Measurement contents by giving start_time, stop_time + + ''' + hdrs = list( db(start_time= start_time, stop_time = stop_time) ) + fuids = np.zeros( len(hdrs),dtype=object) + uids = np.zeros( len(hdrs),dtype=object) + sids = np.zeros( len(hdrs), dtype=object) + n=0 + all_info = np.zeros( len(hdrs), dtype=object) + for i in range(len(hdrs)): + fuid = hdrs[-i-1]['start']['uid'] #reverse order + uid = fuid[:6] #reverse order + sid = hdrs[-i-1]['start']['scan_id'] + fuids[n]=fuid + uids[n]=uid + sids[n]=sid + date = time.ctime(hdrs[-i-1]['start']['time']) + try: + m = hdrs[-i-1]['start']['Measurement'] + except: + m='' + info = "%3d: uid = '%s' ##%s #%s: %s-- %s "%(i,uid,date,sid,m, fuid) + print( info ) + if return_all_info: + all_info[n]=info + n +=1 + if not return_all_info: + return fuids, uids, sids + else: + return fuids, uids, sids, all_info + + + +def get_last_uids( n=-1 ): + '''YG Sep 26, 2017 + A Convinient function to copy uid to jupyter for analysis''' + uid = db[n]['start']['uid'][:8] + sid = db[n]['start']['scan_id'] + m = db[n]['start']['Measurement'] + return " uid = '%s' #(scan num: %s (Measurement: %s "%(uid,sid,m) + + + +def get_base_all_filenames( inDir, base_filename_cut_length = -7 ): + '''YG Sep 26, 2017 + Get base filenames and their related all filenames + Input: + inDir, str, input data dir + base_filename_cut_length: to which length the base name is unique + Output: + dict: keys, base filename + vales, all realted filename + ''' + from os import listdir + from os.path import isfile, join + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + tifsc = list(tifs.copy()) + utifs = np.sort( np.unique( np.array([ f[:base_filename_cut_length] for f in tifs] ) ) )[::-1] + files = {} + for uf in utifs: + files[uf] = [] + i = 0 + reName = [] + for i in range(len(tifsc)): + if uf in tifsc[i]: + files[uf].append( tifsc[i] ) + reName.append(tifsc[i]) + for fn in reName: + tifsc.remove(fn) + return files + + +def create_ring_mask( shape, r1, r2, center, mask=None): + '''YG. Sep 20, 2017 Develop@CHX + Create 2D ring mask + input: + shape: two integer number list, mask shape, e.g., [100,100] + r1: the inner radius + r2: the outer radius + center: two integer number list, [cx,cy], ring center, e.g., [30,50] + output: + 2D numpy array, 0,1 type + ''' + + m = np.zeros( shape, dtype= bool) + rr,cc = disk((center[1], center[0]), r2, shape=shape ) + m[rr,cc] = 1 + rr,cc = disk((center[1], center[0]), r1,shape=shape ) + m[rr,cc] = 0 + if mask != None: + m += mask + return m + +def get_image_edge(img): + ''' + Y.G. Developed at Sep 8, 2017 @CHX + Get sharp edges of an image + img: two-D array, e.g., a roi mask + ''' + edg_ = prewitt(img/1.0) + edg = np.zeros_like(edg_) + w = np.where(edg_ > 1e-10) + edg[w] = img[w] + edg[np.where(edg==0)] = 1 + return edg + +def get_image_with_roi( img, roi_mask, scale_factor = 2): + ''' + Y.G. 
Developed at Sep 8, 2017 @CHX + Get image with edges of roi_mask by doing + i) get edges of roi_mask by function get_image_edge + ii) scale img at region of interest (ROI) by scale_factor + img: two-D array for image + roi_mask: two-D array for ROI + scale_factor: scaling factor of ROI in image + ''' + edg = get_image_edge( roi_mask ) + img_ = img.copy() + w = np.where(roi_mask) + img_[w] = img[w] * scale_factor + return img_ * edg + + + + + +def get_today_date( ): + from time import gmtime, strftime + return strftime("%m-%d-%Y", gmtime() ) + + +def move_beamstop( mask, xshift, yshift ): + '''Y.G. Developed at July 18, 2017 @CHX + Create new mask by shift the old one with xshift, yshift + Input + --- + mask: 2D numpy array, 0 for bad pixels, 1 for good pixels + xshift, integer, shift value along x direction + yshift, integer, shift value along y direction + + Output + --- + mask, 2D numpy array, + ''' + m = np.ones_like(mask) + W,H = mask.shape + w = np.where(mask==0) + nx, ny = w[0]+ int(yshift), w[1]+ int(xshift ) + gw = np.where( (nx >= 0) & (nx= 0) & (ny= xmax) | ( pixel <= xmin) )[0] + else: + badp = filter_dict[k] + if len(badp)!=0: + pls = np.where([rf==k])[1] + rf[ pls[badp] ] = 0 + return rm + + +## +#Dev at March 31 for create Eiger chip mask +def create_chip_edges_mask( det='1M' ): + ''' Create a chip edge mask for Eiger detector + + ''' + if det == '1M': + shape = [1065, 1030] + w = 4 + mask = np.ones( shape , dtype = np.int32) + cx = [ 1030//4 *i for i in range(1,4) ] + #cy = [ 1065//4 *i for i in range(1,4) ] + cy = [808, 257 ] + #print (cx, cy ) + for c in cx: + mask[:, c-w//2:c+w//2 ] = 0 + for c in cy: + mask[ c-w//2:c+w//2, : ] = 0 + + return mask + +def create_ellipse_donut( cx, cy , wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): + Nmax = np.max( np.unique( roi_mask ) ) + rr1, cc1 = ellipse( cy,cx, wy_inner, wx_inner ) + rr2, cc2 = ellipse( cy, cx, wy_inner + gap, wx_inner +gap ) + rr3, cc3 = ellipse( cy, cx, wy_outer,wx_outer ) + roi_mask[rr3,cc3] = 2 + Nmax + roi_mask[rr2,cc2] = 0 + roi_mask[rr1,cc1] = 1 + Nmax + return roi_mask + +def create_box( cx, cy, wx, wy, roi_mask): + Nmax = np.max( np.unique( roi_mask ) ) + for i, [cx_,cy_] in enumerate(list( zip( cx,cy ))): #create boxes + x = np.array( [ cx_-wx, cx_+wx, cx_+wx, cx_-wx]) + y = np.array( [ cy_-wy, cy_-wy, cy_+wy, cy_+wy]) + rr, cc = polygon( y,x) + roi_mask[rr,cc] = i +1 + Nmax + return roi_mask + + + + +def create_folder( base_folder, sub_folder ): + ''' + Crate a subfolder under base folder + Input: + base_folder: full path of the base folder + sub_folder: sub folder name to be created + Return: + Created full path of the created folder + ''' + + data_dir0 = os.path.join( base_folder, sub_folder ) + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + return data_dir0 + + + + + +def create_user_folder( CYCLE, username=None, default_dir= '/XF11ID/analysis/' ): + ''' + Crate a folder for saving user data analysis result + Input: + CYCLE: run cycle + username: if None, get username from the jupyter username + Return: + Created folder name + ''' + if username !='Default': + if username == None: + username = getpass.getuser() + data_dir0 = os.path.join(default_dir, CYCLE, username, 'Results/') + else: + data_dir0 = os.path.join(default_dir, CYCLE +'/') + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + 
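+    # note (added comment): exist_ok=True keeps repeated calls for the same cycle/user from
+    # raising if the results folder already exists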
os.makedirs(data_dir0, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + return data_dir0 + + + + + + +################################## +#########For dose analysis ####### +################################## +def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): + ''' + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + ''' + return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att ) + + +def get_multi_tau_lag_steps( fra_max, num_bufs = 8 ): + ''' + Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max + Parameters: + fra_max: integer, the maximun frame number + buf_num (default=8), + Return: + taus_in_log, a list + + e.g., + get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) + + ''' + num_levels = int(np.log( fra_max/(num_bufs-1))/np.log(2) +1) +1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + return lag_steps[lag_steps < fra_max] + + + +def get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = True, + num_bufs = 8): + ''' + Get taus for dose dependent analysis + Parameters: + fra_max_list: a list, a lsit of largest available frame number + acq_time: acquistion time for each frame + log_taus: if true, will use the multi-tau defined taus bu using buf_num (default=8), + otherwise, use deltau =1 + Return: + tausd, a dict, with keys as taus_max_list items + e.g., + get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True, num_bufs = 8) + --> + {20: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]), + 30: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28]), + 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) + } + + ''' + tausd = {} + for n in fra_max_list: + if max_fra_num != None: + L = max_fra_num + else: + L = np.infty + if n>L: + warnings.warn("Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." + "We will use the maxium dose of the data.") + n = L + if log_taus: + lag_steps = get_multi_tau_lag_steps(n, num_bufs) + else: + lag_steps = np.arange( n ) + tausd[n] = lag_steps * acq_time + return tausd + + + + +def check_lost_metadata(md, Nimg=None, inc_x0 =None, inc_y0= None, pixelsize=7.5*10*(-5) ): + '''Y.G. 
Dec 31, 2016, check lost metadata + + Parameter: + md: dict, meta data dictionay + Nimg: number of frames for this uid metadata + inc_x0/y0: incident beam center x0/y0, if None, will over-write the md['beam_center_x/y'] + pixelsize: if md don't have ['x_pixel_size'], the pixelsize will add it + Return: + dpix: pixelsize, in mm + lambda_: wavelegth of the X-rays in Angstroms + exposuretime: exposure time in sec + timeperframe: acquisition time is sec + center: list, [x,y], incident beam center in pixel + Will also update md + ''' + mdn = md.copy() + if 'number of images' not in list(md.keys()): + md['number of images'] = Nimg + if 'x_pixel_size' not in list(md.keys()): + md['x_pixel_size'] = 7.5000004e-05 + dpix = md['x_pixel_size'] * 1000. #in mm, eiger 4m is 0.075 mm + try: + lambda_ =md['wavelength'] + except: + lambda_ =md['incident_wavelength'] # wavelegth of the X-rays in Angstroms + try: + Ldet = md['det_distance'] + if Ldet<=1000: + Ldet *=1000 + md['det_distance'] = Ldet + except: + Ldet = md['detector_distance'] + if Ldet<=1000: + Ldet *=1000 + md['detector_distance'] = Ldet + + + try:#try exp time from detector + exposuretime= md['count_time'] #exposure time in sec + except: + exposuretime= md['cam_acquire_time'] #exposure time in sec + try:#try acq time from detector + acquisition_period = md['frame_time'] + except: + try: + acquisition_period = md['acquire period'] + except: + uid = md['uid'] + acquisition_period = float( db[uid]['start']['acquire period'] ) + timeperframe = acquisition_period + if inc_x0 != None: + mdn['beam_center_x']= inc_y0 + print( 'Beam_center_x has been changed to %s. (no change in raw metadata): '%inc_y0) + if inc_y0 != None: + mdn['beam_center_y']= inc_x0 + print( 'Beam_center_y has been changed to %s. (no change in raw metadata): '%inc_x0) + center = [ int(mdn['beam_center_x']),int( mdn['beam_center_y'] ) ] #beam center [y,x] for python image + center=[center[1], center[0]] + + return dpix, lambda_, Ldet, exposuretime, timeperframe, center + + +def combine_images( filenames, outputfile, outsize=(2000, 2400)): + '''Y.G. 
Dec 31, 2016 + Combine images together to one image using PIL.Image + Input: + filenames: list, the images names to be combined + outputfile: str, the filename to generate + outsize: the combined image size + Output: + save a combined image file + ''' + N = len( filenames) + #nx = np.int( np.ceil( np.sqrt(N)) ) + #ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int( np.ceil( np.sqrt(N)) ) + nx = int( np.ceil( N / float(ny) ) ) + + #print(nx,ny) + result = Image.new("RGB", outsize, color=(255,255,255,0)) + basewidth = int( outsize[0]/nx ) + hsize = int( outsize[1]/ny ) + for index, file in enumerate(filenames): + path = os.path.expanduser(file) + img = Image.open(path) + bands = img.split() + ratio = img.size[1]/ img.size[0] #h/w + if hsize > basewidth * ratio: + basewidth_ = basewidth + hsize_ = int( basewidth * ratio ) + else: + basewidth_ = int( hsize/ratio ) + hsize_ = hsize + #print( index, file, basewidth, hsize ) + size = (basewidth_,hsize_) + bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] + img = Image.merge('RGBA', bands) + x = index % nx * basewidth + y = index // nx * hsize + w, h = img.size + #print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h )) + result.save( outputfile,quality=100, optimize=True ) + print( 'The combined image is saved as: %s'%outputfile) + + +def get_qval_dict( qr_center, qz_center=None, qval_dict = None, multi_qr_for_one_qz= True, + one_qz_multi_qr = True): + '''Y.G. Dec 27, 2016 + Map the roi label array with qr or (qr,qz) or (q//, q|-) values + Parameters: + qr_center: list, a list of qr + qz_center: list, a list of qz, + multi_qr_for_one_qz: by default=True, + if one_qz_multi_qr: + one qz_center corresponds to all qr_center, in other words, there are totally, len(qr_center)* len(qz) qs + else: + one qr_center corresponds to all qz_center, + else: one qr with one qz + qval_dict: if not None, will append the new dict to the qval_dict + Return: + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + + ''' + + if qval_dict == None: + qval_dict = {} + maxN = 0 + else: + maxN = np.max( list( qval_dict.keys() ) ) +1 + + if qz_center != None: + if multi_qr_for_one_qz: + if one_qz_multi_qr: + for qzind in range( len( qz_center)): + for qrind in range( len( qr_center)): + qval_dict[ maxN + qzind* len( qr_center) + qrind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) + else: + for qrind in range( len( qr_center)): + for qzind in range( len( qz_center)): + qval_dict[ maxN + qrind* len( qz_center) + qzind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) + + + else: + for i, [qr, qz] in enumerate(zip( qr_center, qz_center)): + qval_dict[ maxN + i ] = np.array( [ qr, qz ] ) + else: + for qrind in range( len( qr_center)): + qval_dict[ maxN + qrind ] = np.array( [ qr_center[qrind] ] ) + return qval_dict + + +def update_qval_dict( qval_dict1, qval_dict2 ): + ''' Y.G. Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + Output: + qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) + ''' + maxN = np.max( list( qval_dict1.keys() ) ) +1 + qval_dict = {} + qval_dict.update( qval_dict1 ) + for k in list( qval_dict2.keys() ): + qval_dict[k + maxN ] = qval_dict2[k] + return qval_dict + +def update_roi_mask( roi_mask1, roi_mask2 ): + ''' Y.G. 
Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + roi_mask1, 2d-array, label array, same shape as xpcs frame, + roi_mask2, 2d-array, label array, same shape as xpcs frame, + Output: + roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 + ''' + roi_mask = roi_mask1.copy() + w= np.where( roi_mask2 ) + roi_mask[w] = roi_mask2[w] + np.max( roi_mask ) + return roi_mask + + +def check_bad_uids(uids, mask, img_choice_N = 10, bad_uids_index = None ): + '''Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + ''' + import random + buids = [] + guids = list( uids ) + #print( guids ) + if bad_uids_index == None: + bad_uids_index = [] + for i, uid in enumerate(uids): + #print( i, uid ) + if i not in bad_uids_index: + detector = get_detector( db[uid ] ) + imgs = load_data( uid, detector ) + img_samp_index = random.sample( range(len(imgs)), img_choice_N) + imgsa = apply_mask( imgs, mask ) + avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uid) + if avg_img.max() == 0: + buids.append( uid ) + guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) + print( 'The bad uid is: %s'%uid ) + else: + guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) + buids.append( uid ) + print( 'The bad uid is: %s'%uid ) + print( 'The total and bad uids number are %s and %s, repsectively.'%( len(uids), len(buids) ) ) + return guids, buids + + + +def find_uids(start_time, stop_time ): + '''Y.G. 
Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = db(start_time= start_time, stop_time = stop_time) + try: + print ('Totally %s uids are found.'%(len(list(hdrs)))) + except: + pass + sids=[] + uids=[] + fuids=[] + for hdr in hdrs: + s= get_sid_filenames( hdr) + #print (s[1][:8]) + sids.append( s[0] ) + uids.append( s[1][:8] ) + fuids.append( s[1] ) + sids=sids[::-1] + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(sids), np.array(uids), np.array(fuids) + + +def ployfit( y, x=None, order = 20 ): + ''' + fit data (one-d array) by a ploynominal function + return the fitted one-d array + ''' + if x == None: + x = range(len(y)) + pol = np.polyfit(x, y, order) + return np.polyval(pol, x) + +def check_bad_data_points( data, fit=True, polyfit_order = 30, legend_size = 12, + plot=True, scale=1.0, good_start=None, good_end=None, path=None, return_ylim=False ): + ''' + data: 1D array + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + ''' + if good_start == None: + good_start=0 + if good_end == None: + good_end = len( data ) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end,len( data ) )] + + d_ = data[good_start:good_end] + + if fit: + pfit = ployfit( d_, order = polyfit_order) + d = d_ - pfit + else: + d = d_ + pfit = np.ones_like(d) * data.mean() + + ymin = d.mean()-scale *d.std() + ymax = d.mean()+scale *d.std() + + if plot: + fig = plt.figure( ) + ax = fig.add_subplot(2,1,1 ) + plot1D( d_, ax = ax, color='k', legend='data',legend_size=legend_size ) + plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title='Find Bad Points',legend_size=legend_size ) + + ax2 = fig.add_subplot(2,1,2 ) + plot1D( d, ax = ax2,legend='difference',marker='s', color='b', ) + + #print('here') + plot1D(x=[0,len(d_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) + + plot1D(x=[0,len(d_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='',legend_size=legend_size ) + + if path != None: + fp = path + '%s'%( uid ) + '_find_bad_points' + '.png' + plt.savefig( fp, dpi=fig.dpi) + bd2= list( np.where( np.abs(d -d.mean()) > scale *d.std() )[0] + good_start ) + + if return_ylim: + return np.array( bd1 + bd2 + bd3 ), ymin, ymax,pfit + else: + return np.array( bd1 + bd2 + bd3 ), pfit + + + + +def get_bad_frame_list( imgsum, fit=True, polyfit_order = 30,legend_size = 12, + plot=True, scale=1.0, good_start=None, good_end=None, uid='uid',path=None, + + return_ylim=False): + ''' + imgsum: the sum intensity of a time series + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + ''' + if good_start == None: + good_start=0 + if good_end == None: + good_end = len( imgsum ) + bd1 = [i for i in range(0, 
good_start)] + bd3 = [i for i in range(good_end,len( imgsum ) )] + + imgsum_ = imgsum[good_start:good_end] + + if fit: + pfit = ployfit( imgsum_, order = polyfit_order) + data = imgsum_ - pfit + else: + data = imgsum_ + pfit = np.ones_like(data) * data.mean() + + ymin = data.mean()-scale *data.std() + ymax = data.mean()+scale *data.std() + + if plot: + fig = plt.figure( ) + ax = fig.add_subplot(2,1,1 ) + plot1D( imgsum_, ax = ax, color='k', legend='data',legend_size=legend_size ) + plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title=uid + '_imgsum',legend_size=legend_size ) + + ax2 = fig.add_subplot(2,1,2 ) + plot1D( data, ax = ax2,legend='difference',marker='s', color='b', ) + + #print('here') + plot1D(x=[0,len(imgsum_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) + + plot1D(x=[0,len(imgsum_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='imgsum_to_find_bad_frame',legend_size=legend_size ) + + if path != None: + fp = path + '%s'%( uid ) + '_imgsum_analysis' + '.png' + plt.savefig( fp, dpi=fig.dpi) + + + + bd2= list( np.where( np.abs(data -data.mean()) > scale *data.std() )[0] + good_start ) + + if return_ylim: + return np.array( bd1 + bd2 + bd3 ), ymin, ymax + else: + return np.array( bd1 + bd2 + bd3 ) + +def save_dict_csv( mydict, filename, mode='w'): + import csv + with open(filename, mode) as csv_file: + spamwriter = csv.writer(csv_file) + for key, value in mydict.items(): + spamwriter.writerow([key, value]) + + + +def read_dict_csv( filename ): + import csv + with open(filename, 'r') as csv_file: + reader = csv.reader(csv_file) + mydict = dict(reader) + return mydict + + +def find_bad_pixels( FD, bad_frame_list, uid='uid'): + bpx = [] + bpy=[] + for n in bad_frame_list: + if n>= FD.beg and n<=FD.end: + f = FD.rdframe(n) + w = np.where( f == f.max()) + if len(w[0])==1: + bpx.append( w[0][0] ) + bpy.append( w[1][0] ) + + + return trans_data_to_pd( [bpx,bpy], label=[ uid+'_x', uid +'_y' ], dtype='list') + + + + + +def mask_exclude_badpixel( bp, mask, uid ): + + for i in range( len(bp)): + mask[ int( bp[bp.columns[0]][i] ), int( bp[bp.columns[1]][i] )]=0 + return mask + + + +def print_dict( dicts, keys=None): + ''' + print keys: values in a dicts + if keys is None: print all the keys + ''' + if keys == None: + keys = list( dicts.keys()) + for k in keys: + try: + print('%s--> %s'%(k, dicts[k]) ) + except: + pass + +def get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ): + ''' + Jan 25, 2018 add default_dec opt + + Y.G. Dev Dec 8, 2016 + + Get metadata from a uid + + - Adds detector key with detector name + + Parameters: + uid: the unique data acquisition id + kwargs: overwrite the meta data, for example + get_meta_data( uid = uid, sample = 'test') --> will overwrtie the meta's sample to test + return: + meta data of the uid: a dictionay + with keys: + detector + suid: the simple given uid + uid: full uid + filename: the full path of the data + start_time: the data acquisition starting time in a human readable manner + And all the input metadata + ''' + + if 'verbose' in kwargs.keys(): # added: option to suppress output + verbose= kwargs['verbose'] + else: + verbose=True + + import time + header = db[uid] + md ={} + + md['suid'] = uid #short uid + try: + md['filename'] = get_sid_filenames(header)[2][0] + except: + md['filename'] = 'N.A.' 
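+    # note (added comment): when more than one device is attached to the run, the one whose
+    # name contains default_dec (e.g. 'eiger') is selected below and its configuration data
+    # is merged into md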
+ + devices = sorted( list(header.devices()) ) + if len(devices) > 1: + if verbose: # added: mute output + print( "More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'."%default_dec) + #raise ValueError("More than one device. This would have unintented consequences.") + dec = devices[0] + for dec_ in devices: + if default_dec in dec_: + dec = dec_ + + #print(dec) + #detector_names = sorted( header.start['detectors'] ) + detector_names = sorted( get_detectors(db[uid]) ) + #if len(detector_names) > 1: + # raise ValueError("More than one det. This would have unintented consequences.") + detector_name = detector_names[0] + #md['detector'] = detector_name + md['detector'] = get_detector( header ) + #print( md['detector'] ) + new_dict = header.config_data(dec)['primary'][0] + for key, val in new_dict.items(): + newkey = key.replace(detector_name+"_", "") + md[newkey] = val + + # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): + # md[ k[len(dec)+1:] ]= v + + try: + md.update(header.start['plan_args'].items()) + md.pop('plan_args') + except: + pass + md.update(header.start.items()) + + + # print(header.start.time) + md['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(header.start['time'])) + md['stop_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime( header.stop['time'])) + try: # added: try to handle runs that don't contain image data + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + md['img_shape'] = descriptor['data_keys'][md['detector']]['shape'][:2][::-1] + except: + if verbose: + print("couldn't find image shape...skip!") + else: + pass + md.update(kwargs) + + #for k, v in sorted(md.items()): + # ... + # print(f'{k}: {v}') + + return md + + + +def get_max_countc(FD, labeled_array ): + """YG. 2016, Nov 18 + Compute the max intensity of ROIs in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. 
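# ---- Editor's illustrative sketch (not part of the original patch) ----
# Typical call pattern for get_meta_data() defined above. Requires a configured
# databroker handle `db` at the beamline; the uid string and the 'sample'
# override below are placeholders, not real data.
md = get_meta_data('a1b2c3d4', default_dec='eiger', verbose=False, sample='test')
print(md['suid'], md['detector'], md['start_time'])
print(md['sample'])   # kwargs overwrite/extend the stored metadata, as documented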
If None, this function will extract averages for all + ROIs + + Returns + ------- + max_intensity : a float + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices( labeled_array ) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + + if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) ) + + max_inten =0 + for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get max intensity of ROIs in all frames' ): + try: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + max_inten = max( max_inten, np.max(v[w]) ) + except: + pass + return max_inten + + +def create_polygon_mask( image, xcorners, ycorners ): + ''' + Give image and x/y coners to create a polygon mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + ''' + from skimage.draw import line_aa, line, polygon, disk + imy, imx = image.shape + bst_mask = np.zeros_like( image , dtype = bool) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) + bst_mask[rr,cc] =1 + #full_mask= ~bst_mask + return bst_mask + + +def create_rectangle_mask( image, xcorners, ycorners ): + ''' + Give image and x/y coners to create a rectangle mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + ''' + from skimage.draw import line_aa, line, polygon, disk + imy, imx = image.shape + bst_mask = np.zeros_like( image , dtype = bool) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) + bst_mask[rr,cc] =1 + #full_mask= ~bst_mask + return bst_mask + + +def create_multi_rotated_rectangle_mask( image, center=None, length=100, width=50, angles=[0] ): + ''' Developed at July 10, 2017 by Y.G.@CHX, NSLS2 + Create multi rectangle-shaped mask by rotating a rectangle with a list of angles + The original rectangle is defined by four corners, i.e., + [ (center[1] - width//2, center[0]), + (center[1] + width//2, center[0]), + (center[1] + width//2, center[0] + length), + (center[1] - width//2, center[0] + length) + ] + + Parameters: + image: 2D numpy array, to give mask shape + center: integer list, if None, will be the center of the image + length: integer, the length of the non-ratoted rectangle + width: integer, the width of the non-ratoted rectangle + angles: integer list, a list of rotated angles + + Return: + mask: 2D bool-type numpy array + ''' + + from skimage.draw import polygon + from skimage.transform import rotate + cx,cy = center + imy, imx = image.shape + mask = np.zeros( image.shape, dtype = bool) + wy = length + wx = width + x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) + y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ]) + rr, cc = polygon( y,x, shape = image.shape) + mask[rr,cc] =1 + mask_rot= np.zeros( image.shape, dtype = bool) + for angle in angles: + mask_rot += np.array( rotate( mask, angle, center= center ), dtype=bool) #, preserve_range=True) + return ~mask_rot + +def create_wedge( image, center, radius, wcors, acute_angle=True) : + '''YG develop at June 
18, 2017, @CHX + Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] + + ''' + from skimage.draw import line_aa, line, polygon, disk + imy, imx = image.shape + cy,cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like( image , dtype = bool) + rr, cc = disk((cy, cx), radius, shape = image.shape) + maskc[rr,cc] =1 + + maskp = np.zeros_like( image , dtype = bool) + x = np.array( x ) + y = np.array( y ) + print(x,y) + rr, cc = polygon( y,x, shape = image.shape) + maskp[rr,cc] =1 + if acute_angle: + return maskc*maskp + else: + return maskc*~maskp + + + +def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, + center_disk = True, center_radius=10 + ): + ''' + Give image and the beam center to create a cross-shaped mask + wy_left: the width of left h-line + wy_right: the width of rigth h-line + wx_up: the width of up v-line + wx_down: the width of down v-line + center_disk: if True, create a disk with center and center_radius + + Return: + the cross mask + ''' + from skimage.draw import line_aa, line, polygon, disk + + imy, imx = image.shape + cx,cy = center + bst_mask = np.zeros_like( image , dtype = bool) + ### + #for right part + wy = wy_right + x = np.array( [ cx, imx, imx, cx ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + ### + #for left part + wy = wy_left + x = np.array( [0, cx, cx,0 ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + ### + #for up part + wx = wx_up + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ cy, cy, imy, imy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + ### + #for low part + wx = wx_down + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ 0,0, cy, cy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + if center_radius!=0: + rr, cc = disk((cy, cx), center_radius, shape = bst_mask.shape) + bst_mask[rr,cc] =1 + + + full_mask= ~bst_mask + + return full_mask + + + + + +def generate_edge( centers, width): + '''YG. 10/14/2016 + give centers and width (number or list) to get edges''' + edges = np.zeros( [ len(centers),2]) + edges[:,0] = centers - width + edges[:,1] = centers + width + return edges + + +def export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/' ): + '''YG. 
10/17/2016 + export uid data to a txt file + uid: unique scan id + x: the x-col + y: the y-cols + path: save path + Example: + data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' ) + A plot for the data: + d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') + + ''' + from databroker import DataBroker as db + from pyCHX.chx_generic_functions import trans_data_to_pd + + hdr = db[uid] + print(hdr.fields()) + data = db[uid].table() + xp = data[x] + datap = np.zeros( [len(xp), len(y)+1]) + datap[:,0] = xp + for i, yi in enumerate(y): + datap[:,i+1] = data[yi] + + datap = trans_data_to_pd( datap, label=[x] + [yi for yi in y]) + datap.to_csv( path + 'uid=%s.csv'%uid) + return datap + + + + +##### +#load data by databroker + +def get_flatfield( uid, reverse=False ): + import h5py + detector = get_detector( db[uid ] ) + sud = get_sid_filenames(db[uid]) + master_path = '%s_master.h5'%(sud[2][0]) + print( master_path) + f= h5py.File(master_path, 'r') + k= 'entry/instrument/detector/detectorSpecific/' #data_collection_date' + d= np.array( f[ k]['flatfield'] ) + f.close() + if reverse: + d = reverse_updown( d ) + + return d + + + +def get_detector( header ): + '''Get the first detector image string by giving header ''' + keys = get_detectors(header) + for k in keys: + if 'eiger' in k: + return k + +def get_detectors( header ): + '''Get all the detector image strings by giving header ''' + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + keys = [k for k, v in descriptor['data_keys'].items() if 'external' in v] + return sorted(set(keys)) + return [] + +def get_full_data_path( uid ): + '''A dirty way to get full data path''' + header = db[uid] + d = header.db + s = list(d.get_documents( db[uid ])) + #print(s[2]) + p = s[2][1]['resource_path'] + p2 = s[3][1]['datum_kwargs']['seq_id'] + #print(p,p2) + return p + '_' + str(p2) + '_master.h5' + +def get_sid_filenames(hdr,verbose=False): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(hdr,verbose=False) + hdr = db[uid] + returns (scan_id, uid, filepath) + LW 04/30/2024 + """ + import glob + from time import strftime, localtime + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5")) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2])==0: + if verbose: print('could not find detector filename from "data_path" in metadata: %s'%start_doc['data path']) + else: + if verbose: + print('Found detector filename from "data_path" in metadata!') + success=True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(start_doc['time'])) + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('could not find detector filename in %s'%data_path) + else: + if verbose: + print('Found detector filename in %s'%data_path) + success=True + + if not success: # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(stop_doc['time'])) + ret = 
(start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('Sorry, could not find detector filename....') + else: + if verbose: + print('Found detector filename in %s'%data_path) + success=True + return ret + + +# def get_sid_filenames(header): +# """YG. Dev Jan, 2016 +# Get a bluesky scan_id, unique_id, filename by giveing uid + +# Parameters +# ---------- +# header: a header of a bluesky scan, e.g. db[-1] + +# Returns +# ------- +# scan_id: integer +# unique_id: string, a full string of a uid +# filename: sring + +# Usuage: +# sid,uid, filenames = get_sid_filenames(db[uid]) + +# """ +# from collections import defaultdict +# from glob import glob +# from pathlib import Path + +# filepaths = [] +# resources = {} # uid: document +# datums = defaultdict(list) # uid: List(document) +# for name, doc in header.documents(): +# if name == "resource": +# resources[doc["uid"]] = doc +# elif name == "datum": +# datums[doc["resource"]].append(doc) +# elif name == "datum_page": +# for datum in event_model.unpack_datum_page(doc): +# datums[datum["resource"]].append(datum) +# for resource_uid, resource in resources.items(): +# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) +# if 'eiger' not in resource['spec'].lower(): +# continue +# for datum in datums[resource_uid]: +# dm_kw = datum["datum_kwargs"] +# seq_id = dm_kw['seq_id'] +# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') +# filepaths.extend(new_filepaths) +# return header.start['scan_id'], header.start['uid'], filepaths + +def load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False): + """ + load data as dask-array + get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) + get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...) 
+ load_dask_data(uid,detector,reverse=False,rot90=False) + uid: uid (str) + detector: md['detector'] + mask_path_full: current standard would be _mask_path_+'pixel_masks/' + returns detector_images(dask-array), image_md + LW 04/26/2024 + """ + import dask + hdr=db[uid] + det=detector.split('_image')[0] + # collect image metadata from loading single image + img_md_dict={'detector_distance':'det_distance','incident_wavelength':'wavelength','frame_time':'cam_acquire_period','count_time':'cam_acquire_time','num_images':'cam_num_images','beam_center_x':'beam_center_x','beam_center_y':'beam_center_y'} + img_md={} + for k in list(img_md_dict.keys()): + img_md[k]=hdr.config_data(det)['primary'][0]['%s_%s'%(det,img_md_dict[k])] + if md['detector'] in ['eiger4m_single_image','eiger1m_single_image','eiger500K_single_image']: + img_md.update({'y_pixel_size': 7.5e-05, 'x_pixel_size': 7.5e-05}) + got_pixel_mask=True + else: + img_md.update({'y_pixel_size': None, 'x_pixel_size': None}) + got_pixel_mask=False + # load pixel mask from static location + if got_pixel_mask: + json_open=open(_mask_path_+'pixel_masks/pixel_mask_compression_%s.json'%detector.split('_')[0]) + mask_dict=json.load(json_open) + img_md['pixel_mask']=np.array(mask_dict['pixel_mask']) + img_md['binary_mask']=np.array(mask_dict['binary_mask']) + del mask_dict + + # load image data as dask-arry: + dimg=hdr.xarray_dask()[md['detector']][0] + if reverse: + dimg=dask.array.flip(dimg,axis=(1,1)) + if rot90: + dimg=dask.array.rot90(dimg,axes=(1,2)) + return dimg,img_md + +def load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, rot90=False): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + fill: True to fill data + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + + if False: + ATTEMPTS = 0 + for attempt in range(ATTEMPTS): + try: + ev, = hdr.events(fields=[detector], fill=fill) + break + + except Exception: + print ('Trying again ...!') + if attempt == ATTEMPTS - 1: + # We're out of attempts. Raise the exception to help with debugging. + raise + else: + # We didn't succeed + raise Exception("Failed after {} repeated attempts".format(ATTEMPTS)) + + # TODO(mrakitin): replace with the lazy loader (when it's implemented): + imgs = list(hdr.data(detector)) + + if len(imgs[0])>=1: + md = imgs[0].md + imgs = pims.pipeline(lambda img: img)(imgs[0]) + imgs.md = md + + if reverse: + md = imgs.md + imgs = reverse_updown( imgs ) # Why not np.flipud? + imgs.md = md + + if rot90: + md = imgs.md + imgs = rot90_clockwise( imgs ) # Why not np.flipud? 
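# ---- Editor's illustrative sketch (not part of the original patch) ----
# How load_dask_data() above is meant to be called: frames come back lazily as a
# dask array, image metadata as a plain dict. Assumes a databroker handle `db`,
# a valid CHX uid, and the notebook-level `md` dict and `_mask_path_` location
# that the function body relies on.
dimg, img_md = load_dask_data(uid='a1b2c3d4',
                              detector='eiger4m_single_image',
                              mask_path_full=_mask_path_ + 'pixel_masks/',
                              reverse=False, rot90=False)
print(dimg.shape)                          # (num_images, nrows, ncols), still lazy
print(img_md['incident_wavelength'], img_md['detector_distance'])
avg_img = dimg.mean(axis=0).compute()      # only now are frames actually read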
+ imgs.md = md + + return imgs + + +def mask_badpixels( mask, detector ): + ''' + Mask known bad pixel from the giveing mask + + ''' + if detector =='eiger1m_single_image': + #to be determined + mask = mask + elif detector =='eiger4m_single_image' or detector == 'image': + mask[513:552,:] =0 + mask[1064:1103,:] =0 + mask[1615:1654,:] =0 + mask[:,1029:1041] = 0 + mask[:, 0] =0 + mask[0:, 2069] =0 + mask[0] =0 + mask[2166] =0 + + elif detector =='eiger500K_single_image': + #to be determined + mask = mask + else: + mask = mask + return mask + + + + + +def load_data2( uid , detector = 'eiger4m_single_image' ): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + flag =1 + while flag<4 and flag !=0: + try: + ev, = hdr.events(fields=[detector]) + flag =0 + except: + flag += 1 + print ('Trying again ...!') + + if flag: + print ("Can't Load Data!") + uid = '00000' #in case of failling load data + imgs = 0 + else: + imgs = ev['data'][detector] + + #print (imgs) + return imgs + + + +def psave_obj(obj, filename ): + '''save an object with filename by pickle.dump method + This function automatically add '.pkl' as filename extension + Input: + obj: the object to be saved + filename: filename (with full path) to be saved + Return: + None + ''' + with open( filename + '.pkl', 'wb') as f: + pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) + +def pload_obj(filename ): + '''load a pickled filename + This function automatically add '.pkl' to filename extension + Input: + filename: filename (with full path) to be saved + Return: + load the object by pickle.load method + ''' + with open( filename + '.pkl', 'rb') as f: + return pickle.load(f) + + + +def load_mask( path, mask_name, plot_ = False, reverse=False, rot90=False, *argv,**kwargs): + + """load a mask file + the mask is a numpy binary file (.npy) + + Parameters + ---------- + path: the path of the mask file + mask_name: the name of the mask file + plot_: a boolen type + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + Returns + ------- + mask: array + if plot_ =True, will show the mask + + Usuage: + mask = load_mask( path, mask_name, plot_ = True ) + """ + + mask = np.load( path + mask_name ) + mask = np.array(mask, dtype = np.int32) + if reverse: + mask = mask[::-1,:] + if rot90: + mask = np.rot90( mask ) + if plot_: + show_img( mask, *argv,**kwargs) + return mask + + + +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0 ): + '''create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded + + ''' + bst_mask = np.ones_like( img , dtype = bool) + if center != None: + from skimage.draw import disk + imy, imx = img.shape + cy,cx = center + rr, cc = disk((cy, cx), center_radius,shape=img.shape ) + bst_mask[rr,cc] =0 + if outer_radius: + 
bst_mask = np.zeros_like( img , dtype = bool) + rr2, cc2 = disk((cy, cx), outer_radius,shape=img.shape ) + bst_mask[rr2,cc2] =1 + bst_mask[rr,cc] =0 + hmask = np.ones_like( img ) + hmask[np.where( img * bst_mask > threshold)]=0 + return hmask + + + + +def apply_mask( imgs, mask): + '''apply mask to imgs to produce a generator + + Usuages: + imgsa = apply_mask( imgs, mask ) + good_series = apply_mask( imgs[good_start:], mask ) + + ''' + return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask + + +def reverse_updown( imgs): + '''reverse imgs upside down to produce a generator + + Usuages: + imgsr = reverse_updown( imgs) + + + ''' + return pims.pipeline(lambda img: img[::-1,:])(imgs) # lazily apply mask + +def rot90_clockwise( imgs): + '''reverse imgs upside down to produce a generator + + Usuages: + imgsr = rot90_clockwise( imgs) + + ''' + return pims.pipeline(lambda img: np.rot90(img) )(imgs) # lazily apply mask + +def RemoveHot( img,threshold= 1E7, plot_=True ): + '''Remove hot pixel from img''' + + mask = np.ones_like( np.array( img ) ) + badp = np.where( np.array(img) >= threshold ) + if len(badp[0])!=0: + mask[badp] = 0 + if plot_: + show_img( mask ) + return mask + + +############ +###plot data + +def show_img( image, ax=None,label_array=None, alpha=0.5, interpolation='nearest', + xlim=None, ylim=None, save=False,image_name=None,path=None, + aspect=None, logs=False,vmin=None,vmax=None,return_fig=False,cmap='viridis', + show_time= False, file_name =None, ylabel=None, xlabel=None, extent=None, + show_colorbar=True, tight=True, show_ticks=True, save_format = 'png', dpi= None, + center=None,origin='lower', lab_fontsize = 16, tick_size = 12, colorbar_fontsize = 8, + use_mat_imshow=False, + *argv,**kwargs ): + """YG. Sep26, 2017 Add label_array/alpha option to show a mask on top of image + + a simple function to show image by using matplotlib.plt imshow + pass *argv,**kwargs to imshow + + Parameters + ---------- + image : array + Image to show + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax=ax + + + if center != None: + plot1D(center[1],center[0],ax=ax, c='b', m='o', legend='') + if not logs: + if not use_mat_imshow: + im=imshow(ax, image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, + extent=extent) #vmin=0,vmax=1, + else: + im=ax.imshow( image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, + extent=extent) #vmin=0,vmax=1, + else: + if not use_mat_imshow: + im=imshow(ax, image, origin=origin,cmap=cmap, + interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + else: + im=ax.imshow(image, origin=origin,cmap=cmap, + interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + if label_array != None: + im2=show_label_array(ax, label_array, alpha= alpha, cmap=cmap, interpolation=interpolation ) + + ax.set_title( image_name ) + if xlim != None: + ax.set_xlim( xlim ) + if ylim != None: + ax.set_ylim( ylim ) + + if not show_ticks: + ax.set_yticks([]) + ax.set_xticks([]) + else: + + ax.tick_params(axis='both', which='major', labelsize=tick_size ) + ax.tick_params(axis='both', which='minor', labelsize=tick_size ) + #mpl.rcParams['xtick.labelsize'] = tick_size + #mpl.rcParams['ytick.labelsize'] = tick_size + #print(tick_size) + + if ylabel != None: + #ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel( ylabel , fontsize = lab_fontsize ) + if xlabel != None: + ax.set_xlabel(xlabel , 
fontsize = lab_fontsize ) + + if aspect != None: + #aspect = image.shape[1]/float( image.shape[0] ) + ax.set_aspect(aspect) + else: + ax.set_aspect(aspect='auto') + + if show_colorbar: + cbar = fig.colorbar(im, extend='neither', spacing='proportional', + orientation='vertical' ) + cbar.ax.tick_params(labelsize=colorbar_fontsize) + fig.set_tight_layout(tight) + if save: + if show_time: + dt =datetime.now() + CurTime = '_%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + fp = path + '%s'%( file_name ) + CurTime + '.' + save_format + else: + fp = path + '%s'%( image_name ) + '.' + save_format + if dpi == None: + dpi = fig.dpi + plt.savefig( fp, dpi= dpi) + #fig.set_tight_layout(tight) + if return_fig: + return im #fig + + + + +def plot1D( y,x=None, yerr=None, ax=None,return_fig=False, ls='-', figsize=None,legend=None, + legend_size=None, lw=None, markersize=None, tick_size=8, *argv,**kwargs): + """a simple function to plot two-column data by using matplotlib.plot + pass *argv,**kwargs to plot + + Parameters + ---------- + y: column-y + x: column-x, by default x=None, the plot will use index of y as x-axis + the other paramaters are defined same as plt.plot + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + if figsize != None: + fig, ax = plt.subplots(figsize=figsize) + else: + fig, ax = plt.subplots() + + if legend == None: + legend = ' ' + try: + logx = kwargs['logx'] + except: + logx=False + try: + logy = kwargs['logy'] + except: + logy=False + + try: + logxy = kwargs['logxy'] + except: + logxy= False + + if logx==True and logy==True: + logxy = True + + try: + marker = kwargs['marker'] + except: + try: + marker = kwargs['m'] + except: + marker= next( markers_ ) + try: + color = kwargs['color'] + except: + try: + color = kwargs['c'] + except: + color = next( colors_ ) + + if x == None: + x=range(len(y)) + if yerr == None: + ax.plot(x,y, marker=marker,color=color,ls=ls,label= legend, lw=lw, + markersize=markersize, )#,*argv,**kwargs) + else: + ax.errorbar(x,y,yerr, marker=marker,color=color,ls=ls,label= legend, + lw=lw,markersize=markersize,)#,*argv,**kwargs) + if logx: + ax.set_xscale('log') + if logy: + ax.set_yscale('log') + if logxy: + ax.set_xscale('log') + ax.set_yscale('log') + + + ax.tick_params(axis='both', which='major', labelsize=tick_size ) + ax.tick_params(axis='both', which='minor', labelsize=tick_size ) + + if 'xlim' in kwargs.keys(): + ax.set_xlim( kwargs['xlim'] ) + if 'ylim' in kwargs.keys(): + ax.set_ylim( kwargs['ylim'] ) + if 'xlabel' in kwargs.keys(): + ax.set_xlabel(kwargs['xlabel']) + if 'ylabel' in kwargs.keys(): + ax.set_ylabel(kwargs['ylabel']) + + if 'title' in kwargs.keys(): + title = kwargs['title'] + else: + title = 'plot' + ax.set_title( title ) + #ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend!='') and (legend!=None): + ax.legend(loc = 'best', fontsize=legend_size ) + if 'save' in kwargs.keys(): + if kwargs['save']: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + #fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs['path'] + '%s'%( title ) + '.png' + plt.savefig( fp, dpi=fig.dpi) + if return_fig: + return fig + + +### + +def check_shutter_open( data_series, min_inten=0, time_edge = [0,10], plot_ = False, *argv,**kwargs): + + '''Check the first frame with shutter open + + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined 
as shtter close + time_edge: the searching frame number range + + return: + shutter_open_frame: a integer, the first frame number with open shutter + + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + + ''' + imgsum = np.array( [np.sum(img ) for img in data_series[time_edge[0]:time_edge[1]:1]] ) + if plot_: + fig, ax = plt.subplots() + ax.plot(imgsum,'bo') + ax.set_title('uid=%s--imgsum'%uid) + ax.set_xlabel( 'Frame' ) + ax.set_ylabel( 'Total_Intensity' ) + #plt.show() + shutter_open_frame = np.where( np.array(imgsum) > min_inten )[0][0] + print ('The first frame with open shutter is : %s'%shutter_open_frame ) + return shutter_open_frame + + + +def get_each_frame_intensity( data_series, sampling = 50, + bad_pixel_threshold=1e10, + plot_ = False, save= False, *argv,**kwargs): + '''Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + ''' + + #print ( argv, kwargs ) + imgsum = np.array( [np.sum(img ) for img in tqdm( data_series[::sampling] , leave = True ) ] ) + if plot_: + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + fig, ax = plt.subplots() + ax.plot(imgsum,'bo') + ax.set_title('uid= %s--imgsum'%uid) + ax.set_xlabel( 'Frame_bin_%s'%sampling ) + ax.set_ylabel( 'Total_Intensity' ) + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] + else: + uid = 'uid' + #fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + #plt.show() + + bad_frame_list = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + if len(bad_frame_list): + print ('Bad frame list are: %s' %bad_frame_list) + else: + print ('No bad frames are involved.') + return imgsum,bad_frame_list + + + + +def create_time_slice( N, slice_num, slice_width, edges=None ): + '''create a ROI time regions ''' + if edges != None: + time_edge = edges + else: + if slice_num==1: + time_edge = [ [0,N] ] + else: + tstep = N // slice_num + te = np.arange( 0, slice_num +1 ) * tstep + tc = np.int_( (te[:-1] + te[1:])/2 )[1:-1] + if slice_width%2: + sw = slice_width//2 +1 + time_edge = [ [0,slice_width], ] + [ [s-sw+1,s+sw] for s in tc ] + [ [N-slice_width,N]] + else: + sw= slice_width//2 + time_edge = [ [0,slice_width], ] + [ [s-sw,s+sw] for s in tc ] + [ [N-slice_width,N]] + + + + return np.array(time_edge) + + +def show_label_array(ax, label_array, cmap=None, aspect=None,interpolation='nearest',**kwargs): + """ + YG. Sep 26, 2017 + Modified show_label_array(ax, label_array, cmap=None, **kwargs) + from https://github.com/Nikea/xray-vision/blob/master/xray_vision/mpl_plotting/roi.py + Display a labeled array nicely + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + label_array: ndarray + Expected to be an unsigned integer array. 
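# ---- Editor's illustrative worked example (not part of the original patch) ----
# What create_time_slice() above produces: slice_num windows of slice_width frames,
# spread over N frames, with the first and last windows pinned to the ends of the
# series. The numbers below follow directly from the code.
edges = create_time_slice(N=100, slice_num=4, slice_width=10)
print(edges)   # -> [[0, 10], [32, 42], [57, 67], [90, 100]]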
0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use, defaults to 'Paired' + Returns + ------- + img : AxesImage + The artist added to the axes + """ + if cmap == None: + cmap = 'viridis' + #print(cmap) + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under('w', 0) + vmin = max(.5, kwargs.pop('vmin', .5)) + im = ax.imshow(label_array, cmap=cmap, + interpolation=interpolation, + vmin=vmin, + **kwargs) + if aspect == None: + ax.set_aspect(aspect='auto') + #ax.set_aspect('equal') + return im + + + +def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,alpha=0.3, vmin=0.1, vmax=5, + imshow_cmap='gray', **kwargs): #norm=LogNorm(), + """ + This will plot the required ROI's(labeled array) on the image + + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + image : array + The image array + label_array : array + Expected to be an unsigned integer array. 0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use for plotting the label_array, defaults to 'None' + imshow_cmap : str or colormap, optional + Color map to use for plotting the image, defaults to 'gray' + norm : str, optional + Normalize scale data, defaults to 'Lognorm()' + Returns + ------- + im : AxesImage + The artist added to the axes + im_label : AxesImage + The artist added to the axes + """ + ax.set_aspect('equal') + + #print (vmin, vmax ) + if log_img: + im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(vmin, vmax),**kwargs) #norm=norm, + else: + im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',vmin=vmin, vmax=vmax,**kwargs) #norm=norm, + + im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, + **kwargs) # norm=norm, + + + return im, im_label + + + +def show_ROI_on_image( image, ROI, center=None, rwidth=400,alpha=0.3, label_on = True, + save=False, return_fig = False, rect_reqion=None, log_img = True, vmin=0.01, vmax=5, + show_ang_cor = False,cmap = cmap_albula, fig_ax=None, + uid='uid', path='', aspect = 1, show_colorbar=True, show_roi_edge=False, *argv,**kwargs): + + '''show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center + + ''' + + + if RUN_GUI: + fig = Figure(figsize=(8,8)) + axes = fig.add_subplot(111) + elif fig_ax != None: + fig, axes = fig_ax + else: + fig, axes = plt.subplots( ) #plt.subplots(figsize=(8,8)) + + #print( vmin, vmax) + #norm=LogNorm(vmin, vmax) + + axes.set_title( "%s_ROI_on_Image"%uid ) + if log_img: + if vmin==0: + vmin += 1e-10 + + vmax = max(1, vmax ) + if not show_roi_edge: + #print('here') + im,im_label = show_label_array_on_image(axes, image, ROI, imshow_cmap='viridis', + cmap=cmap,alpha=alpha, log_img=log_img, + vmin=vmin, vmax=vmax, origin="lower") + else: + edg = get_image_edge( ROI ) + image_ = get_image_with_roi( image, ROI, scale_factor = 2) + #fig, axes = plt.subplots( ) + show_img( image_, ax=[fig,axes], vmin=vmin, vmax=vmax, + logs= log_img, image_name= "%s_ROI_on_Image"%uid, + cmap = cmap ) + + + if rect_reqion == None: + if center != None: + x1,x2 = [center[1] - rwidth, center[1] + rwidth] + y1,y2 = [center[0] - rwidth, center[0] + rwidth] + axes.set_xlim( [x1,x2]) + axes.set_ylim( [y1,y2]) + else: + x1,x2,y1,y2= rect_reqion 
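# ---- Editor's illustrative sketch (not part of the original patch) ----
# Overlaying a labeled ROI array on an image with show_label_array_on_image()
# above. The synthetic image and two-band label array are made up; the call
# assumes this module's own imports (e.g. mpl_plot from xray_vision, LogNorm).
import numpy as np
import matplotlib.pyplot as plt

img = np.random.poisson(5, (512, 512)).astype(float) + 0.1   # keep values > 0 for LogNorm
labels = np.zeros((512, 512), dtype=int)
labels[200:220, :] = 1
labels[300:320, :] = 2
fig, ax = plt.subplots()
im, im_label = show_label_array_on_image(ax, img, labels, cmap='viridis',
                                         alpha=0.3, log_img=True, vmin=0.1, vmax=20)
fig.colorbar(im)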
+ axes.set_xlim( [x1,x2]) + axes.set_ylim( [y1,y2]) + + if label_on: + num_qzr = len(np.unique( ROI )) -1 + for i in range( 1, num_qzr + 1 ): + ind = np.where( ROI == i)[1] + indz = np.where( ROI == i)[0] + c = '%i'%i + y_val = int( indz.mean() ) + x_val = int( ind.mean() ) + #print (xval, y) + axes.text(x_val, y_val, c, color='b',va='center', ha='center') + if show_ang_cor: + axes.text(-0.0, 0.5, '-/+180' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(1.0, 0.5, '0' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(0.5, -0.0, '-90'+ r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(0.5, 1.0, '90' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + + axes.set_aspect(aspect) + #fig.colorbar(im_label) + if show_colorbar: + if not show_roi_edge: + fig.colorbar(im) + if save: + fp = path + "%s_ROI_on_Image"%uid + '.png' + plt.savefig( fp, dpi=fig.dpi) + #plt.show() + if return_fig: + return fig, axes, im + + + + +def crop_image( image, crop_mask ): + + ''' Crop the non_zeros pixels of an image to a new image + + + ''' + from skimage.util import crop, pad + pxlst = np.where(crop_mask.ravel())[0] + dims = crop_mask.shape + imgwidthy = dims[1] #dimension in y, but in plot being x + imgwidthx = dims[0] #dimension in x, but in plot being y + #x and y are flipped??? + #matrix notation!!! + pixely = pxlst%imgwidthy + pixelx = pxlst//imgwidthy + + minpixelx = np.min(pixelx) + minpixely = np.min(pixely) + maxpixelx = np.max(pixelx) + maxpixely = np.max(pixely) + crops = crop_mask*image + img_crop = crop( crops, ((minpixelx, imgwidthx - maxpixelx -1 ), + (minpixely, imgwidthy - maxpixely -1 )) ) + return img_crop + + +def get_avg_img( data_series, img_samp_index=None, sampling = 100, plot_ = False , save=False, *argv,**kwargs): + '''Get average imagef from a data_series by every sampling number to save time''' + if img_samp_index == None: + avg_img = np.average(data_series[:: sampling], axis=0) + else: + avg_img = np.zeros_like( data_series[0] ) + n=0 + for i in img_samp_index: + avg_img += data_series[i] + n +=1 + avg_img = np.array( avg_img) / n + + if plot_: + fig, ax = plt.subplots() + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + + im = ax.imshow(avg_img , cmap='viridis',origin='lower', + norm= LogNorm(vmin=0.001, vmax=1e2)) + #ax.set_title("Masked Averaged Image") + ax.set_title('uid= %s--Masked Averaged Image'%uid) + fig.colorbar(im) + + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] + else: + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + #plt.show() + + return avg_img + + + +def check_ROI_intensity( avg_img, ring_mask, ring_number=3 , save=False, plot=True, *argv,**kwargs): + + """plot intensity versus pixel of a ring + Parameters + ---------- + avg_img: 2D-array, the image + ring_mask: 2D-array + ring_number: which ring to plot + + Returns + ------- + + + """ + #print('here') + + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number] ) + + if plot: + fig, ax = plt.subplots() + ax.set_title('%s--check-RIO-%s-intensity'%(uid, ring_number) ) + ax.plot( pixel[0][0] ,'bo', ls='-' ) + ax.set_ylabel('Intensity') + ax.set_xlabel('pixel') + if save: 
+ path = kwargs['path'] + fp = path + "%s_Mean_intensity_of_one_ROI"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + if save: + path = kwargs['path'] + save_lists( [range( len( pixel[0][0] )), pixel[0][0]], label=['pixel_list', 'roi_intensity'], + filename="%s_Mean_intensity_of_one_ROI"%uid, path= path) + #plt.show() + return pixel[0][0] + +#from tqdm import tqdm + +def cal_g2( image_series, ring_mask, bad_image_process, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None ): + '''calculation g2 by using a multi-tau algorithm''' + + noframes = len( image_series) # number of frames, not "no frames" + #num_buf = 8 # number of buffers + + if bad_image_process: + import skbeam.core.mask as mask_image + bad_img_list = np.array( bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen( image_series, bad_img_list) + + if num_lev == None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + print ('%s frames will be processed...'%(noframes)) + print( 'Bad Frames involved!') + + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm( new_imgs) ) + print( 'G2 calculation DONE!') + + else: + + if num_lev == None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + print ('%s frames will be processed...'%(noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series) ) + print( 'G2 calculation DONE!') + + return g2, lag_steps + + + +def run_time(t0): + '''Calculate running time of a program + Parameters + ---------- + t0: time_string, t0=time.time() + The start time + Returns + ------- + Print the running time + + One usage + --------- + t0=time.time() + .....(the running code) + run_time(t0) + ''' + + elapsed_time = time.time() - t0 + if elapsed_time<60: + print ('Total time: %.3f sec' %(elapsed_time )) + else: + print ('Total time: %.3f min' %(elapsed_time/60.)) + + +def trans_data_to_pd(data, label=None,dtype='array'): + ''' + convert data into pandas.DataFrame + Input: + data: list or np.array + label: the coloum label of the data + dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] + Output: + a pandas.DataFrame + ''' + #lists a [ list1, list2...] all the list have the same length + from numpy import arange,array + import pandas as pd,sys + if dtype == 'list': + data=array(data).T + N,M=data.shape + elif dtype == 'array': + data=array(data) + N,M=data.shape + else: + print("Wrong data type! 
Now only support 'list' and 'array' tpye") + + + index = arange( N ) + if label == None:label=['data%s'%i for i in range(M)] + #print label + df = pd.DataFrame( data, index=index, columns= label ) + return df + + +def save_lists( data, label=None, filename=None, path=None, return_res = False, verbose=False): + ''' + save_lists( data, label=None, filename=None, path=None) + + save lists to a CSV file with filename in path + Parameters + ---------- + data: list + label: the column name, the length should be equal to the column number of list + filename: the filename to be saved + path: the filepath to be saved + + Example: + save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) + ''' + + M,N = len(data[0]),len(data) + d = np.zeros( [N,M] ) + for i in range(N): + d[i] = data[i] + + df = trans_data_to_pd(d.T, label, 'array') + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = 'data' + filename = os.path.join(path, filename )#+'.csv') + df.to_csv(filename) + if verbose: + print('The data was saved in: %s.'%filename) + if return_res: + return df + +def get_pos_val_overlap( p1, v1, p2,v2, Nl): + '''get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + + get_pos_val_overlap( p1, v1, p2,v2, Nl) + + ''' + ind = np.zeros( Nl, dtype=np.int32 ) + ind[p1] = np.arange( len(p1) ) +1 + w2 = np.where( ind[p2] )[0] + w1 = ind[ p2[w2]] -1 + return v1[w1], v2[w2] + + + +def save_arrays( data, label=None, dtype='array', filename=None, path=None, return_res = False,verbose=False): + ''' + July 10, 2016, Y.G.@CHX + save_arrays( data, label=None, dtype='array', filename=None, path=None): + save data to a CSV file with filename in path + Parameters + ---------- + data: arrays + label: the column name, the length should be equal to the column number of data + dtype: array or list + filename: the filename to be saved + path: the filepath to be saved + + Example: + + save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir ) + + + ''' + df = trans_data_to_pd(data, label,dtype) + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = 'data' + filename_ = os.path.join(path, filename)# +'.csv') + df.to_csv(filename_) + if verbose: + print( 'The file: %s is saved in %s'%(filename, path) ) + #print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) + if return_res: + return df + +def cal_particle_g2( radius, viscosity, qr, taus, beta=0.2, T=298): + '''YG Dev Nov 20, 2017@CHX + calculate particle g2 fucntion by giving particle radius, Q , and solution viscosity using a simple + exponetional model + Input: + radius: m + qr, list, in A-1 + visocity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + T: temperture, in K + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(-123)*298 / ( 6*np.pi * 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10**5 A2/s + 
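# ---- Editor's illustrative worked example (not part of the original patch) ----
# get_pos_val_overlap() above in action, with one value per position in p2.
# Positions 3 and 8 appear in both p1 and p2, so the values stored at those
# positions in v1 and v2 are returned.
import numpy as np
p1 = np.array([1, 3, 4, 6, 8]); v1 = np.array([10, 20, 30, 40, 50])
p2 = np.array([0, 2, 3, 5, 7, 8]); v2 = np.array([10, 20, 30, 40, 50, 60])
ov1, ov2 = get_pos_val_overlap(p1, v1, p2, v2, Nl=10)
print(ov1, ov2)   # [20 50] [30 60]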
taus: time + beta: contrast + + cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4) + + ''' + D0 = get_diffusion_coefficient( viscosity, radius, T=T) + g2_q1 = np.zeros(len(qr), dtype = object) + for i, q1 in enumerate(qr): + relaxation_rate = D0 * q1**2 + g2_q1[i] = simple_exponential( taus, beta=beta, relaxation_rate = relaxation_rate, baseline=1) + return g2_q1 + +def get_Reynolds_number( flow_rate, flow_radius, fluid_density, fluid_viscosity ): + '''May 10, 2019, Y.G.@CHX + get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta + Reynolds_number << 1000 gives a laminar flow + flow_rate: ul/s + flow_radius: mm + fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) + fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) + + return Reynolds_number + ''' + return flow_rate * 1e-6 * flow_radius * 1e-3 *2 * fluid_density/ fluid_viscosity + +def get_Deborah_number( flow_rate, beam_size, q_vector, diffusion_coefficient ): + '''May 10, 2019, Y.G.@CHX + get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) + flow_rate: ul/s + beam_size: ul + q_vector: A-1 + diffusion_coefficient: A^2/s + + return Deborah_number + ''' + return (flow_rate /beam_size) / ( diffusion_coefficient * q_vector**2 ) + + + +def get_viscosity( diffusion_coefficient , radius, T=298): + '''May 10, 2019, Y.G.@CHX + get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient + diffusion_coefficient in unit of A^2/s + radius: m + T: K + k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant + + return visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) + ''' + + k= 1.38064852*10**(-23) + return k*T / ( 6*np.pi* diffusion_coefficient * radius) * 10**20 + +def get_diffusion_coefficient( viscosity, radius, T=298): + '''July 10, 2016, Y.G.@CHX + get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity + viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + radius: m + T: K + k: 1.38064852(79)×10−23 J/T, Boltzmann constant + + return diffusion_coefficient in unit of A^2/s + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s + + get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) + + ''' + + k= 1.38064852*10**(-23) + return k*T / ( 6*np.pi* viscosity * radius) * 10**20 + + +def ring_edges(inner_radius, width, spacing=0, num_rings=None): + """ + Aug 02, 2016, Y.G.@CHX + ring_edges(inner_radius, width, spacing=0, num_rings=None) + + Calculate the inner and outer radius of a set of rings. + + The number of rings, their widths, and any spacing between rings can be + specified. They can be uniform or varied. + + LW 04/02/2024: fixed checking whether width and spacing are iterable + + Parameters + ---------- + inner_radius : float + inner radius of the inner-most ring + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + spacing : float or list of floats, optional + margin between rings, 0 by default + If a float, all rings will have the same spacing. If a list, + the length of the list must be one less than the number of + rings. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
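# ---- Editor's illustrative worked example (not part of the original patch) ----
# Stokes-Einstein numbers from get_diffusion_coefficient() above, for a 250 nm
# diameter sphere (radius 125 nm) in water (eta = 8.9e-4 N*s/m^2) at 298 K, and
# the corresponding XPCS relaxation rate Gamma = D*q^2 at an assumed q = 0.01 A^-1.
D0 = get_diffusion_coefficient(viscosity=8.9e-4, radius=125e-9, T=298)
print(D0)                # ~2.0e8 A^2/s
q = 0.01                 # A^-1
print(D0 * q**2)         # ~2.0e4 1/s, i.e. a decay time of order 50 us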
+ + Returns + ------- + edges : array + inner and outer radius for each ring + + Example + ------- + # Make two rings starting at r=1px, each 5px wide + >>> ring_edges(inner_radius=1, width=5, num_rings=2) + [(1, 6), (6, 11)] + # Make three rings of different widths and spacings. + # Since the width and spacings are given individually, the number of + # rings here is simply inferred. + >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2)) + [(1, 6), (7, 11), (13, 16)] + + """ + # All of this input validation merely checks that width, spacing, and + # num_rings are self-consistent and complete. + try: + iter(width) + width_is_list=True + except: width_is_list=False + try: + iter(spacing) + spacing_is_list=True + except: spacing_is_list=False + + # width_is_list = isinstance(width, collections.Iterable) + # spacing_is_list = isinstance(spacing, collections.Iterable) + if (width_is_list and spacing_is_list): + if len(width) != len(spacing) + 1: + raise ValueError("List of spacings must be one less than list " + "of widths.") + if num_rings == None: + try: + num_rings = len(width) + except TypeError: + try: + num_rings = len(spacing) + 1 + except TypeError: + raise ValueError("Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified.") + else: + if width_is_list: + if num_rings != len(width): + raise ValueError("num_rings does not match width list") + if spacing_is_list: + if num_rings-1 != len(spacing): + raise ValueError("num_rings does not match spacing list") + # Now regularlize the input. + if not width_is_list: + width = np.ones(num_rings) * width + + if spacing == None: + spacing = [] + else: + if not spacing_is_list: + spacing = np.ones(num_rings - 1) * spacing + # The inner radius is the first "spacing." + all_spacings = np.insert(spacing, 0, inner_radius) + steps = np.array([all_spacings, width]).T.ravel() + edges = np.cumsum(steps).reshape(-1, 2) + return edges + + + +def get_non_uniform_edges( centers, width = 4, number_rings=1, spacing=0, ): + ''' + YG CHX Spe 6 + get_non_uniform_edges( centers, width = 4, number_rings=3 ) + + Calculate the inner and outer radius of a set of non uniform distributed + rings by giving ring centers + For each center, there are number_rings with each of width + + LW 04/02/2024: fixed checking whether 'width' is iterable + + Parameters + ---------- + centers : float + the center of the rings + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
+ + Returns + ------- + edges : array + inner and outer radius for each ring + ''' + + if number_rings == None: + number_rings = 1 + edges = np.zeros( [len(centers)*number_rings, 2] ) + + try: + iter(width) + except: + width = np.ones_like( centers ) * width + for i, c in enumerate(centers): + edges[i*number_rings:(i+1)*number_rings,:] = ring_edges( inner_radius = c - width[i]*number_rings/2, + width= width[i], spacing= spacing, num_rings=number_rings) + return edges + + +def trans_tf_to_td(tf, dtype = 'dframe'): + '''July 02, 2015, Y.G.@CHX + Translate epoch time to string + ''' + import pandas as pd + import numpy as np + from datetime import datetime + '''translate time.float to time.date, + td.type dframe: a dataframe + td.type list, a list + ''' + if dtype == 'dframe':ind = tf.index + else:ind = range(len(tf)) + td = np.array([ datetime.fromtimestamp(tf[i]) for i in ind ]) + return td + + + +def trans_td_to_tf(td, dtype = 'dframe'): + '''July 02, 2015, Y.G.@CHX + Translate string to epoch time + + ''' + import time + import numpy as np + '''translate time.date to time.float, + td.type dframe: a dataframe + td.type list, a list + ''' + if dtype == 'dframe':ind = td.index + else:ind = range(len(td)) + #tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([ time.mktime(td[i].timetuple()) for i in ind]) + return tf + + + +def get_averaged_data_from_multi_res( multi_res, keystr='g2', different_length= True, verbose=False, + cal_errorbar=False): + '''Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. + keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results + + ''' + maxM = 0 + mkeys = multi_res.keys() + if not different_length: + n=0 + for i, key in enumerate( list( mkeys) ): + keystri = multi_res[key][keystr] + if i ==0: + keystr_average = keystri + else: + keystr_average += keystri + n +=1 + keystr_average /=n + + else: + length_dict = {} + D= 1 + for i, key in enumerate( list( mkeys) ): + if verbose: + print(i,key) + shapes = multi_res[key][keystr].shape + M=shapes[0] + if i ==0: + if len(shapes)==2: + D=2 + maxN = shapes[1] + elif len(shapes)==3: + D=3 + maxN = shapes[2] #in case of two-time correlation + if (M) not in length_dict: + length_dict[(M) ] =1 + else: + length_dict[(M) ] += 1 + maxM = max( maxM, M ) + #print( length_dict ) + avg_count = {} + sk = np.array( sorted(length_dict) ) + for i, k in enumerate( sk ): + avg_count[k] = np.sum( np.array( [ length_dict[k] for k in sk[i:] ] ) ) + #print(length_dict, avg_count) + if D==2: + #print('here') + keystr_average = np.zeros( [maxM, maxN] ) + elif D==3: + keystr_average = np.zeros( [maxM, maxM, maxN ] ) + else: + keystr_average = np.zeros( [maxM] ) + for i, key in enumerate( list( mkeys) ): + keystri = multi_res[key][keystr] + Mi = keystri.shape[0] + if D!=3: + keystr_average[:Mi] += keystri + else: + keystr_average[:Mi,:Mi,:] += keystri + if D!=3: + keystr_average[:sk[0]] /= avg_count[sk[0]] + else: + keystr_average[:sk[0],:sk[0], : ] /= avg_count[sk[0]] + for i in range( 0, len(sk)-1 ): + if D!=3: + keystr_average[sk[i]:sk[i+1]] /= avg_count[sk[i+1]] + else: + keystr_average[sk[i]:sk[i+1],sk[i]:sk[i+1],:] /= avg_count[sk[i+1]] + + return keystr_average + + +def save_g2_general( g2, taus, qr=None, qz=None, uid='uid', path=None, 
return_res= False ): + + '''Y.G. Dec 29, 2016 + + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: + + ''' + + df = DataFrame( np.hstack( [ (taus).reshape( len(g2),1) , g2] ) ) + t,qs = g2.shape + if qr is None: + qr = range( qs ) + if qz is None: + df.columns = ( ['tau'] + [str(qr_) for qr_ in qr ] ) + else: + df.columns = ( ['tau'] + [ str(qr_) +'_'+ str(qz_) for (qr_,qz_) in zip(qr,qz) ] ) + + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + + #if filename is None: + + filename = uid + #filename = 'uid=%s--g2.csv' % (uid) + #filename += '-uid=%s-%s.csv' % (uid,CurTime) + #filename += '-uid=%s.csv' % (uid) + filename1 = os.path.join(path, filename) + df.to_csv(filename1) + print( 'The correlation function is saved in %s with filename as %s'%( path, filename)) + if return_res: + return df + + +########### +#*for g2 fit and plot + +def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): + return beta * np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline + +def simple_exponential(x, beta, relaxation_rate, baseline=1): + '''relation_rate: unit 1/s ''' + return beta * np.exp(-2 * relaxation_rate * x) + baseline + + +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * relaxation_rate * x) + baseline + +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline + + +def flow_para_function_with_vibration( x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = (1 + amp*np.cos( 2*np.pi*freq* x) ) + Diff_part= np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta* vibration_part* Diff_part * Flow_part + baseline + +def flow_para_function( x, beta, relaxation_rate, flow_velocity, baseline=1): + '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )''' + + Diff_part= np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta*Diff_part * Flow_part + baseline + + +def flow_para_function_explicitq( x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0 ): + '''Nov 9, 2017 Basically, make q vector to (qr, angle), + ###relaxation_rate is actually a diffusion rate + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + Diffusion part: np.exp( -2*D q^2 *tau ) + q_ang: would be np.radians( ang - 90 ) + + ''' + + Diff_part= np.exp(-2 * ( diffusion* qr**2 * x)**alpha ) + if flow_velocity !=0: + if np.cos( q_ang ) >= 1e-8: + Flow_part = np.pi**2/(16*x*flow_velocity*qr* abs(np.cos(q_ang)) ) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity * qr* abs(np.cos(q_ang)) ) ) )**2 + else: + Flow_part = 1 + else: + Flow_part = 1 + return beta*Diff_part * Flow_part + baseline + + + +def get_flow_velocity( average_velocity, shape_factor): + + return average_velocity * (1- shape_factor)/(1+ shape_factor) + +def stretched_flow_para_function( x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): + ''' + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + ''' + 
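# ---- Editor's illustrative sketch (not part of the original patch) ----
# Role of the fit-model parameters defined above: a simple-exponential g2 decays
# from (baseline + beta) at short lag times to baseline at long ones, with decay
# rate `relaxation_rate` (1/s). All parameter values below are made up.
import numpy as np
taus = np.logspace(-5, 1, 60)                       # lag times in seconds
g2_model = simple_exponential(taus, beta=0.2, relaxation_rate=100.0, baseline=1.0)
print(g2_model[0], g2_model[-1])                    # ~1.2 at short tau, ~1.0 at long tau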
Diff_part= np.exp(-2 * (relaxation_rate * x)**alpha ) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta*Diff_part * Flow_part + baseline + + +def get_g2_fit_general_two_steps( g2, taus, function='simple_exponential', + second_fit_range=[0,20], + sequential_fit=False, *argv,**kwargs): + ''' + Fit g2 in two steps, + i) Using the "function" to fit whole g2 to get baseline and beta (contrast) + ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function + ''' + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function, sequential_fit, *argv,**kwargs) + guess_values = {} + for k in list (g2_fit_result[0].params.keys()): + guess_values[k] = np.array( [ g2_fit_result[i].params[k].value + for i in range( g2.shape[1] ) ]) + + if 'guess_limits' in kwargs: + guess_limits = kwargs['guess_limits'] + else: + guess_limits = dict( baseline =[1, 1.8], alpha=[0, 2], + beta = [0., 1], relaxation_rate= [0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function ='simple_exponential', + sequential_fit= sequential_fit, fit_range=second_fit_range, + fit_variables={'baseline':False, 'beta': False, 'alpha':False,'relaxation_rate':True}, + guess_values= guess_values, guess_limits = guess_limits ) + + return g2_fit_result, taus_fit, g2_fit + + +def get_g2_fit_general( g2, taus, function='simple_exponential', + sequential_fit=False, qval_dict = None, + ang_init = 90, *argv,**kwargs): + ''' + Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq + qval_dict: a dict with qr and ang (in unit of degrees).") + + + Dec 29,2016, Y.G.@CHX + + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + taus: the time delay + sequential_fit: if True, will use the low-q fit result as initial value to fit the higher Qs + function: + supported function include: + 'simple_exponential' (or 'simple'): fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential'(or 'streched'): fit by a streched exponential function, defined as + beta * ( np.exp( -2 * ( relaxation_rate * tau )**alpha ) + baseline + 'stretched_vibration': fit by a streched exponential function with vibration, defined as + beta * (1 + amp*np.cos( 2*np.pi*60* x) )* np.exp(-2 * (relaxation_rate * x)**alpha) + baseline + 'flow_para_function' (or flow): fit by a flow function + + + kwargs: + could contains: + 'fit_variables': a dict, for vary or not, + keys are fitting para, including + beta, relaxation_rate , alpha ,baseline + values: a False or True, False for not vary + 'guess_values': a dict, for initial value of the fitting para, + the defalut values are + dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + + 'guess_limits': a dict, for the limits of the fittting para, for example: + dict( beta=[0, 10],, alpha=[0,100] ) + the default is: + dict( baseline =[0.5, 2.5], alpha=[0, inf] ,beta = [0, 1], relaxation_rate= [0.0,1000] ) + Returns + ------- + fit resutls: a instance in limfit + tau_fit + fit_data by the model, it has the q number of g2 + + an example: + fit_g2_func = 'stretched' + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + 
fit_variables={'baseline':True, 'beta':True, 'alpha':True,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + + ''' + + if 'fit_range' in kwargs.keys(): + fit_range = kwargs['fit_range'] + else: + fit_range=None + + + num_rings = g2.shape[1] + if 'fit_variables' in kwargs: + additional_var = kwargs['fit_variables'] + _vars =[ k for k in list( additional_var.keys()) if additional_var[k] == False] + else: + _vars = [] + if function=='simple_exponential' or function=='simple': + _vars = np.unique ( _vars + ['alpha']) + mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= list( _vars) ) + elif function=='stretched_exponential' or function=='stretched': + mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= _vars) + elif function=='stretched_vibration': + mod = Model(stretched_auto_corr_scat_factor_with_vibration)#, independent_vars= _vars) + elif function=='flow_para_function' or function=='flow_para': + mod = Model(flow_para_function)#, independent_vars= _vars) + elif function=='flow_para_function_explicitq' or function=='flow_para_qang': + mod = Model(flow_para_function_explicitq)#, independent_vars= _vars) + elif function=='flow_para_function_with_vibration' or function=='flow_vibration': + mod = Model( flow_para_function_with_vibration ) + + else: + print ("The %s is not supported.The supported functions include simple_exponential and stretched_exponential"%function) + + mod.set_param_hint( 'baseline', min=0.5, max= 2.5 ) + mod.set_param_hint( 'beta', min=0.0, max=1.0 ) + mod.set_param_hint( 'alpha', min=0.0 ) + mod.set_param_hint( 'relaxation_rate', min=0.0, max= 1000 ) + mod.set_param_hint( 'flow_velocity', min=0) + mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) + + if 'guess_limits' in kwargs: + guess_limits = kwargs['guess_limits'] + for k in list( guess_limits.keys() ): + mod.set_param_hint( k, min= guess_limits[k][0], max= guess_limits[k][1] ) + + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': + mod.set_param_hint( 'flow_velocity', min=0) + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + mod.set_param_hint( 'flow_velocity', min=0) + mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) + if function=='stretched_vibration' or function=='flow_vibration': + mod.set_param_hint( 'freq', min=0) + mod.set_param_hint( 'amp', min=0) + + _guess_val = dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + if 'guess_values' in kwargs: + guess_values = kwargs['guess_values'] + _guess_val.update( guess_values ) + + _beta=_guess_val['beta'] + _alpha=_guess_val['alpha'] + _relaxation_rate = _guess_val['relaxation_rate'] + _baseline= _guess_val['baseline'] + if isinstance( _beta, (np.ndarray, list) ): + _beta_=_beta[0] + else: + _beta_=_beta + if isinstance( _baseline, (np.ndarray, list) ): + _baseline_ = _baseline[0] + else: + _baseline_ = _baseline + if isinstance( _relaxation_rate, (np.ndarray, list) ): + _relaxation_rate_= _relaxation_rate[0] + else: + _relaxation_rate_= _relaxation_rate + if isinstance( _alpha, (np.ndarray, list) ): + _alpha_ = _alpha[0] + else: + _alpha_ = _alpha + pars = mod.make_params( beta=_beta_, alpha=_alpha_, + relaxation_rate =_relaxation_rate_, baseline= _baseline_) + + if function=='flow_para_function' or function=='flow_para': + _flow_velocity =_guess_val['flow_velocity'] + if isinstance( 
_flow_velocity, (np.ndarray, list) ): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + relaxation_rate =_relaxation_rate_, baseline= _baseline_) + + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + _flow_velocity =_guess_val['flow_velocity'] + _diffusion =_guess_val['diffusion'] + _guess_val['qr'] = 1 + _guess_val['q_ang'] = 0 + if isinstance( _flow_velocity, (np.ndarray, list) ): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + if isinstance( _diffusion, (np.ndarray, list) ): + _diffusion_ = _diffusion[0] + else: + _diffusion_ = _diffusion + pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + diffusion =_diffusion_, baseline= _baseline_, + qr=1, q_ang=0 + ) + + if function=='stretched_vibration': + _freq =_guess_val['freq'] + _amp = _guess_val['amp'] + pars = mod.make_params( beta=_beta, alpha=_alpha, freq=_freq, amp = _amp, + relaxation_rate =_relaxation_rate, baseline= _baseline) + + if function=='flow_vibration': + _flow_velocity =_guess_val['flow_velocity'] + _freq =_guess_val['freq'] + _amp = _guess_val['amp'] + pars = mod.make_params( beta=_beta, freq=_freq, amp = _amp,flow_velocity=_flow_velocity, + relaxation_rate =_relaxation_rate, baseline= _baseline) + for v in _vars: + pars['%s'%v].vary = False + #print( pars ) + fit_res = [] + model_data = [] + for i in range(num_rings): + if fit_range != None: + y_=g2[1:, i][fit_range[0]:fit_range[1]] + lags_=taus[1:][fit_range[0]:fit_range[1]] + else: + y_=g2[1:, i] + lags_=taus[1:] + + mm = ~np.isnan(y_) + y = y_[mm] + lags = lags_[mm] + #print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + #y=y_ + #lags=lags_ + #print( _relaxation_rate ) + for k in list(pars.keys()): + #print(k, _guess_val[k] ) + try: + if isinstance( _guess_val[k], (np.ndarray, list) ): + pars[k].value = _guess_val[k][i] + except: + pass + + if True: + if isinstance( _beta, (np.ndarray, list) ): + #pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val['beta'][i] + if isinstance( _baseline, (np.ndarray, list) ): + #pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val['baseline'][i] + if isinstance( _relaxation_rate, (np.ndarray, list) ): + #pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val['relaxation_rate'][i] + if isinstance( _alpha, (np.ndarray, list) ): + #pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val['alpha'][i] + #for k in list(pars.keys()): + #print(k, _guess_val[k] ) + # pars[k].value = _guess_val[k][i] + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + if qval_dict == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + + pars = mod.make_params( + beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + diffusion =_diffusion_, baseline= _baseline_, + qr = qval_dict[i][0], q_ang = abs(np.radians( qval_dict[i][1] - ang_init) ) ) + + + pars['qr'].vary = False + pars['q_ang'].vary = False + for v in _vars: + pars['%s'%v].vary = False + + #if i==20: + # print(pars) + #print( pars ) + result1 = mod.fit(y, pars, x =lags ) + #print(qval_dict[i][0], qval_dict[i][1], y) + if sequential_fit: + for k in list(pars.keys()): + #print( pars ) + if k in list(result1.best_values.keys()): + pars[k].value = result1.best_values[k] + fit_res.append( result1) + #model_data.append( 
result1.best_fit ) + yf=result1.model.eval(params=result1.params, x= lags_ ) + model_data.append( yf ) + return fit_res, lags_, np.array( model_data ).T + + + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry='saxs'): + '''Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + ''' + + Nqs = len( qval_dict.keys()) + len_qrz = len( list( qval_dict.values() )[0] ) + #qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array( list( qval_dict.values() ) )[:,0] + if geometry=='gi_saxs' or geometry=='ang_saxs':# or geometry=='gi_waxs': + if len_qrz < 2: + print( "please give qz or qang for the q-label") + else: + #qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array( list( qval_dict.values() ) )[:,1] + else: + qz_label = np.array( [0] ) + + uqz_label = np.unique( qz_label ) + num_qz = len( uqz_label) + + uqr_label = np.unique( qr_label ) + num_qr = len( uqr_label) + + #print( uqr_label, uqz_label ) + if len( uqr_label ) >= len( uqz_label ): + master_plot= 'qz' #one qz for many sub plots of each qr + else: + master_plot= 'qr' + + mastp= master_plot + if geometry == 'ang_saxs': + mastp= 'ang' + num_short = min(num_qz, num_qr) + num_long = max(num_qz, num_qr) + + #print( mastp, num_short, num_long) + if num_qz != num_qr: + short_label = [qz_label,qr_label][ np.argmin( [num_qz, num_qr] ) ] + long_label = [qz_label,qr_label][ np.argmax( [num_qz, num_qr] ) ] + short_ulabel = [uqz_label,uqr_label][ np.argmin( [num_qz, num_qr] ) ] + long_ulabel = [uqz_label,uqr_label][ np.argmax( [num_qz, num_qr] ) ] + else: + short_label = qz_label + long_label = qr_label + short_ulabel = uqz_label + long_ulabel = uqr_label + #print( long_ulabel ) + #print( qz_label,qr_label ) + #print( short_label, long_label ) + + if geometry == 'saxs' or geometry == 'gi_waxs': + ind_long = [ range( num_long ) ] + else: + ind_long = [ np.where( short_label == i)[0] for i in short_ulabel ] + + + if Nqs == 1: + long_ulabel = list( qval_dict.values() )[0] + long_label = list( qval_dict.values() )[0] + return qr_label, qz_label, num_qz, num_qr, num_short,num_long, short_label, long_label,short_ulabel,long_ulabel, ind_long, master_plot, mastp + + +############################################ +##a good func to plot g2 for all types of geogmetries +############################################ + + + + +def plot_g2_general( g2_dict, taus_dict, qval_dict, g2_err_dict = None, + fit_res=None, geometry='saxs',filename='g2', + path=None, function='simple_exponential', g2_labels=None, + fig_ysize= 12, qth_interest = None, + ylabel='g2', return_fig=False, append_name='', outsize=(2000, 2400), + max_plotnum_fig=16, figsize=(10, 12), show_average_ang_saxs=True, + qphi_analysis = False, fontsize_sublabel = 12, + *argv,**kwargs): + ''' + Jan 10, 2018 add g2_err_dict option to plot g2 with error bar + Oct31, 2017 add qth_interest option + + Dec 26,2016, Y.G.@CHX + + Plot one/four-time correlation function (with fit) for different geometry + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2_dict: dict, format as {1: g2_1, 2: g2_2, 3: g2_3...} one-time 
correlation function, g1,g2, g3,...must have the same shape + taus_dict, dict, format {1: tau_1, 2: tau_2, 3: tau_3...}, tau1,tau2, tau3,...must have the same shape + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + + fit_res: give all the fitting parameters for showing in the plot + qth_interest: if not None: should be a list, and will only plot the qth_interest qs + filename: for the title of plot + append_name: if not None, will save as filename + append_name as filename + path: the path to save data + outsize: for gi/ang_saxs, will combine all the different qz images together with outsize + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + + one_plot: if True, plot all images in one pannel + kwargs: + + Returns + ------- + None + + ToDoList: plot an average g2 for ang_saxs for each q + + ''' + + if ylabel=='g2': + ylabel='g_2' + if ylabel=='g4': + ylabel='g_4' + + if geometry =='saxs': + if qphi_analysis: + geometry = 'ang_saxs' + if qth_interest != None: + if not isinstance(qth_interest, list): + print('Please give a list for qth_interest') + else: + #g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array( qth_interest ) -1 + g2_dict_ = {} + #taus_dict_ = {} + for k in list(g2_dict.keys()): + g2_dict_[k] = g2_dict[k][:,[i for i in qth_interest]] + #for k in list(taus_dict.keys()): + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + taus_dict_ = taus_dict + qval_dict_ = {k:qval_dict[k] for k in qth_interest} + if fit_res != None: + fit_res_ = [ fit_res[k] for k in qth_interest ] + else: + fit_res_ = None + else: + g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res + + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + #$print( num_short, num_long ) + + for s_ind in range( num_short ): + ind_long_i = ind_long[ s_ind ] + num_long_i = len( ind_long_i ) + #if show_average_ang_saxs: + # if geometry=='ang_saxs': + # num_long_i += 1 + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + #fig = plt.figure( ) + if num_long_i <=4: + if master_plot != 'qz': + fig = plt.figure(figsize=(8, 6)) + else: + if num_short>1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + #print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 + fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] + #print( figsize ) + else: + #print('Here') + if master_plot != 'qz': + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == 'qz': + if geometry=='ang_saxs': + title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' + elif geometry=='gi_saxs': + title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short = '' + else: #qr + if 
geometry=='ang_saxs' or geometry=='gi_saxs': + title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short='' + #print(geometry) + #filename ='' + til = '%s:--->%s'%(filename, title_short ) + if num_long_i <=4: + plt.title( til,fontsize= 14, y =1.15) + #plt.title( til,fontsize=20, y =1.06) + #print('here') + else: + plt.title( til,fontsize=20, y =1.06) + #print( num_long ) + if num_long!=1: + #print( 'here') + plt.axis('off') + #sy = min(num_long_i,4) + sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) + #fig.set_size_inches(10, 12) + #fig.set_size_inches(10, fig_ysize ) + else: + sy =1 + #fig.set_size_inches(8,6) + #plt.axis('off') + sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) + + temp = sy + sy = sx + sx = temp + + #print( num_long_i, sx, sy ) + #print( master_plot ) + #print(ind_long_i, len(ind_long_i) ) + + for i, l_ind in enumerate( ind_long_i ): + if num_long_i <= max_plotnum_fig: + #if s_ind ==2: + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + ax = fig.add_subplot(sx,sy, i + 1 ) + if sx==1: + if sy==1: + plt.axis('on') + else: + #fig_subnum = l_ind//max_plotnum_fig + #ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i//max_plotnum_fig + #print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + + + ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_plot == 'qz' or master_plot == 'angle': + if geometry!='gi_waxs': + title_long = r'$Q_r= $'+'%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' + else: + title_long = r'$Q_r= $'+'%i '%( long_label[l_ind] ) + #print( title_long,long_label,l_ind ) + else: + if geometry=='ang_saxs': + #title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) #+ r'$^\circ$' + '( %d )'%(l_ind) + elif geometry=='gi_saxs': + title_long = r'$Q_z= $'+ '%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' + else: + title_long = '' + #print( master_plot ) + if master_plot != 'qz': + ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.1, fontsize=12) + else: + ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.05, fontsize= fontsize_sublabel) + #print( geometry ) + #print( title_long ) + if qth_interest != None:#it might have a bug here, todolist!!! 
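+ # Added note (a hedged guess at the "bug here" todo): lab below holds the original q indices of
+ # qval_dict_ in sorted order, while the g2 columns were selected in the order given in
+ # qth_interest; if qth_interest is not ascending, lab[l_ind]+1 in the subplot title may not
+ # match the curve actually plotted.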
+ lab = sorted(list(qval_dict_.keys())) + #print( lab, l_ind) + ax.set_title(title_long + ' (%s )'%( lab[l_ind] +1), y =1.05, fontsize= 12) + for ki, k in enumerate( list(g2_dict_.keys()) ): + if ki==0: + c='b' + if fit_res == None: + m='-o' + else: + m='o' + elif ki==1: + c='r' + if fit_res == None: + m='s' + else: + m='-' + elif ki==2: + c='g' + m='-D' + else: + c = colors[ki+2] + m= '-%s'%markers[ki+2] + try: + dumy = g2_dict_[k].shape + #print( 'here is the shape' ) + islist = False + except: + islist_n = len( g2_dict_[k] ) + islist = True + #print( 'here is the list' ) + if islist: + for nlst in range( islist_n ): + m = '-%s'%markers[ nlst ] + #print(m) + y=g2_dict_[k][nlst][:, l_ind ] + x = taus_dict_[k][nlst] + if ki==0: + ymin,ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + #print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst==0: + ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) + else: + ax.semilogx(x, y, m, color=c,markersize=6) + else: + yerr= g2_err_dict[k][nlst][:, l_ind ] + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) + else: + if nlst==0: + ax.errorbar(x, y, yerr=yerr, fmt=m, + color=c,markersize=6, label=g2_labels[ki]) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c,markersize=6) + ax.set_xscale("log", nonposx='clip') + if nlst==0: + if l_ind==0: + ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) + + else: + y=g2_dict_[k][:, l_ind ] + x = taus_dict_[k] + if ki==0: + ymin,ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) + else: + yerr= g2_err_dict[k][:, l_ind ] + #print(x.shape, y.shape, yerr.shape) + #print(yerr) + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6,label=g2_labels[ki] ) + ax.set_xscale("log", nonposx='clip') + if l_ind==0: + ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) + + if fit_res_ != None: + result1 = fit_res_[l_ind] + #print (result1.best_values) + + beta = result1.best_values['beta'] + baseline = result1.best_values['baseline'] + if function=='simple_exponential' or function=='simple': + rate = result1.best_values['relaxation_rate'] + alpha =1.0 + elif function=='stretched_exponential' or function=='stretched': + rate = result1.best_values['relaxation_rate'] + alpha = result1.best_values['alpha'] + elif function=='stretched_vibration': + rate = result1.best_values['relaxation_rate'] + alpha = result1.best_values['alpha'] + freq = result1.best_values['freq'] + elif function=='flow_vibration': + rate = result1.best_values['relaxation_rate'] + freq = result1.best_values['freq'] + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': + rate = result1.best_values['relaxation_rate'] + flow = result1.best_values['flow_velocity'] + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + diff = result1.best_values['diffusion'] + qrr = short_ulabel[s_ind] + #print(qrr) + rate = diff * qrr**2 + flow = result1.best_values['flow_velocity'] + if qval_dict_ == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + pass + + if rate!=0: + txts = r'$\tau_0$' + r'$ = %.3f$'%(1/rate) + r'$ s$' + else: + txts = r'$\tau_0$' + r'$ = inf$' + r'$ s$' 
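+ # Annotate the fitted parameters on the subplot: the relaxation time tau_0 = 1/relaxation_rate
+ # (reported as "inf" when the fitted rate is zero), followed by alpha, baseline, beta and,
+ # for the flow/vibration models, the flow velocity and vibration frequency.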
+ x=0.25 + y0=0.9 + fontsize = 12 + ax.text(x =x, y= y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + #print(function) + dt=0 + if function!='flow_para_function' and function!='flow_para' and function!='flow_vibration' and function!='flow_para_qang': + txts = r'$\alpha$' + r'$ = %.3f$'%(alpha) + dt +=0.1 + #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r'$baseline$' + r'$ = %.3f$'%( baseline) + dt +=0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration' or function=='flow_para_qang': + txts = r'$flow_v$' + r'$ = %.3f$'%( flow) + dt += 0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function=='stretched_vibration' or function=='flow_vibration': + txts = r'$vibration$' + r'$ = %.1f Hz$'%( freq) + dt += 0.1 + ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r'$\beta$' + r'$ = %.3f$'%( beta ) + dt +=0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + + if 'ylim' in kwargs: + ax.set_ylim( kwargs['ylim']) + elif 'vlim' in kwargs: + vmin, vmax =kwargs['vlim'] + try: + ax.set_ylim([ymin*vmin, ymax*vmax ]) + except: + pass + else: + pass + if 'xlim' in kwargs: + ax.set_xlim( kwargs['xlim']) + if num_short == 1: + fp = path + filename + else: + fp = path + filename + '_%s_%s'%(mastp, s_ind) + + if append_name != '': + fp = fp + append_name + fps.append( fp + '.png' ) + #if num_long_i <= 16: + if num_long_i <= max_plotnum_fig: + fig.set_tight_layout(True) + #fig.tight_layout() + #print(fig) + try: + plt.savefig( fp + '.png', dpi=fig.dpi) + except: + print('Can not save figure here.') + + else: + fps=[] + for fn, f in enumerate(fig): + f.set_tight_layout(True) + fp = path + filename + '_q_%s_%s'%(fn*16, (fn+1)*16) + if append_name != '': + fp = fp + append_name + fps.append( fp + '.png' ) + f.savefig( fp + '.png', dpi=f.dpi) + #plt.savefig( fp + '.png', dpi=fig.dpi) + #combine each saved images together + + if (num_short !=1) or (num_long_i > 16): + outputfile = path + filename + '.png' + if append_name != '': + outputfile = path + filename + append_name + '__joint.png' + else: + outputfile = path + filename + '__joint.png' + combine_images( fps, outputfile, outsize= outsize ) + if return_fig: + return fig + + + +def power_func(x, D0, power=2): + return D0 * x**power + + +def get_q_rate_fit_general( qval_dict, rate, geometry ='saxs', weights=None, *argv,**kwargs): + ''' + Dec 26,2016, Y.G.@CHX + + Fit q~rate by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
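+ The fit model is power_func above, i.e. rate = D0 * q^power with power fixed to 2,
+ so D0 is the diffusion coefficient extracted from Gamma = D0*q^2.
+ A minimal usage sketch (assuming `rate` has been collected from a previous g2 fit,
+ e.g. via get_g2_fit_general and save_g2_fit_para_tocsv):
+     D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, rate, geometry='saxs' )
+     plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry='saxs' )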
+ Return: + D0 + qrate_fit_res + ''' + + power_variable=False + + if 'fit_range' in kwargs.keys(): + fit_range = kwargs['fit_range'] + else: + fit_range= None + + mod = Model( power_func ) + #mod.set_param_hint( 'power', min=0.5, max= 10 ) + #mod.set_param_hint( 'D0', min=0 ) + pars = mod.make_params( power = 2, D0=1*10^(-5) ) + if power_variable: + pars['power'].vary = True + else: + pars['power'].vary = False + + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + Nqr = num_long + Nqz = num_short + D0= np.zeros( Nqz ) + power= 2 #np.zeros( Nqz ) + qrate_fit_res=[] + #print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + #print(y,x) + if fit_range != None: + y=y[fit_range[0]:fit_range[1]] + x=x[fit_range[0]:fit_range[1]] + #print (i, y,x) + _result = mod.fit(y, pars, x = x ,weights=weights ) + qrate_fit_res.append( _result ) + D0[i] = _result.best_values['D0'] + #power[i] = _result.best_values['power'] + print ('The fitted diffusion coefficient D0 is: %.3e A^2S-1'%D0[i]) + return D0, qrate_fit_res + + +def plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry ='saxs', ylim = None, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, + *argv,**kwargs): + ''' + Dec 26,2016, Y.G.@CHX + + plot q~rate fitted by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ show_fit:, bool, if False, not show the fit + + ''' + + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + else: + uid = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] + else: + path = '' + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + power = 2 + fig,ax = plt.subplots() + plt.title(r'$Q^%s$'%(power) + '-Rate-%s_Fit'%(uid),fontsize=20, y =1.06) + Nqz = num_short + if Nqz!=1: + ls = '--' + else: + ls='' + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values['D0'] + #print(i, x, y, D0 ) + if Nqz!=1: + label=r'$q_z=%.5f$'%short_ulabel[i] + else: + label='' + ax.plot(x**power, y, marker = 'o', ls =ls, label=label) + yfit = qrate_fit_res[i].best_fit + + if show_fit: + if plot_all_range: + ax.plot(x**power, x**power*D0, '-r') + else: + ax.plot( (x**power)[:len(yfit) ], yfit, '-r') + + if show_text: + txts = r'$D0: %.3e$'%D0 + r' $A^2$' + r'$s^{-1}$' + dy=0.1 + ax.text(x =0.15, y=.65 -dy *i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz!=1:legend = ax.legend(loc='best') + + if plot_index_range != None: + d1,d2 = plot_index_range + d2 = min( len(x)-1, d2 ) + ax.set_xlim( (x**power)[d1], (x**power)[d2] ) + ax.set_ylim( y[d1],y[d2]) + if ylim != None: + ax.set_ylim( ylim ) + + ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$)") + ax.set_xlabel("$q^%s$"r'($\AA^{-2}$)'%power) + fp = path + '%s_Q_Rate'%(uid) + '_fit.png' + fig.savefig( fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig,ax + + +def save_g2_fit_para_tocsv( fit_res, filename, path): + '''Y.G. Dec 29, 2016, + save g2 fitted parameter to csv file + ''' + col = list( fit_res[0].best_values.keys() ) + m,n = len( fit_res ), len( col ) + data = np.zeros( [m,n] ) + for i in range( m ): + data[i] = list( fit_res[i].best_values.values() ) + df = DataFrame( data ) + df.columns = col + filename1 = os.path.join(path, filename) # + '.csv') + df.to_csv(filename1) + print( "The g2 fitting parameters are saved in %s"%filename1) + return df + + + +def R_2(ydata,fit_data): + ''' Calculates R squared for a particular fit - by L.W. + usage R_2(ydata,fit_data) + returns R2 + by L.W. Feb. 2019 + ''' + y_ave=np.average(ydata) + SS_tot=np.sum((np.array(ydata)-y_ave)**2) + #print('SS_tot: %s'%SS_tot) + SS_res=np.sum((np.array(ydata)-np.array(fit_data))**2) + #print('SS_res: %s'%SS_res) + return 1-SS_res/SS_tot + +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ + points.tolist() + if len(points) ==1: + points=points[:,None] + if verbose: + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation + return modified_z_score > thresh + +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. 
all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. + verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + except: + lower_outlier_threshold = False + if verbose: print('no lower outlier threshold found') + else: + if verbose: print('ROI #%s: no outliers detected'%rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + upper_outlier_threshold = False; lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img*rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img*rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if upper_outlier_threshold or lower_outlier_threshold: + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + if upper_outlier_threshold: + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + if lower_outlier_threshold: + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: 
%s'%rn);plt.legend(loc='best',fontsize=8) + + if plot: + fig,ax = plt.subplots() + plt.imshow(hhmask) + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier threshold: %s'%outlier_threshold) + + return hhmask \ No newline at end of file diff --git a/pyCHX/backups/chx_olog_012925.py b/pyCHX/backups/chx_olog_012925.py new file mode 100644 index 0000000..880c9f4 --- /dev/null +++ b/pyCHX/backups/chx_olog_012925.py @@ -0,0 +1,140 @@ +from pyOlog import Attachment, LogEntry, OlogClient, SimpleOlogClient +from pyOlog.OlogDataTypes import Logbook + + +def create_olog_entry(text, logbooks="Data Acquisition"): + """ + Create a log entry to xf11id. + + Parameters + ---------- + text : str + the text string to add to the logbook + logbooks : str, optional + the name of the logbook to update + + Returns + ------- + eid : the entry id returned from the Olog server + """ + olog_client = SimpleOlogClient() + eid = olog_client.log(text, logbooks=logbooks) + return eid + + +def update_olog_uid_with_file(uid, text, filename, append_name=""): + """ + Attach text and file (with filename) to CHX olog with entry defined by uid. + + Parameters + ---------- + uid : str + string of unique id + text : str + string to put into olog book + filename : str + file name + append_name : str + first try to attach olog with the file, if there is already a same file + in attached file, copy the file with different filename (append + append_name), and then attach to olog + """ + atch = [Attachment(open(filename, "rb"))] + + try: + update_olog_uid(uid=uid, text=text, attachments=atch) + except Exception: + from shutil import copyfile + + npname = f"{filename[:-4]}_{append_name}.pdf" + copyfile(filename, npname) + atch = [Attachment(open(npname, "rb"))] + print(f"Append {append_name} to the filename.") + update_olog_uid(uid=uid, text=text, attachments=atch) + + +def update_olog_logid_with_file(logid, text, filename=None, verbose=False): + """ + Attach text and file (with filename) to CHX olog with entry defined by + logid. + + Parameters + ---------- + logid : str + the log entry id + text : str + string to put into olog book + filename : str + file name + """ + if filename is not None: + atch = [Attachment(open(filename, "rb"))] + else: + atch = None + try: + update_olog_id(logid=logid, text=text, attachments=atch, verbose=verbose) + except Exception: + pass + + +def update_olog_id(logid, text, attachments, verbose=True): + """ + Update olog book logid entry with text and attachments files. + + Parameters + ---------- + logid : integer + the log entry id + text : str + the text to update, will add this text to the old text + attachments : ??? 
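+ presumably a list of pyOlog Attachment objects (as in the example below), used to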
+ add new attachment files + + Example + ------- + filename1 = ('/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/' + 'Report_uid=af8f66.pdf') + atch = [Attachment(open(filename1, 'rb'))] + + update_olog_id(logid=29327, text='add_test_atch', attachmenents=atch) + """ + olog_client = SimpleOlogClient() + client = OlogClient() + url = client._url + + old_text = olog_client.find(id=logid)[0]["text"] + upd = LogEntry( + text=f"{old_text}\n{text}", + attachments=attachments, + logbooks=[Logbook(name="Operations", owner=None, active=True)], + ) + client.updateLog(logid, upd) + if verbose: + print(f"The url={url} was successfully updated with {text} and with " f"the attachments") + + +def update_olog_uid(uid, text, attachments): + """ + Update olog book logid entry cotaining uid string with text and attachments + files. + + Parameters + ---------- + uid: str + the uid of a scan or a specficial string (only gives one log entry) + text: str + the text to update, will add this text to the old text + attachments: ??? + add new attachment files + + Example + ------- + filename1 = ('/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/' + 'Report_uid=af8f66.pdf') + atch = [Attachment(open(filename1, 'rb'))] + update_olog_uid(uid='af8f66', text='Add xpcs pdf report', attachments=atch) + """ + olog_client = SimpleOlogClient() + + logid = olog_client.find(search=f"*{uid}*")[0]["id"] + update_olog_id(logid, text, attachments) diff --git a/pyCHX/backups/chx_outlier_detection_05012024.py b/pyCHX/backups/chx_outlier_detection_05012024.py new file mode 100644 index 0000000..e211742 --- /dev/null +++ b/pyCHX/backups/chx_outlier_detection_05012024.py @@ -0,0 +1,98 @@ +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ + points.tolist() + if len(points) ==1: + points=points[:,None] + if verbose: + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation + return modified_z_score > thresh + +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. 
+ verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + except: + lower_outlier_threshold = False + if verbose: print('no lower outlier threshold found') + else: + if verbose: print('ROI #%s: no outliers detected'%rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + upper_outlier_threshold = False; lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img*rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img*rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if upper_outlier_threshold or lower_outlier_threshold: + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + if upper_outlier_threshold: + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + if lower_outlier_threshold: + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) + + if plot: + fig,ax = plt.subplots() + plt.imshow(hhmask) + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier 
threshold: %s'%outlier_threshold) + + return hhmask diff --git a/pyCHX/backups/chx_packages_local-20240502.py b/pyCHX/backups/chx_packages_local-20240502.py new file mode 100644 index 0000000..828cb12 --- /dev/null +++ b/pyCHX/backups/chx_packages_local-20240502.py @@ -0,0 +1,302 @@ +### This enables local import of pyCHX for testing + +import pickle as cpk + +import historydict +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +# changes to current version of chx_packages.py +# added load_dask_data in generic_functions + + +#from pyCHX.chx_handlers import use_dask, use_pims +from chx_handlers import use_dask, use_pims +# from pyCHX.chx_libs import ( +from chx_libs import ( + EigerHandler, + Javascript, + LogNorm, + Model, + cmap_albula, + cmap_vge, + datetime, + db, + getpass, + h5py, + multi_tau_lags, + np, + os, + pims, + plt, + random, + roi, + time, + tqdm, + utils, + warnings, +) + +use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' + +# from pyCHX.chx_compress import ( +from chx_compress import ( + MultifileBNLCustom, + combine_binary_files, + create_compress_header, + para_compress_eigerdata, + para_segment_compress_eigerdata, + segment_compress_eigerdata, +) +# from pyCHX.chx_compress_analysis import ( +from chx_compress_analysis import ( + Multifile, + cal_each_ring_mean_intensityc, + cal_waterfallc, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + get_each_ring_mean_intensityc, + get_time_edge_avg_img, + mean_intensityc, + plot_each_ring_mean_intensityc, + plot_waterfallc, + read_compressed_eigerdata, +) +# from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +# from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +from chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +# from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +from chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +#from pyCHX.chx_generic_functions import ( +from chx_generic_functions import ( + R_2, + apply_mask, + average_array_withNan, + check_bad_uids, + check_lost_metadata, + check_ROI_intensity, + check_shutter_open, + combine_images, + copy_data, + create_cross_mask, + create_fullImg_with_box, + create_hot_pixel_mask, + create_multi_rotated_rectangle_mask, + create_polygon_mask, + create_rectangle_mask, + create_ring_mask, + create_seg_ring, + create_time_slice, + create_user_folder, + delete_data, + extract_data_from_file, + filter_roi_mask, + find_bad_pixels, + find_bad_pixels_FD, + find_good_xpcs_uids, + find_index, + find_uids, + fit_one_peak_curve, + get_averaged_data_from_multi_res, + get_avg_img, + get_bad_frame_list, + get_base_all_filenames, + get_cross_point, + get_current_pipeline_filename, + get_current_pipeline_fullpath, + get_curve_turning_points, + get_detector, + get_detectors, + get_each_frame_intensity, + get_echos, + get_eigerImage_per_file, + get_fit_by_two_linear, + get_fra_num_by_dose, + get_g2_fit_general, + get_image_edge, + get_image_with_roi, + get_img_from_iq, + get_last_uids, + get_mass_center_one_roi, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_non_uniform_edges, + get_print_uids, + get_q_rate_fit_general, + get_qval_dict, + get_qval_qwid_dict, + 
get_roi_mask_qval_qwid_by_shift, + get_roi_nr, + get_series_g2_taus, + get_SG_norm, + get_sid_filenames, + get_today_date, + get_touched_qwidth, + get_waxs_beam_center, + lin2log_g2, + linear_fit, + load_dask_data, + load_data, + load_mask, + load_pilatus, + ls_dir, + mask_badpixels, + mask_exclude_badpixel, + move_beamstop, + pad_length, + pload_obj, + plot1D, + plot_fit_two_linear_fit, + plot_g2_general, + plot_q_g2fitpara_general, + plot_q_rate_fit_general, + plot_q_rate_general, + plot_xy_with_fit, + plot_xy_x2, + print_dict, + psave_obj, + read_dict_csv, + refine_roi_mask, + RemoveHot, + reverse_updown, + ring_edges, + run_time, + save_array_to_tiff, + save_arrays, + save_current_pipeline, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_lists, + save_oavs_tifs, + sgolay2d, + shift_mask, + show_img, + show_ROI_on_image, + shrink_image, + trans_data_to_pd, + update_qval_dict, + update_roi_mask, + validate_uid, +) +# from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +from chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +# from pyCHX.chx_specklecp import ( +from chx_specklecp import ( + get_binned_his_std, + get_contrast, + get_his_std_from_pds, + get_xsvs_fit, + plot_g2_contrast, + plot_xsvs_fit, + save_bin_his_std, + save_KM, + xsvsc, + xsvsp, +) +# from pyCH.chx_xpcs_xsvs_jupyter_V1 import( +from chx_xpcs_xsvs_jupyter_V1 import( + get_t_iqc_uids, + plot_t_iqtMq2, + plot_t_iqc_uids, + plot_entries_from_csvlist, + plot_entries_from_uids, + get_iq_from_uids, + wait_func, + wait_data_acquistion_finish, + get_uids_by_range, + get_uids_in_time_period, + do_compress_on_line, + realtime_xpcs_analysis, + compress_multi_uids, + get_two_time_mulit_uids, + get_series_g2_from_g12, + get_fra_num_by_dose, + get_series_one_time_mulit_uids, + plot_dose_g2, + run_xpcs_xsvs_single, +) +# from pyCHX.Create_Report import ( +from Create_Report import ( + create_multi_pdf_reports_for_uids, + create_one_pdf_reports_for_uids, + create_pdf_report, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + make_pdf_report, +) +#from pyCHX.DataGonio import qphiavg +from DataGonio import qphiavg +# from pyCHX.SAXS import ( +from SAXS import ( + fit_form_factor, + fit_form_factor2, + form_factor_residuals_bg_lmfit, + form_factor_residuals_lmfit, + get_form_factor_fit_lmfit, + poly_sphere_form_factor_intensity, + show_saxs_qmap, +) +#from pyCHX.Two_Time_Correlation_Function import ( +from Two_Time_Correlation_Function import ( + get_aged_g2_from_g12, + get_aged_g2_from_g12q, + get_four_time_from_two_time, + get_one_time_from_two_time, + rotate_g12q_to_rectangle, + show_C12, +) +# from pyCHX.XPCS_GiSAXS import ( +from XPCS_GiSAXS import ( + cal_1d_qr, + convert_gisaxs_pixel_to_q, + fit_qr_qz_rate, + get_1d_qr, + get_each_box_mean_intensity, + get_gisaxs_roi, + get_qedge, + get_qmap_label, + get_qr_tick_label, + get_qzr_map, + get_qzrmap, + get_reflected_angles, + get_t_qrc, + multi_uids_gisaxs_xpcs_analysis, + plot_gisaxs_g4, + plot_gisaxs_two_g2, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_qrc, + show_qzr_map, + show_qzr_roi, +) +# from pyCHX.XPCS_SAXS import ( +from XPCS_SAXS import ( + cal_g2, + combine_two_roi_mask, + create_hot_pixel_mask, + get_angular_mask, + get_circular_average, + get_cirucular_average_std, + get_each_ring_mean_intensity, + get_QrQw_From_RoiMask, + get_ring_mask, + get_seg_from_ring_mask, + get_t_iq, + get_t_iqc, + 
multi_uids_saxs_xpcs_analysis, + plot_circular_average, + plot_qIq_with_ROI, + plot_t_iqc, + recover_img_from_iq, + save_lists, +) +#from pyCHX.chx_outlier_detection import ( +from chx_outlier_detection import ( + is_outlier, + outlier_mask +) \ No newline at end of file diff --git a/pyCHX/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py b/pyCHX/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py new file mode 100644 index 0000000..6b10886 --- /dev/null +++ b/pyCHX/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py @@ -0,0 +1,1698 @@ +from pyCHX.chx_packages import * +from pyCHX.chx_libs import markers, colors +#from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict +#RUN_GUI = False +#from pyCHX.chx_libs import markers +import pandas as pds +# temporary fix: get_data() uses depreciated np.float and gets imported from pyCHX/chx_correlationc.py -> clobber function with temporary fix: +%run /nsls2/data/chx/legacy/analysis/2022_3/lwiegart/development/chx_analysis_setup.ipynb + +def get_t_iqc_uids( uid_list, setup_pargs, slice_num= 10, slice_width= 1): + '''Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids + Input: + uid_list: list of string (uid) + setup_pargs: dict, for caculation of Iq, the key of this dict should include + 'center': beam center + 'dpix': pixel size + 'lambda_': X-ray wavelength + slice_num: slice number of the time edge + slice_edge: the width of the time_edge + Output: + qs: dict, with uid as key, with value as q values + iqsts:dict, with uid as key, with value as iq values + tstamp:dict, with uid as key, with value as time values + + ''' + iqsts = {} + tstamp = {} + qs = {} + label = [] + for uid in uid_list: + md = get_meta_data( uid ) + luid = md['uid'] + timeperframe = md['cam_acquire_period'] + N = md['cam_num_images'] + filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%luid + good_start = 5 + FD = Multifile(filename, good_start, N ) + Nimg = FD.end - FD.beg + time_edge = create_time_slice( Nimg, slice_num= slice_num, slice_width= slice_width, edges = None ) + time_edge = np.array( time_edge ) + good_start + #print( time_edge ) + tstamp[uid] = time_edge[:,0] * timeperframe + qpt, iqsts[uid], qt = get_t_iqc( FD, time_edge, None, pargs=setup_pargs, nx=1500 ) + qs[uid] = qt + + return qs, iqsts, tstamp + + + + +def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf='' ): + '''plot q2~Iq at differnt time''' + if ax is None: + fig, ax = plt.subplots() + q = qt + for i in range(iqst.shape[0]): + yi = iqst[i] * q**2 + time_labeli = perf+'time_%s s'%( round( tstamp[i], 3) ) + plot1D( x = q, y = yi, legend= time_labeli, xlabel='Q (A-1)', ylabel='I(q)*Q^2', title='I(q)*Q^2 ~ time', + m=markers[i], c = colors[i], ax=ax, ylim=[ -0.001, 0.005]) #, xlim=[0.007,0.1] ) + + +def plot_t_iqc_uids( qs, iqsts, tstamps ): + '''plot q2~Iq at differnt time for a uid list + ''' + keys = list(qs.keys()) + fig, ax = plt.subplots() + for uid in keys: + qt = qs[uid] + iqst = iqsts[uid] + tstamp = tstamps[uid] + plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + '_' ) + + +def plot_entries_from_csvlist( csv_list, uid_list, inDir, key = 'g2', qth = 1, legend_size=8, + yshift= 0.01, ymulti=1, xlim=None, ylim=None,uid_length=None, + legend=None, fp_fulluid=True ): + + ''' + YG Feb2, 2018, make yshift be also a list + + YG June 9, 2017@CHX + YG Sep 29, 2017@CHX. 
+ plot enteries for a list csvs + Input: + csv_list: list, a list of uid (string) + inDir: string, imported folder for saved analysis results + key: string, plot entry, surport + 'g2' for one-time, + 'iq' for q~iq + 'mean_int_sets' for mean intensity of each roi as a function of frame + TODOLIST:#also can plot the following + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + qth: integer, the intesrest q number + yshift: float, values of shift in y direction + xlim: [x1,x2], for plot x limit + ylim: [y1,y2], for plot y limit + Output: + show the plot + Example: + uid_list = ['5492b9', '54c5e0'] + plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) + ''' + + uid_dict = {} + fig, ax =plt.subplots() + for uid in uid_list: + if uid_length is not None: + uid_ = uid[:uid_length] + else: + uid_=uid + #print(uid_) + uid_dict[uid_] = get_meta_data( uid )['uid'] + #for i, u in enumerate( list( uid_dict.keys() )): + + for i,fp in enumerate( list(csv_list)): + u = uid_list[i] #print(u) + inDiru = inDir + u + '/' + if fp_fulluid: + inDiru = inDir + uid_dict[u] + '/' + else: + inDiru = inDir + u + '/' + d = pds.read_csv( inDiru + fp ) + #print(d) + + if key == 'g2': + taus = d['tau'][1:] + col = d.columns[qth +1] + #print( qth+1, col ) + y= d[col][1:] + if legend is None: + leg=u + else: + leg='uid=%s-->'%u+legend[i] + if isinstance(yshift,list): + yshift_ = yshift[i] + ii = i + 1 + else: + yshift_ = yshift + ii = i + plot1D( x = taus, y=y + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, + xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) + title='Q = %s'%(col) + ax.set_title(title) + elif key=='imgsum': + y = total_res[key] + plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel='Frame', ylabel='imgsum',) + + elif key == 'iq': + x= total_res['q_saxs'] + y= total_res['iq_saxs'] + plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, + legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) + + else: + d = total_res[key][:,qth] + plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel= 'xx', ylabel=key ) + if key=='mean_int_sets':ax.set_xlabel( 'frame ') + if xlim is not None:ax.set_xlim(xlim) + if ylim is not None:ax.set_ylim(ylim) + return fig,ax + + +def plot_entries_from_uids( uid_list, inDir, key= 'g2', qth = 1, legend_size=8, + yshift= 0.01, ymulti=1, xlim=None, ylim=None,legend=None, uid_length = None, filename_list=None, fp_fulluid=False, fp_append = None ):#,title='' ): + + ''' + YG Feb2, 2018, make yshift be also a list + + YG June 9, 2017@CHX + YG Sep 29, 2017@CHX. 
+ plot enteries for a list uids + Input: + uid_list: list, a list of uid (string) + inDir: string, imported folder for saved analysis results + key: string, plot entry, surport + 'g2' for one-time, + 'iq' for q~iq + 'mean_int_sets' for mean intensity of each roi as a function of frame + TODOLIST:#also can plot the following + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + qth: integer, the intesrest q number + yshift: float, values of shift in y direction + xlim: [x1,x2], for plot x limit + ylim: [y1,y2], for plot y limit + Output: + show the plot + Example: + uid_list = ['5492b9', '54c5e0'] + plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) + ''' + + uid_dict = {} + fig, ax =plt.subplots() + for uid in uid_list: + if uid_length is not None: + uid_ = uid[:uid_length] + else: + uid_=uid + #print(uid_) + uid_dict[uid_] = get_meta_data( uid )['uid'] + #for i, u in enumerate( list( uid_dict.keys() )): + for i,u in enumerate( list(uid_list)): + #print(u) + if isinstance(yshift,list): + yshift_ = yshift[i] + ii = i + 1 + else: + yshift_ = yshift + ii = i + if uid_length is not None: + u = u[:uid_length] + inDiru = inDir + u + '/' + if fp_fulluid: + inDiru = inDir + uid_dict[u] + '/' + else: + inDiru = inDir + u + '/' + if filename_list is None: + if fp_append is not None: + filename = 'uid=%s%s_Res.h5'%(uid_dict[u],fp_append ) + else: + filename = 'uid=%s_Res.h5'%uid_dict[u] + else: + filename = filename_list[i] + total_res = extract_xpcs_results_from_h5( filename = filename, + import_dir = inDiru, exclude_keys = ['g12b'] ) + if key=='g2': + d = total_res[key][1:,qth] + taus = total_res['taus'][1:] + if legend is None: + leg=u + else: + leg='uid=%s-->'%u+legend[i] + plot1D( x = taus, y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, + xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) + title='Q = %s'%(total_res['qval_dict'][qth]) + ax.set_title(title) + elif key=='imgsum': + d = total_res[key] + plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel='Frame', ylabel='imgsum',) + + elif key == 'iq': + + x= total_res['q_saxs'] + y= total_res['iq_saxs'] + plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, + legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) + + else: + d = total_res[key][:,qth] + plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel= 'xx', ylabel=key ) + if key=='mean_int_sets':ax.set_xlabel( 'frame ') + if xlim is not None:ax.set_xlim(xlim) + if ylim is not None:ax.set_ylim(ylim) + return fig,ax + + + + + + + +#################################################################################################### +##For real time analysis## +################################################################################################# + + + + + +def get_iq_from_uids( uids, mask, setup_pargs ): + ''' Y.G. 
developed July 17, 2017 @CHX + Get q-Iq of a uids dict, each uid could corrrespond one frame or a time seriers + uids: dict, val: meaningful decription, key: a list of uids + mask: bool-type 2D array + setup_pargs: dict, at least should contains, the following paramters for calculation of I(q) + + 'Ldet': 4917.50495, + 'center': [988, 1120], + 'dpix': 0.075000003562308848, + 'exposuretime': 0.99998999, + 'lambda_': 1.2845441, + 'path': '/XF11ID/analysis/2017_2/yuzhang/Results/Yang_Pressure/', + + ''' + Nuid = len( np.concatenate( np.array( list(uids.values()) ) ) ) + label = np.zeros( [ Nuid+1], dtype=object) + img_data = {} #np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]]) + + n = 0 + for k in list(uids.keys()): + for uid in uids[k]: + + uidstr = 'uid=%s'%uid + sud = get_sid_filenames(db[uid]) + #print(sud) + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + md.update( imgs.md ); + Nimg = len(imgs); + if Nimg !=1: + filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%sud[1] + mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, + force_compress= False, para_compress= True, bad_pixel_threshold = 1e14, + bins=1, num_sub= 100, num_max_para_process= 500, with_pickle=True ) + else: + avg_img = imgs[0] + show_img( avg_img, vmin=0.00001, vmax= 1e1, logs=True, aspect=1, #save_format='tif', + image_name= uidstr + '_img_avg', save=True, + path=setup_pargs['path'], cmap = cmap_albula ) + + setup_pargs['uid'] = uidstr + + qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img, mask, + pargs= setup_pargs, save=True ) + if n ==0: + iqs = np.zeros( [ len(q_saxs), Nuid+1]) + iqs[:,0] = q_saxs + label[0] = 'q' + img_data[ k + '_'+ uid ] = avg_img + iqs[:,n+1] = iq_saxs + label[n+1] = k + '_'+ uid + n +=1 + plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()*0.9], ylim = [iq_saxs.min(), iq_saxs.max()] ) + if 'filename' in list(setup_pargs.keys()): + filename = setup_pargs['filename'] + else: + filename = 'qIq.csv' + pd = save_arrays( iqs, label=label, dtype='array', filename= filename, + path= setup_pargs['path'], return_res=True) + return pd, img_data + + + +def wait_func( wait_time = 2 ): + print( 'Waiting %s secdons for upcoming data...'%wait_time) + time.sleep( wait_time) + #print( 'Starting to do something here...') + +def wait_data_acquistion_finish( uid, wait_time = 2, max_try_num = 3 ): + '''check the completion of a data uid acquistion + Parameter: + uid: + wait_time: the waiting step in unit of second + check_func: the function to check the completion + max_try_num: the maximum number for waiting + Return: + True: completion + False: not completion (include waiting time exceeds the max_wait_time) + + ''' + FINISH = False + Fake_FINISH = True + w = 0 + sleep_time = 0 + while( not FINISH): + try: + get_meta_data( uid ) + FINISH = True + print( 'The data acquistion finished.') + print( 'Starting to do something here...') + except: + wait_func( wait_time = wait_time ) + w += 1 + print('Try number: %s'%w) + if w> max_try_num: + print( 'There could be something going wrong with data acquistion.') + print( 'Force to terminate after %s tries.'%w) + FINISH = True + Fake_FINISH = False + sleep_time += wait_time + return FINISH * Fake_FINISH #, sleep_time + +def get_uids_by_range( start_uidth=-1, end_uidth = 0 ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end uid number, i.e. 
-10, -1 + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = list([ db[n] for n in range(start_uidth, end_uidth)] ) + if len(hdrs)!=0: + print ('Totally %s uids are found.'%(len(hdrs))) + + uids=[] #short uid + fuids=[] #full uid + for hdr in hdrs: + fuid = hdr['start']['uid'] + uids.append( fuid[:8] ) + fuids.append( fuid ) + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(uids), np.array(fuids) + + +def get_uids_in_time_period( start_time, stop_time ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = list( db(start_time= start_time, stop_time = stop_time) ) + if len(hdrs)!=0: + print ('Totally %s uids are found.'%(len(hdrs))) + + uids=[] #short uid + fuids=[] #full uid + for hdr in hdrs: + fuid = hdr['start']['uid'] + uids.append( fuid[:8] ) + fuids.append( fuid ) + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(uids), np.array(fuids) + +def do_compress_on_line( start_time, stop_time, mask_dict=None, mask=None, + wait_time = 2, max_try_num = 3 ): + '''Y.G. Mar 10, 2017 + Do on-line compress by giving start time and stop time + Parameters: + mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + Return: + running time + ''' + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + print( fuids ) + if len(fuids): + for uid in fuids: + print('*'*50) + print('Do compress for %s now...'%uid) + if db[uid]['start']['plan_name'] == 'count': + finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) + if finish: + try: + md = get_meta_data( uid ) + compress_multi_uids( [ uid ], mask=mask, mask_dict = mask_dict, + force_compress=False, para_compress= True, bin_frame_number=1 ) + + update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + except: + print('There are something wrong with this data: %s...'%uid) + print('*'*50) + return time.time() - t0 + + + +def realtime_xpcs_analysis( start_time, stop_time, run_pargs, md_update=None, + wait_time = 2, max_try_num = 3, emulation=False,clear_plot=False ): + '''Y.G. 
Mar 10, 2017 + Do on-line xpcs by giving start time and stop time + Parameters: + run_pargs: all the run control parameters, including giving roi_mask + md_update: if not None, a dict, will update all the found uid metadata by this md_update + e.g, + md['beam_center_x'] = 1012 + md['beam_center_y']= 1020 + md['det_distance']= 16718.0 + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + emulation: if True, it will only check dataset and not do real analysis + Return: + running time + ''' + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + #print( fuids ) + if len(fuids): + for uid in fuids: + print('*'*50) + #print('Do compress for %s now...'%uid) + print('Starting analysis for %s now...'%uid) + if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': + #if db[uid]['start']['dtype'] =='xpcs': + finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) + if finish: + try: + md = get_meta_data( uid ) + ##corect some metadata + if md_update is not None: + md.update( md_update ) + #if 'username' in list(md.keys()): + #try: + # md_cor['username'] = md_update['username'] + #except: + # md_cor = None + #uid = uid[:8] + #print(md_cor) + if not emulation: + #suid=uid[:6] + run_xpcs_xsvs_single( uid, run_pargs= run_pargs, md_cor = None, + return_res= False, clear_plot=clear_plot ) + #update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + except: + print('There are something wrong with this data: %s...'%uid) + else: + print('\nThis is not a XPCS series. We will simiply ignore it.') + print('*'*50) + + #print( 'Sleep 10 sec here!!!') + #time.sleep(10) + + return time.time() - t0 + + + + + + + + + + + + +#################################################################################################### +##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## +################################################################################################# +def compress_multi_uids( uids, mask, mask_dict = None, force_compress=False, para_compress= True, bin_frame_number=1, + reverse=True, rot90=False,use_local_disk=True): + ''' Compress time series data for a set of uids + Parameters: + uids: list, a list of uid + mask: bool array, mask array + force_compress: default is False, just load the compresssed data; + if True, will compress it to overwrite the old compressed data + para_compress: apply the parallel compress algorithm + bin_frame_number: + Return: + None, save the compressed data in, by default, /XF11ID/analysis/Compressed_Data with filename as + '/uid_%s.cmp' uid is the full uid string + + e.g., compress_multi_uids( uids, mask, force_compress= False, bin_frame_number=1 ) + + ''' + for uid in uids: + print('UID: %s is in processing...'%uid) + if validate_uid( uid ): + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= reverse, rot90=rot90 ) + sud = get_sid_filenames(db[uid]) + for pa in sud[2]: + if 'master.h5' in pa: + data_fullpath = pa + print( imgs, data_fullpath ) + if mask_dict is not None: + mask = mask_dict[md['detector']] + print('The detecotr is: %s'% md['detector']) + md.update( imgs.md ) + if not use_local_disk: + cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data' + else: + cmp_path = '/tmp_data/compressed' + cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data' + if bin_frame_number==1: + cmp_file = '/uid_%s.cmp'%md['uid'] + else: + cmp_file 
= '/uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) + filename = cmp_path + cmp_file + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, + force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold = 1e14, + reverse=reverse, rot90=rot90, + bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True, + direct_load_data =use_local_disk, data_path = data_fullpath, ) + + print('Done!') + + +#################################################################################################### +##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## +################################################################################################# + +def get_two_time_mulit_uids( uids, roi_mask, norm= None, bin_frame_number=1, path=None, force_generate=False, + md=None, imgs=None,direct_load_data=False,compress_path=None ): + + ''' Calculate two time correlation by using auto_two_Arrayc func for a set of uids, + if the two-time resutls are already created, by default (force_generate=False), just pass + Parameters: + uids: list, a list of uid + roi_mask: bool array, roi mask array + norm: the normalization array + path: string, where to save the two time + force_generate: default, False, if the two-time resutls are already created, just pass + if True, will force to calculate two-time no matter exist or not + + Return: + None, save the two-time in as path + uid + 'uid=%s_g12b'%uid + + e.g., + get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, + path= data_dir,force_generate=False ) + + ''' + + qind, pixelist = roi.extract_label_indices(roi_mask) + for uid in uids: + print('UID: %s is in processing...'%uid) + if not direct_load_data: + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + else: + pass + N = len(imgs) + #print( N ) + if compress_path is None: + compress_path = '/XF11ID/analysis/Compressed_Data/' + if bin_frame_number==1: + filename = '%s'%compress_path +'uid_%s.cmp'%md['uid'] + else: + filename = '%s'%compress_path +'uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) + + FD = Multifile(filename, 0, N//bin_frame_number) + #print( FD.beg, FD.end) + uid_ = md['uid'] + os.makedirs(path + uid_ + '/', exist_ok=True) + filename = path + uid_ + '/' + 'uid=%s_g12b'%uid + doit = True + if not force_generate: + if os.path.exists( filename + '.npy'): + doit=False + print('The two time correlation function for uid=%s is already calculated. 
Just pass...'%uid) + if doit: + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm= norm ).get_data() + g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None ) + np.save( filename, g12b) + del g12b + print( 'The two time correlation function for uid={} is saved as {}.'.format(uid, filename )) + + + + + + +def get_series_g2_from_g12( g12b, fra_num_by_dose = None, dose_label = None, + good_start=0, log_taus = True, num_bufs=8, time_step=1 ): + ''' + Get a series of one-time function from two-time by giving noframes + Parameters: + g12b: a two time function + good_start: the start frame number + fra_num_by_dose: a list, correlation number starting from index 0, + if this number is larger than g12b length, will give a warning message, and + will use g12b length to replace this number + by default is None, will = [ g12b.shape[0] ] + dose_label: the label of each dose, also is the keys of returned g2, lag + log_taus: if true, will only return a g2 with the correponding tau values + as calculated by multi-tau defined taus + Return: + + g2_series, a dict, with keys as dose_label (corrected on if warning message is given) + lag_steps, the corresponding lags + + ''' + g2={} + lag_steps = {} + L,L,qs= g12b.shape + if fra_num_by_dose is None: + fra_num_by_dose = [L] + if dose_label is None: + dose_label = fra_num_by_dose + fra_num_by_dose = sorted( fra_num_by_dose ) + dose_label = sorted( dose_label ) + for i, good_end in enumerate(fra_num_by_dose): + key = round(dose_label[i] ,3) + #print( good_end ) + if good_end>L: + warnings.warn("Warning: the dose value is too large, and please check the maxium dose in this data set and give a smaller dose value. We will use the maxium dose of the data.") + good_end = L + if not log_taus: + g2[ key ] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] ) + else: + #print( good_end, num_bufs ) + lag_step = get_multi_tau_lag_steps(good_end, num_bufs) + lag_step = lag_step[ lag_step < good_end - good_start] + #print( len(lag_steps ) ) + lag_steps[key] = lag_step * time_step + g2[key] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] )[lag_step] + + return lag_steps, g2 + + +def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): + ''' + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + ''' + return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att ) + + + +def get_series_one_time_mulit_uids( uids, qval_dict, trans = None, good_start=0, path=None, + exposure_dose = None, dead_time = 0, + num_bufs =8, save_g2=True, + md = None, imgs=None, direct_load_data= False ): + ''' Calculate a dose depedent series of one time correlations from two time + Parameters: + uids: list, a list of uid + trans: list, same length as uids, the transmission list + exposure_dose: list, a list x-ray exposure dose; + by default is None, namely, = [ max_frame_number ], + can be [3.34 334, 3340] in unit of ms, in unit of exp_time(ms)*N(fram num)*att( attenuation) + 
path: string, where to load the two time, if None, ask for it + the real g12 path is two_time_path + uid + '/' + qval_dict: the dictionary for q values + Return: + taus_uids, with keys as uid, and + taus_uids[uid] is also a dict, with keys as dose_frame + g2_uids, with keys as uid, and + g2_uids[uid] is also a dict, with keys as dose_frame + will also save g2 results to the 'path' + ''' + + if path is None: + print( 'Please calculate two time function first by using get_two_time_mulit_uids function.') + else: + taus_uids = {} + g2_uids = {} + for i, uid in enumerate(uids): + print('UID: %s is in processing...'%uid) + if not direct_load_data: + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + #print(md) + detectors = md['detector'] + if isinstance( detectors,list): + if len(detectors)>1: + if '_image' in md['detector']: + pref = md['detector'][:-5] + else: + pref=md['detector'] + for k in [ 'beam_center_x', 'beam_center_y','cam_acquire_time','cam_acquire_period','cam_num_images', + 'wavelength', 'det_distance', 'photon_energy']: + md[k] = md[ pref + '%s'%k] + + else: + pass + N = len(imgs) + if exposure_dose is None: + exposure_dose = [N] + try: + g2_path = path + uid + '/' + g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) + except: + g2_path = path + md['uid'] + '/' + g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) + try: + exp_time = float( md['cam_acquire_time']) #*1000 #from second to ms + except: + exp_time = float( md['exposure time']) #* 1000 #from second to ms + if trans is None: + try: + transi = md['transmission'] + except: + transi = [1] + else: + transi = trans[i] + fra_num_by_dose = get_fra_num_by_dose( exp_dose = exposure_dose, + exp_time =exp_time, dead_time = dead_time, att = transi ) + + print( 'uid: %s--> fra_num_by_dose: %s'%(uid, fra_num_by_dose ) ) + + taus_uid, g2_uid = get_series_g2_from_g12( g12b, fra_num_by_dose=fra_num_by_dose, + dose_label = exposure_dose, + good_start=good_start, num_bufs=num_bufs, + time_step = exp_time)#md['cam_acquire_period'] ) + g2_uids['uid_%03d=%s'%(i,uid)] = g2_uid + taus_uids['uid_%03d=%s'%(i,uid)] = taus_uid + if save_g2: + for k in list( g2_uid.keys()): + #print(k) + uid_ = uid + '_fra_%s_%s'%(good_start, k ) + save_g2_general( g2_uid[k], taus=taus_uid[k],qr=np.array( list( qval_dict.values() ) )[:,0], + uid=uid_+'_g2.csv', path= g2_path, return_res=False ) + return taus_uids, g2_uids + + + + +def plot_dose_g2( taus_uids, g2_uids, qval_dict, qth_interest = None, ylim=[0.95, 1.05], vshift=0.1, + fit_res= None, geometry= 'saxs',filename= 'dose'+'_g2', legend_size=None, + path= None, function= None, g2_labels=None, ylabel= 'g2_dose', append_name= '_dose', + return_fig=False): + '''Plot a does-dependent g2 + taus_uids, dict, with format as {uid1: { dose1: tau_1, dose2: tau_2...}, uid2: ...} + g2_uids, dict, with format as {uid1: { dose1: g2_1, dose2: g2_2...}, uid2: ...} + qval_dict: a dict of qvals + vshift: float, vertical shift value of different dose of g2 + + ''' + + uids = sorted( list( taus_uids.keys() ) ) + #print( uids ) + dose = sorted( list( taus_uids[ uids[0] ].keys() ) ) + if qth_interest is None: + g2_dict= {} + taus_dict = {} + if g2_labels is None: + g2_labels = [] + for i in range( len( dose )): + g2_dict[i + 1] = [] + taus_dict[i +1 ] = [] + #print ( i ) + for j in range( len( uids )): + #print( uids[i] , dose[j]) + g2_dict[i +1 ].append( g2_uids[ uids[j] ][ dose[i] ] + vshift*i ) + taus_dict[i +1 ].append( taus_uids[ uids[j] ][ dose[i] ] ) + if j ==0: + g2_labels.append( 
'Dose_%s'%dose[i] ) + + plot_g2_general( g2_dict, taus_dict, + ylim=[ylim[0], ylim[1] + vshift * len(dose)], + qval_dict = qval_dict, fit_res= None, geometry= geometry,filename= filename, + path= path, function= function, ylabel= ylabel, g2_labels=g2_labels, append_name= append_name ) + + else: + fig,ax= plt.subplots() + q = qval_dict[qth_interest-1][0] + j = 0 + for uid in uids: + #uid = uids[0] + #print( uid ) + dose_list = sorted( list(taus_uids['%s'%uid].keys()) ) + #print( dose_list ) + for i, dose in enumerate(dose_list): + dose = float(dose) + if j ==0: + legend= 'dose_%s'%round(dose,2) + else: + legend = '' + + #print( markers[i], colors[i] ) + + plot1D(x= taus_uids['%s'%uid][dose_list[i]], + y =g2_uids['%s'%uid][dose_list[i]][:,qth_interest] + i*vshift, + logx=True, ax=ax, legend= legend, m = markers[i], c= colors[i], + lw=3, title='%s_Q=%s'%(uid, q) + r'$\AA^{-1}$', legend_size=legend_size ) + ylabel='g2--Dose (trans*exptime_sec)' + j +=1 + + ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + ax.set_ylim ( ylim ) + if return_fig: + return fig, ax + #return taus_dict, g2_dict + + + + +def run_xpcs_xsvs_single( uid, run_pargs, md_cor=None, return_res=False,reverse=True, clear_plot=False ): + '''Y.G. Dec 22, 2016 + Run XPCS XSVS analysis for a single uid + Parameters: + uid: unique id + run_pargs: dict, control run type and setup parameters, such as q range et.al. + reverse:,True, revserse the image upside down + Return: + save analysis result to csv/png/h5 files + return_res: if true, return a dict, containing g2,g4,g12,contrast et.al. depending on the run type + An example for the run_pargs: + + run_pargs= dict( + scat_geometry = 'gi_saxs' #suport 'saxs', 'gi_saxs', 'ang_saxs' (for anisotropics saxs or flow-xpcs) + force_compress = True,#False, + para_compress = True, + run_fit_form = False, + run_waterfall = True,#False, + run_t_ROI_Inten = True, + #run_fit_g2 = True, + fit_g2_func = 'stretched', + run_one_time = True,#False, + run_two_time = True,#False, + run_four_time = False, + run_xsvs=True, + att_pdf_report = True, + show_plot = False, + + CYCLE = '2016_3', + mask_path = '/XF11ID/analysis/2016_3/masks/', + mask_name = 'Nov28_4M_SAXS_mask.npy', + good_start = 5, + + uniformq = True, + inner_radius= 0.005, #0.005 for 50 nm, 0.006, #for 10nm/coralpor + outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor + num_rings = 12, + gap_ring_number = 6, + number_rings= 1, + #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + #width = 0.0002 + qth_interest = 1, #the intested single qth + use_sqnorm = False, + use_imgsum_norm = True, + + pdf_version = '_1' #for pdf report name + ) + + md_cor: if not None, will update the metadata with md_cor + + ''' + + scat_geometry = run_pargs['scat_geometry'] + force_compress = run_pargs['force_compress'] + para_compress = run_pargs['para_compress'] + run_fit_form = run_pargs['run_fit_form'] + run_waterfall = run_pargs['run_waterfall'] + run_t_ROI_Inten = run_pargs['run_t_ROI_Inten'] + + #run_fit_g2 = run_pargs['run_fit_g2'], + fit_g2_func = run_pargs['fit_g2_func'] + run_one_time = run_pargs['run_one_time'] + run_two_time = run_pargs['run_two_time'] + run_four_time = run_pargs['run_four_time'] + run_xsvs=run_pargs['run_xsvs'] + try: + run_dose = run_pargs['run_dose'] + except: + run_dose= False + ############################################################### + if scat_geometry =='gi_saxs': #to be done for other types + run_xsvs = False; + 
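# --- Editor's usage sketch (not part of the original backup) ----------------------
# A hedged sketch of the dose-resolved workflow built from the helpers defined
# above: cache the two-time function, slice it into dose-dependent one-time g2
# curves, then plot them.  `data_dir`, `roi_mask` and `qval_dict` stand in for the
# usual pipeline setup and are placeholders, not real values.
uid_list = ['5492b9']
get_two_time_mulit_uids(uid_list, roi_mask, norm=None, bin_frame_number=1,
                        path=data_dir, force_generate=False)
# exposure dose per the docstring above: transmission * exposure_time * frame count
exposure_dose = [1.34 * 20, 1.34 * 100, 1.34 * 500]
taus_uids, g2_uids = get_series_one_time_mulit_uids(uid_list, qval_dict,
                                                    trans=[1.0], good_start=0,
                                                    path=data_dir,
                                                    exposure_dose=exposure_dose,
                                                    num_bufs=8, save_g2=False)
plot_dose_g2(taus_uids, g2_uids, qval_dict=qval_dict, ylim=[0.95, 1.2],
             vshift=0.0, geometry='saxs', path=data_dir)
# -----------------------------------------------------------------------------------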
############################################################### + + ############################################################### + if scat_geometry == 'ang_saxs': + run_xsvs= False;run_waterfall=False;run_two_time=False;run_four_time=False;run_t_ROI_Inten=False; + ############################################################### + if 'bin_frame' in list( run_pargs.keys() ): + bin_frame = run_pargs['bin_frame'] + bin_frame_number= run_pargs['bin_frame_number'] + else: + bin_frame = False + if not bin_frame: + bin_frame_number = 1 + + att_pdf_report = run_pargs['att_pdf_report'] + show_plot = run_pargs['show_plot'] + CYCLE = run_pargs['CYCLE'] + mask_path = run_pargs['mask_path'] + mask_name = run_pargs['mask_name'] + good_start = run_pargs['good_start'] + use_imgsum_norm = run_pargs['use_imgsum_norm'] + try: + use_sqnorm = run_pargs['use_sqnorm'] + except: + use_sqnorm = False + try: + inc_x0 = run_pargs['inc_x0'] + inc_y0 = run_pargs['inc_y0'] + except: + inc_x0 = None + inc_y0= None + + #for different scattering geogmetry, we only need to change roi_mask + #and qval_dict + qval_dict = run_pargs['qval_dict'] + if scat_geometry != 'ang_saxs': + roi_mask = run_pargs['roi_mask'] + qind, pixelist = roi.extract_label_indices( roi_mask ) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + + else: + roi_mask_p = run_pargs['roi_mask_p'] + qval_dict_p = run_pargs['qval_dict_p'] + roi_mask_v = run_pargs['roi_mask_v'] + qval_dict_v = run_pargs['qval_dict_v'] + + if scat_geometry == 'gi_saxs': + refl_x0 = run_pargs['refl_x0'] + refl_y0 = run_pargs['refl_y0'] + Qr, Qz, qr_map, qz_map = run_pargs['Qr'], run_pargs['Qz'], run_pargs['qr_map'], run_pargs['qz_map'] + + + taus=None;g2=None;tausb=None;g2b=None;g12b=None;taus4=None;g4=None;times_xsv=None;contrast_factorL=None; + qth_interest = run_pargs['qth_interest'] + pdf_version = run_pargs['pdf_version'] + + + try: + username = run_pargs['username'] + except: + username = getpass.getuser() + + data_dir0 = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') + os.makedirs(data_dir0, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + #uid = (sys.argv)[1] + print ('*'*40) + print ( '*'*5 + 'The processing uid is: %s'%uid + '*'*5) + print ('*'*40) + suid = uid #[:6] + data_dir = os.path.join(data_dir0, '%s/'%suid) + os.makedirs(data_dir, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir) + md = get_meta_data( uid ) + uidstr = 'uid=%s'%uid[:6] + imgs = load_data( uid, md['detector'], reverse= reverse ) + md.update( imgs.md ) + Nimg = len(imgs) + if md_cor is not None: + md.update( md_cor ) + + + if inc_x0 is not None: + md['beam_center_x']= inc_x0 + if inc_y0 is not None: + md['beam_center_y']= inc_y0 + + #print( run_pargs ) + #print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) + #print( inc_x0, inc_y0 ) + + if md['detector'] =='eiger1m_single_image': + Chip_Mask=np.load( '/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy') + elif md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': + Chip_Mask= np.array(np.load( '/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy'), dtype=bool) + BadPix = np.load('/XF11ID/analysis/2018_1/BadPix_4M.npy' ) + Chip_Mask.ravel()[BadPix] = 0 + elif md['detector'] =='eiger500K_single_image': + Chip_Mask= 1 #to be defined the chip mask + else: + Chip_Mask = 1 + #show_img(Chip_Mask) + + center = [ int(md['beam_center_y']),int( md['beam_center_x'] ) ] #beam center [y,x] for 
python image + + + pixel_mask = 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool) ) + print( 'The data are: %s' %imgs ) + + if False: + print_dict( md, ['suid', 'number of images', 'uid', 'scan_id', 'start_time', 'stop_time', 'sample', 'Measurement', + 'acquire period', 'exposure time', + 'det_distanc', 'beam_center_x', 'beam_center_y', ] ) + ## Overwrite Some Metadata if Wrong Input + dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( + md, Nimg, inc_x0 = inc_x0, inc_y0= inc_y0, pixelsize = 7.5*10*(-5) ) + + print( 'The beam center is: %s'%center ) + + timeperframe *= bin_frame_number + + setup_pargs=dict(uid=uidstr, dpix= dpix, Ldet=Ldet, lambda_= lambda_, exposuretime=exposuretime, + timeperframe=timeperframe, center=center, path= data_dir) + #print_dict( setup_pargs ) + + mask = load_mask(mask_path, mask_name, plot_ = False, image_name = uidstr + '_mask', reverse=reverse ) + mask *= pixel_mask + if md['detector'] =='eiger4m_single_image': + mask[:,2069] =0 # False #Concluded from the previous results + show_img(mask,image_name = uidstr + '_mask', save=True, path=data_dir) + mask_load=mask.copy() + imgsa = apply_mask( imgs, mask ) + + + img_choice_N = 2 + img_samp_index = random.sample( range(len(imgs)), img_choice_N) + avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uidstr) + + if avg_img.max() == 0: + print('There are no photons recorded for this uid: %s'%uid) + print('The data analysis should be terminated! Please try another uid.') + + else: + if scat_geometry !='saxs': + show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + image_name= uidstr + '_%s_frames_avg'%img_choice_N, save=True, path=data_dir) + else: + show_saxs_qmap( avg_img, setup_pargs, width=400, show_pixel = False, + vmin=.1, vmax= np.max(avg_img), logs=True, image_name= uidstr + '_%s_frames_avg'%img_choice_N ) + + compress=True + photon_occ = len( np.where(avg_img)[0] ) / ( imgsa[0].size) + #compress = photon_occ < .4 #if the photon ocupation < 0.5, do compress + print ("The non-zeros photon occupation is %s."%( photon_occ)) + print("Will " + 'Always ' + ['NOT', 'DO'][compress] + " apply compress process.") + #good_start = 5 #make the good_start at least 0 + t0= time.time() + filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid'] + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, + force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold= 1e14, + bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True ) + min_inten = 10 + good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] ) + print ('The good_start frame number is: %s '%good_start) + FD = Multifile(filename, good_start, len(imgs)) + #FD = Multifile(filename, good_start, 100) + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + print( uid_ ) + plot1D( y = imgsum[ np.array( [i for i in np.arange(good_start, len(imgsum)) if i not in bad_frame_list])], + title =uidstr + '_imgsum', xlabel='Frame', ylabel='Total_Intensity', legend='imgsum' ) + run_time(t0) + + mask = mask * Chip_Mask + + #%system free && sync && echo 3 > /proc/sys/vm/drop_caches && free + ## Get bad frame list by a polynominal fit + bad_frame_list = get_bad_frame_list( imgsum, fit=True, plot=True,polyfit_order = 30, + scale= 5.5, good_start = good_start, uid= uidstr, path=data_dir) + print( 'The bad frame list length is: %s'%len(bad_frame_list) ) + + ### Creat new mask by masking the bad pixels and get new avg_img + if 
False: + mask = mask_exclude_badpixel( bp, mask, md['uid']) + avg_img = get_avg_imgc( FD, sampling = 1, bad_frame_list=bad_frame_list ) + + show_img( avg_img, vmin=.001, vmax= np.max(avg_img), logs=True, aspect=1, #save_format='tif', + image_name= uidstr + '_img_avg', save=True, path=data_dir, cmap = cmap_albula ) + + imgsum_y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])] + imgsum_x = np.arange( len( imgsum_y)) + save_lists( [imgsum_x, imgsum_y], label=['Frame', 'Total_Intensity'], + filename=uidstr + '_img_sum_t', path= data_dir ) + plot1D( y = imgsum_y, title = uidstr + '_img_sum_t', xlabel='Frame', + ylabel='Total_Intensity', legend='imgsum', save=True, path=data_dir) + + + ############for SAXS and ANG_SAXS (Flow_SAXS) + if scat_geometry =='saxs' or scat_geometry =='ang_saxs': + + #show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + # image_name= uidstr + '_img_avg', save=True) + #np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) + + #try: + # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) + #except: + # hmask=1 + hmask=1 + qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True ) + + plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()], ylim = [iq_saxs.min(), iq_saxs.max()] ) + + #pd = trans_data_to_pd( np.where( hmask !=1), + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + + #pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) + + #mask =np.array( mask * hmask, dtype=bool) + #show_img( mask ) + + if run_fit_form: + form_res = fit_form_factor( q_saxs,iq_saxs, guess_values={'radius': 2500, 'sigma':0.05, + 'delta_rho':1E-10 }, fit_range=[0.0001, 0.015], fit_variables={'radius': T, 'sigma':T, + 'delta_rho':T}, res_pargs=setup_pargs, xlim=[0.0001, 0.015]) + + show_ROI_on_image( avg_img, roi_mask, center, label_on = False, rwidth =700, alpha=.9, + save=True, path=data_dir, uid=uidstr, vmin= np.min(avg_img), vmax= np.max(avg_img) ) + + qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) + plot_qIq_with_ROI( q_saxs, iq_saxs, qr, logs=True, uid=uidstr, xlim=[q_saxs.min(), q_saxs.max()], + ylim = [iq_saxs.min(), iq_saxs.max()], save=True, path=data_dir) + + if scat_geometry != 'ang_saxs': + Nimg = FD.end - FD.beg + time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) + time_edge = np.array( time_edge ) + good_start + #print( time_edge ) + qpt, iqst, qt = get_t_iqc( FD, time_edge, mask* Chip_Mask, pargs=setup_pargs, nx=1500 ) + plot_t_iqc( qt, iqst, time_edge, pargs=setup_pargs, xlim=[qt.min(), qt.max()], + ylim = [iqst.min(), iqst.max()], save=True ) + + elif scat_geometry == 'gi_waxs': + #roi_mask[badpixel] = 0 + qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) + show_ROI_on_image( avg_img, roi_mask, label_on = True, alpha=.5,save=True, path= data_dir, uid=uidstr)#, vmin=1, vmax=15) + + elif scat_geometry == 'gi_saxs': + show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), + logs=True, image_name= uidstr + '_img_avg', save=True, path=data_dir) + ticks_ = get_qzr_map( qr_map, qz_map, inc_x0, Nzline=10, Nrline=10 ) + ticks = ticks_[:4] + plot_qzr_map( qr_map, qz_map, inc_x0, ticks = ticks_, data= avg_img, uid= uidstr, path = data_dir ) + show_qzr_roi( avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, 
uid=uidstr ) + qr_1d_pds = cal_1d_qr( avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs ) + plot_qr_1d_with_ROI( qr_1d_pds, qr_center=np.unique( np.array(list( qval_dict.values() ) )[:,0] ), + loglog=False, save=True, uid=uidstr, path = data_dir) + + Nimg = FD.end - FD.beg + time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) + time_edge = np.array( time_edge ) + good_start + qrt_pds = get_t_qrc( FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid = uidstr ) + plot_qrt_pds( qrt_pds, time_edge, qz_index = 0, uid = uidstr, path = data_dir ) + + + + ############################## + ##the below works for all the geometries + ######################################## + if scat_geometry !='ang_saxs': + roi_inten = check_ROI_intensity( avg_img, roi_mask, ring_number= qth_interest, uid =uidstr, save=True, path=data_dir ) + if scat_geometry =='saxs' or scat_geometry =='gi_saxs' or scat_geometry =='gi_waxs': + if run_waterfall: + wat = cal_waterfallc( FD, roi_mask, + qindex= qth_interest, save = True, path=data_dir,uid=uidstr) + if run_waterfall: + plot_waterfallc( wat, qindex=qth_interest, aspect=None, + vmax= np.max(wat), uid=uidstr, save =True, + path=data_dir, beg= FD.beg) + ring_avg = None + + if run_t_ROI_Inten: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, multi_cor=True ) + plot_each_ring_mean_intensityc( times_roi, mean_int_sets, uid = uidstr, save=True, path=data_dir ) + roi_avg = np.average( mean_int_sets, axis=0) + + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + lag_steps = None + + if use_sqnorm: + norm = get_pixelist_interp_iq( qp_saxs, iq_saxs, roi_mask, center) + else: + norm=None + + define_good_series = False + if define_good_series: + FD = Multifile(filename, beg = good_start, end = Nimg) + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + print( uid_ ) + + if 'g2_fit_variables' in list( run_pargs.keys() ): + g2_fit_variables = run_pargs['g2_fit_variables'] + else: + g2_fit_variables = {'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True} + + if 'g2_guess_values' in list( run_pargs.keys() ): + g2_guess_values = run_pargs['g2_guess_values'] + else: + g2_guess_values= {'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,} + + if 'g2_guess_limits' in list( run_pargs.keys()): + g2_guess_limits = run_pargs['g2_guess_limits'] + else: + g2_guess_limits = dict( baseline =[1, 2], alpha=[0, 2], beta = [0, 1], relaxation_rate= [0.001, 5000]) + + if run_one_time: + if use_imgsum_norm: + imgsum_ = imgsum + else: + imgsum_ = None + if scat_geometry !='ang_saxs': + t0 = time.time() + g2, lag_steps = cal_g2p( FD, roi_mask, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + run_time(t0) + taus = lag_steps * timeperframe + g2_pds = save_g2_general( g2, taus=taus,qr=np.array( list( qval_dict.values() ) )[:,0], + uid=uid_+'_g2.csv', path= data_dir, return_res=True ) + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables= g2_fit_variables, + guess_values= g2_guess_values, + guess_limits = g2_guess_limits) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + #if run_one_time: + #plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + + plot_g2_general( g2_dict={1:g2, 
2:g2_fit}, taus_dict={1:taus, 2:taus_fit},vlim=[0.95, 1.05], + qval_dict = qval_dict, fit_res= g2_fit_result, geometry=scat_geometry,filename=uid_ + '_g2', + path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_fit') + + D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], qrate_fit_res, + geometry= scat_geometry,uid=uid_ , path= data_dir ) + + + else: + t0 = time.time() + g2_v, lag_steps_v = cal_g2p( FD, roi_mask_v, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + g2_p, lag_steps_p = cal_g2p( FD, roi_mask_p, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + run_time(t0) + + taus_v = lag_steps_v * timeperframe + g2_pds_v = save_g2_general( g2_v, taus=taus_v,qr=np.array( list( qval_dict_v.values() ) )[:,0], + uid=uid_+'_g2v.csv', path= data_dir, return_res=True ) + + taus_p = lag_steps_p * timeperframe + g2_pds_p = save_g2_general( g2_p, taus=taus_p,qr=np.array( list( qval_dict_p.values() ) )[:,0], + uid=uid_+'_g2p.csv', path= data_dir, return_res=True ) + + fit_g2_func_v = 'stretched' #for vertical + g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( g2_v, taus_v, + function = fit_g2_func_v, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + g2_fit_paras_v = save_g2_fit_para_tocsv(g2_fit_result_v, filename= uid_ +'_g2_fit_paras_v.csv', path=data_dir ) + + fit_g2_func_p ='flow_para' #for parallel + g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( g2_p, taus_p, + function = fit_g2_func_p, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True,'flow_velocity':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,'flow_velocity':1}) + g2_fit_paras_p = save_g2_fit_para_tocsv(g2_fit_result_p, filename= uid_ +'_g2_fit_paras_p.csv', path=data_dir ) + + + + plot_g2_general( g2_dict={1:g2_v, 2:g2_fit_v}, taus_dict={1:taus_v, 2:taus_fit_v},vlim=[0.95, 1.05], + qval_dict = qval_dict_v, fit_res= g2_fit_result_v, geometry=scat_geometry,filename= uid_+'_g2_v', + path= data_dir, function= fit_g2_func_v, ylabel='g2_v', append_name= '_fit') + + plot_g2_general( g2_dict={1:g2_p, 2:g2_fit_p}, taus_dict={1:taus_p, 2:taus_fit_p},vlim=[0.95, 1.05], + qval_dict = qval_dict_p, fit_res= g2_fit_result_p, geometry=scat_geometry,filename= uid_+'_g2_p', + path= data_dir, function= fit_g2_func_p, ylabel='g2_p', append_name= '_fit') + + combine_images( [data_dir + uid_+'_g2_v_fit.png', data_dir + uid_+'_g2_p_fit.png'], data_dir + uid_+'_g2_fit.png', outsize=(2000, 2400) ) + + + D0_v, qrate_fit_res_v = get_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], qrate_fit_res_v, + geometry= scat_geometry,uid=uid_ +'_vert' , path= data_dir ) + + D0_p, qrate_fit_res_p = get_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], qrate_fit_res_p, + geometry= scat_geometry,uid=uid_ +'_para' , path= data_dir ) + + + combine_images( [data_dir + uid_+ '_vert_Q_Rate_fit.png', data_dir + uid_+ '_para_Q_Rate_fit.png'], data_dir 
+ uid_+'_Q_Rate_fit.png', outsize=(2000, 2400) ) + + + # For two-time + data_pixel = None + if run_two_time: + + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() + t0=time.time() + g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None ) + if run_dose: + np.save( data_dir + 'uid=%s_g12b'%uid, g12b) + + + if lag_steps is None: + num_bufs=8 + noframes = FD.end - FD.beg + num_levels = int(np.log( noframes/(num_bufs-1))/np.log(2) +1) +1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + max_taus= lag_steps.max() + lag_steps = lag_steps[ lag_steps < Nimg - good_start ] + + run_time( t0 ) + + show_C12(g12b, q_ind= qth_interest, N1= FD.beg, N2=min( FD.end,5000), vmin= 0.99, vmax=1.3, + timeperframe=timeperframe,save=True, cmap=cmap_albula, + path= data_dir, uid = uid_ ) + + #print('here') + #show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, + # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) + max_taus = Nimg + t0=time.time() + #g2b = get_one_time_from_two_time(g12b)[:max_taus] + g2b = get_one_time_from_two_time(g12b)[lag_steps] + + tausb = lag_steps *timeperframe + run_time(t0) + + + #tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe + g2b_pds = save_g2_general( g2b, taus=tausb, qr= np.array( list( qval_dict.values() ) )[:,0], + qz=None, uid=uid_ +'_g2b.csv', path= data_dir, return_res=True ) + + + g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( g2b, tausb, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables=g2_fit_variables, guess_values=g2_guess_values, guess_limits =g2_guess_limits) + + g2b_fit_paras = save_g2_fit_para_tocsv(g2_fit_resultb, + filename= uid_ + '_g2b_fit_paras.csv', path=data_dir ) + + D0b, qrate_fit_resb = get_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], + fit_range=None, geometry= scat_geometry ) + + + #print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) + plot_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb, + geometry= scat_geometry,uid=uid_ +'_two_time' , path= data_dir ) + + + + plot_g2_general( g2_dict={1:g2b, 2:g2_fitb}, taus_dict={1:tausb, 2:taus_fitb},vlim=[0.95, 1.05], + qval_dict=qval_dict, fit_res= g2_fit_resultb, geometry=scat_geometry,filename=uid_+'_g2', + path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_b_fit') + + if run_two_time and run_one_time: + plot_g2_general( g2_dict={1:g2, 2:g2b}, taus_dict={1:taus, 2:tausb},vlim=[0.95, 1.05], + qval_dict=qval_dict, g2_labels=['from_one_time', 'from_two_time'], + geometry=scat_geometry,filename=uid_+'_g2_two_g2', path= data_dir, ylabel='g2', ) + + + + # Four Time Correlation + + if run_four_time: #have to run one and two first + t0=time.time() + g4 = get_four_time_from_two_time(g12b, g2=g2b)[:max_taus] + run_time(t0) + + taus4 = np.arange( g4.shape[0])*timeperframe + g4_pds = save_g2_general( g4, taus=taus4, qr=np.array( list( qval_dict.values() ) )[:,0], + qz=None, uid=uid_ +'_g4.csv', path= data_dir, return_res=True ) + plot_g2_general( g2_dict={1:g4}, taus_dict={1:taus4},vlim=[0.95, 1.05], qval_dict=qval_dict, fit_res= None, + geometry=scat_geometry,filename=uid_+'_g4',path= data_dir, ylabel='g4') + + if run_dose: + get_two_time_mulit_uids( [uid], roi_mask, norm= norm, bin_frame_number=bin_frame_number, + path= data_dir0, force_generate=False ) + N = len(imgs) + try: + tr = md['transmission'] + except: + tr = 1 + if 'dose_frame' in list(run_pargs.keys()): + dose_frame = 
run_pargs['dose_frame'] + else: + dose_frame = np.int_([ N/8, N/4 ,N/2, 3*N/4, N*0.99 ] ) + #N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 + exposure_dose = tr * exposuretime * dose_frame + taus_uids, g2_uids = get_series_one_time_mulit_uids( [ uid ], qval_dict, good_start=good_start, + path= data_dir0, exposure_dose = exposure_dose, num_bufs =8, save_g2= False, + dead_time = 0, trans = [ tr ] ) + + plot_dose_g2( taus_uids, g2_uids, ylim=[0.95, 1.2], vshift= 0.00, + qval_dict = qval_dict, fit_res= None, geometry= scat_geometry, + filename= '%s_dose_analysis'%uid_, + path= data_dir, function= None, ylabel='g2_Dose', g2_labels= None, append_name= '' ) + + # Speckel Visiblity + if run_xsvs: + max_cts = get_max_countc(FD, roi_mask ) + qind, pixelist = roi.extract_label_indices( roi_mask ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + #time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) + time_steps = [0,1] #only run the first two levels + num_times = len(time_steps) + times_xsvs = exposuretime + (2**( np.arange( len(time_steps) ) ) -1 ) *timeperframe + print( 'The max counts are: %s'%max_cts ) + + ### Do historam + if roi_avg is None: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, ) + roi_avg = np.average( mean_int_sets, axis=0) + + t0=time.time() + spec_bins, spec_his, spec_std = xsvsp( FD, np.int_(roi_mask), norm=None, + max_cts=int(max_cts+2), bad_images=bad_frame_list, only_two_levels=True ) + spec_kmean = np.array( [roi_avg * 2**j for j in range( spec_his.shape[0] )] ) + run_time(t0) + + run_xsvs_all_lags = False + if run_xsvs_all_lags: + times_xsvs = exposuretime + lag_steps * acquisition_period + if data_pixel is None: + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() + t0=time.time() + spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std(data_pixel, np.int_(ro_mask), lag_steps ) + run_time(t0) + spec_pds = save_bin_his_std( spec_bins, spec_his, spec_std, filename=uid_+'_spec_res.csv', path=data_dir ) + + ML_val, KL_val,K_ = get_xsvs_fit( spec_his, spec_kmean, spec_std, max_bins=2,varyK= False, ) + + #print( 'The observed average photon counts are: %s'%np.round(K_mean,4)) + #print( 'The fitted average photon counts are: %s'%np.round(K_,4)) + print( 'The difference sum of average photon counts between fit and data are: %s'%np.round( + abs(np.sum( spec_kmean[0,:] - K_ )),4)) + print( '#'*30) + qth= 10 + print( 'The fitted M for Qth= %s are: %s'%(qth, ML_val[qth]) ) + print( K_[qth]) + print( '#'*30) + + + plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std=spec_std, + xlim = [0,10], vlim =[.9, 1.1], + uid=uid_, qth= qth_interest, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir) + + plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std = spec_std, + xlim = [0,15], vlim =[.9, 1.1], + uid=uid_, qth= None, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir ) + + ### Get contrast + contrast_factorL = get_contrast( ML_val) + spec_km_pds = save_KM( spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_ , path = data_dir ) + #print( spec_km_pds ) + + plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, + vlim=[0.8,1.2], qth = qth_interest, uid=uid_,path = data_dir, legend_size=14) + + plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, + vlim=[0.8,1.2], qth = None, uid=uid_,path = data_dir, legend_size=4) + + + + + + md['mask_file']= mask_path + mask_name + md['mask'] = mask + 
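# --- Editor's note (not part of the original backup) ------------------------------
# The XSVS block above fits photon-count histograms (get_xsvs_fit) and converts the
# fitted M values to a speckle contrast via get_contrast.  The sketch below is a
# self-contained, assumed illustration of the negative-binomial photon statistics
# typically used for such fits (it is not the pyCHX implementation): for mean count
# <K> and M coherent modes, the contrast compared against g2(tau->0)-1 is beta = 1/M.
import numpy as np
from scipy.special import gammaln

def negative_binomial_pmf(K, kmean, M):
    """P(K) for speckle photon counts with mean kmean and M coherent modes."""
    K = np.asarray(K, dtype=float)
    lnP = (gammaln(K + M) - gammaln(M) - gammaln(K + 1)
           + M * np.log(M / (M + kmean)) + K * np.log(kmean / (M + kmean)))
    return np.exp(lnP)

K = np.arange(10)
print(negative_binomial_pmf(K, kmean=0.5, M=20.0))  # large M -> near-Poisson, low contrast
print('contrast beta = 1/M =', 1.0 / 20.0)
# -----------------------------------------------------------------------------------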
md['NOTEBOOK_FULL_PATH'] = None + md['good_start'] = good_start + md['bad_frame_list'] = bad_frame_list + md['avg_img'] = avg_img + md['roi_mask'] = roi_mask + + if scat_geometry == 'gi_saxs': + md['Qr'] = Qr + md['Qz'] = Qz + md['qval_dict'] = qval_dict + md['beam_center_x'] = inc_x0 + md['beam_center_y']= inc_y0 + md['beam_refl_center_x'] = refl_x0 + md['beam_refl_center_y'] = refl_y0 + + elif scat_geometry == 'saxs' or 'gi_waxs': + md['qr']= qr + #md['qr_edge'] = qr_edge + md['qval_dict'] = qval_dict + md['beam_center_x'] = center[1] + md['beam_center_y']= center[0] + + elif scat_geometry == 'ang_saxs': + md['qval_dict_v'] = qval_dict_v + md['qval_dict_p'] = qval_dict_p + md['beam_center_x'] = center[1] + md['beam_center_y']= center[0] + + + md['beg'] = FD.beg + md['end'] = FD.end + md['metadata_file'] = data_dir + 'md.csv-&-md.pkl' + psave_obj( md, data_dir + 'uid=%s_md'%uid[:6] ) #save the setup parameters + #psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters + save_dict_csv( md, data_dir + 'uid=%s_md.csv'%uid, 'w') + + Exdt = {} + if scat_geometry == 'gi_saxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): + Exdt[ k ] = v + elif scat_geometry == 'saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, q_saxs, iq_saxs, iqst, qt,roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + elif scat_geometry == 'gi_waxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + elif scat_geometry == 'ang_saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', + 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, + qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + + if run_waterfall:Exdt['wat'] = wat + if run_t_ROI_Inten:Exdt['times_roi'] = times_roi;Exdt['mean_int_sets']=mean_int_sets + if run_one_time: + if scat_geometry != 'ang_saxs': + for k,v in zip( ['taus','g2','g2_fit_paras'], [taus,g2,g2_fit_paras] ):Exdt[ k ] = v + else: + for k,v in zip( ['taus_v','g2_v','g2_fit_paras_v'], [taus_v,g2_v,g2_fit_paras_v] ):Exdt[ k ] = v + for k,v in zip( ['taus_p','g2_p','g2_fit_paras_p'], [taus_p,g2_p,g2_fit_paras_p] ):Exdt[ k ] = v + if run_two_time: + for k,v in zip( ['tausb','g2b','g2b_fit_paras', 'g12b'], [tausb,g2b,g2b_fit_paras,g12b] ):Exdt[ k ] = v + if run_four_time: + for k,v in zip( ['taus4','g4'], [taus4,g4] ):Exdt[ k ] = v + if run_xsvs: + for k,v in zip( ['spec_kmean','spec_pds','times_xsvs','spec_km_pds','contrast_factorL'], + [ spec_kmean,spec_pds,times_xsvs,spec_km_pds,contrast_factorL] ):Exdt[ k ] = v + + + export_xpcs_results_to_h5( 'uid=%s_Res.h5'%md['uid'], data_dir, export_dict = Exdt ) + #extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) + # Creat PDF Report + pdf_out_dir = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') + pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) + if run_xsvs: + pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) + 
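# --- Editor's usage sketch (not part of the original backup) ----------------------
# A hedged example of re-loading the HDF5 results exported above for later plotting;
# `data_dir` and `full_uid` are placeholders.  Excluding 'g12b' skips the large
# two-time array, mirroring the call made in plot_entries_from_uids earlier in this
# module.
total_res = extract_xpcs_results_from_h5(filename='uid=%s_Res.h5' % full_uid,
                                         import_dir=data_dir, exclude_keys=['g12b'])
taus, g2 = total_res['taus'], total_res['g2']   # one-time correlation results
qval_dict = total_res['qval_dict']              # q values per ROI
# -----------------------------------------------------------------------------------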
#pdf_filename + + print( data_dir, uid[:6], pdf_out_dir, pdf_filename, username ) + + make_pdf_report( data_dir, uid[:6], pdf_out_dir, pdf_filename, username, + run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=run_dose, + report_type= scat_geometry + ) + ## Attach the PDF report to Olog + if att_pdf_report: + os.environ['HTTPS_PROXY'] = 'https://proxy:8888' + os.environ['no_proxy'] = 'cs.nsls2.local,localhost,127.0.0.1' + pname = pdf_out_dir + pdf_filename + atch=[ Attachment(open(pname, 'rb')) ] + try: + update_olog_uid( uid= md['uid'], text='Add XPCS Analysis PDF Report', attachments= atch ) + except: + print("I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file."%pname) + + if show_plot: + plt.show() + #else: + # plt.close('all') + if clear_plot: + plt.close('all') + if return_res: + res = {} + if scat_geometry == 'saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','avg_img','mask', 'imgsum','bad_frame_list','roi_mask', 'qval_dict'], + [ md, q_saxs, iq_saxs, iqst, qt, avg_img,mask,imgsum,bad_frame_list,roi_mask, qval_dict ] ): + res[ k ] = v + + elif scat_geometry == 'ang_saxs': + for k,v in zip( [ 'md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', + 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [ md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, + qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + res[ k ] = v + + elif scat_geometry == 'gi_saxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): + res[ k ] = v + + elif scat_geometry == 'gi_waxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + res[ k ] = v + + if run_waterfall: + res['wat'] = wat + if run_t_ROI_Inten: + res['times_roi'] = times_roi; + res['mean_int_sets']=mean_int_sets + if run_one_time: + if scat_geometry != 'ang_saxs': + res['g2'] = g2 + res['taus']=taus + else: + res['g2_p'] = g2_p + res['taus_p']=taus_p + res['g2_v'] = g2_v + res['taus_v']=taus_v + + if run_two_time: + res['tausb'] = tausb + res['g12b'] = g12b + res['g2b'] = g2b + if run_four_time: + res['g4']= g4 + res['taus4']=taus4 + if run_xsvs: + res['spec_kmean']=spec_kmean + res['spec_pds']= spec_pds + res['contrast_factorL'] = contrast_factorL + res['times_xsvs']= times_xsvs + return res + +#uid = '3ff4ee' +#run_xpcs_xsvs_single( uid, run_pargs ) + + + + + diff --git a/pyCHX/backups/pyCHX-backup/Badpixels.py b/pyCHX/backups/pyCHX-backup/Badpixels.py new file mode 100644 index 0000000..7b7dc5b --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/Badpixels.py @@ -0,0 +1,167 @@ +"""Dev@Octo12,2017""" + +import numpy as np + +damaged_4Mpixel = np.array( + [ + [1157, 2167 - 1231], + [1158, 2167 - 1231], + [1159, 2167 - 1231], + [1160, 2167 - 1231], + [1157, 2167 - 1230], + [1158, 2167 - 1230], + [1159, 2167 - 1230], + [1160, 2167 - 1230], + [1161, 2167 - 1230], + [1157, 2167 - 1229], + [1158, 2167 - 1229], + [1159, 2167 - 1229], + [1160, 2167 - 1229], + [1159, 2167 - 1228], + [1160, 2167 - 1228], + [1159, 2167 - 1227], + [1160, 2167 - 1227], + [1159, 2167 - 1226], + ] +) + + +# March 1, 2018 +# uid = '92394a' +bad_pixel_4M = { + "92394a": np.array( + [ + 828861, + 882769, + 915813, + 928030, + 959317, + 959318, + 992598, + 
992599, + 998768, + 1009202, + 1036105, + 1143261, + 1149650, + 1259208, + 1321301, + 1426856, + 1426857, + 1586163, + 1774616, + 1936607, + 1936609, + 1936610, + 1938677, + 1938678, + 1938681, + 1940747, + 1946959, + 1955276, + 2105743, + 2105744, + 2107813, + 2107815, + 2109883, + 2118276, + 2118277, + 2149798, + 2194925, + 2283956, + 2284016, + 2284225, + 2284388, + 2290249, + 2292593, + 2298770, + 2304729, + 2317145, + 2344268, + 2346156, + 2356554, + 2360827, + 2364960, + 2408361, + 2453913, + 2470447, + 2476691, + 3462303, + 4155535, + ] + ), # 57 points, coralpor + "6cc34a": np.array([1058942, 2105743, 2105744, 2107813, 2107815, 2109883, 4155535]), # coralpor +} + + +## Create during 2018 Cycle 1 +BadPix_4M = np.array( + [ + 828861, + 882769, + 915813, + 928030, + 959317, + 959318, + 992598, + 992599, + 998768, + 1009202, + 1036105, + 1143261, + 1149650, + 1259208, + 1321301, + 1426856, + 1426857, + 1586163, + 1774616, + 1936607, + 1936609, + 1936610, + 1938677, + 1938678, + 1938681, + 1940747, + 1946959, + 1955276, + 2105743, + 2105744, + 2107813, + 2107815, + 2109883, + 2118276, + 2118277, + 2149798, + 2194925, + 2283956, + 2284016, + 2284225, + 2284388, + 2290249, + 2292593, + 2298770, + 2304729, + 2317145, + 2344268, + 2346156, + 2356554, + 2360827, + 2364960, + 2408361, + 2453913, + 2470447, + 2476691, + 3462303, + 4155535, + 1058942, + 2105743, + 2105744, + 2107813, + 2107815, + 2109883, + 4155535, + 2107814, + 3462303, + ] +) diff --git a/pyCHX/backups/pyCHX-backup/Compress_readerNew.py b/pyCHX/backups/pyCHX-backup/Compress_readerNew.py new file mode 100644 index 0000000..8d69158 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/Compress_readerNew.py @@ -0,0 +1,352 @@ +import numpy as np + +""" +This is coded by Julien Lhermitte @2018, May +""" + + +""" Description: + + This is code that Mark wrote to open the multifile format + in compressed mode, translated to python. + This seems to work for DALSA, FCCD and EIGER in compressed mode. + It should be included in the respective detector.i files + Currently, this refers to the compression mode being '6' + Each file is image descriptor files chunked together as follows: + Header (1024 bytes) + |--------------IMG N begin--------------| + | Dlen + |---------------------------------------| + | Pixel positions (dlen*4 bytes | + | (0 based indexing in file) | + |---------------------------------------| + | Pixel data(dlen*bytes bytes) | + | (bytes is found in header | + | at position 116) | + |--------------IMG N end----------------| + |--------------IMG N+1 begin------------| + |----------------etc.....---------------| + + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + + +""" + + +# TODO : split into RO and RW classes +class Multifile: + """ + Re-write multifile from scratch. + + """ + + HEADER_SIZE = 1024 + + def __init__(self, filename, mode="rb", nbytes=2): + """ + Prepare a file for reading or writing. 
+ mode : either 'rb' or 'wb' + numimgs: num images + """ + if mode != "rb" and mode != "wb": + raise ValueError("Error, mode must be 'rb' or 'wb'" "got : {}".format(mode)) + self._filename = filename + self._mode = mode + + self._nbytes = nbytes + if nbytes == 2: + self._dtype = " self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # read in bytes + cur = self.frame_indexes[n] + header_raw = self._fd[cur : cur + self.HEADER_SIZE] + header = dict() + header["rows"] = np.frombuffer(header_raw[108:112], dtype=self._dtype)[0] + header["cols"] = np.frombuffer(header_raw[112:116], dtype=self._dtype)[0] + header["nbytes"] = np.frombuffer(header_raw[116:120], dtype=self._dtype)[0] + header["dlen"] = np.frombuffer(header_raw[152:156], dtype=self._dtype)[0] + # print("dlen: {}\trows: {}\tcols: {}\tnbytes: {}\n"\ + # .format(header['dlen'], header['rows'], header['cols'], + # header['nbytes'])) + + self._dlen = header["dlen"] + self._nbytes = header["nbytes"] + + return header + + def _read_raw(self, n): + """Read from raw. + Reads from current cursor in file. + """ + if n > self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + cur = self.frame_indexes[n] + 1024 + dlen = self._read_header(n)["dlen"] + + pos = self._fd[cur : cur + dlen * 4] + cur += dlen * 4 + pos = np.frombuffer(pos, dtype=" nbytes + vals = self._fd[cur : cur + dlen * 2] + # not necessary + cur += dlen * 2 + vals = np.frombuffer(vals, dtype=self._dtype) + + return pos, vals + + def _write_header(self, dlen, rows, cols): + """Write header at current position.""" + self._rows = rows + self._cols = cols + self._dlen = dlen + # byte array + header = np.zeros(self.HEADER_SIZE, dtype="c") + # write the header dlen + header[152:156] = np.array([dlen], dtype=" self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # dlen is 4 bytes + cur = self.frame_indexes[n] + dlen = np.frombuffer(self._fd[cur : cur + 4], dtype=" nbytes + vals = self._fd[cur : cur + dlen * self.nbytes] + vals = np.frombuffer(vals, dtype=self.valtype) + + return pos, vals + + def rdframe(self, n): + # read header then image + pos, vals = self._read_raw(n) + img = np.zeros((self._rows * self._cols,)) + img[pos] = vals + return img.reshape((self._rows, self._cols)) + + def rdrawframe(self, n): + # read header then image + return self._read_raw(n) + + +class MultifileBNLCustom(MultifileBNL): + def __init__(self, filename, beg=0, end=None, **kwargs): + super().__init__(filename, **kwargs) + self.beg = beg + if end is None: + end = self.Nframes - 1 + self.end = end + + def rdframe(self, n): + if n > self.end: + raise IndexError("Index out of range") + return super().rdframe(n - self.beg) + + def rdrawframe(self, n): + return super().rdrawframe(n - self.beg) diff --git a/pyCHX/backups/pyCHX-backup/Create_Report.py b/pyCHX/backups/pyCHX-backup/Create_Report.py new file mode 100644 index 0000000..bfb7b30 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/Create_Report.py @@ -0,0 +1,2166 @@ +""" +Yugang Created at Aug 08, 2016, CHX-NSLS-II + +Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline + +How to use: +python Create_Report.py full_file_path uid output_dir (option) + +An exmplae to use: +python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 + +python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 
/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/ + +""" + + +def check_dict_keys(dicts, key): + if key not in list(dicts.keys()): + dicts[key] = "unknown" + + +import os +import sys +from datetime import datetime +from time import time + +import h5py +import numpy as np +import pandas as pds +from PIL import Image +from reportlab.lib.colors import black, blue, brown, green, pink, red, white +from reportlab.lib.pagesizes import A4, letter +from reportlab.lib.styles import getSampleStyleSheet +from reportlab.lib.units import cm, inch, mm +from reportlab.pdfgen import canvas + +from pyCHX.chx_generic_functions import pload_obj + +# from reportlab.platypus import Image, Paragraph, Table + + +def add_one_line_string(c, s, top, left=30, fontsize=11): + if (fontsize * len(s)) > 1000: + fontsize = 1000.0 / (len(s)) + c.setFont("Helvetica", fontsize) + c.drawString(left, top, s) + + +def add_image_string( + c, imgf, data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top, return_=False +): + + image = data_dir + imgf + if os.path.exists(image): + im = Image.open(image) + ratio = float(im.size[1]) / im.size[0] + height = img_height + width = height / ratio + # if width>400: + # width = 350 + # height = width*ratio + c.drawImage(image, img_left, img_top, width=width, height=height, mask=None) + + c.setFont("Helvetica", 16) + c.setFillColor(blue) + c.drawString(str1_left, str1_top, str1) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(str2_left, str2_top, "filename: %s" % imgf) + if return_: + return height / ratio + + else: + c.setFillColor(blue) + c.drawString(str1_left, str1_top, str1) + c.setFillColor(red) + c.drawString(str1_left, str1_top - 40, "-->Not Calculated!") + + +class create_pdf_report(object): + """Aug 16, YG@CHX-NSLS-II + Create a pdf report by giving data_dir, uid, out_dir + data_dir: the input data directory, including all necessary images + the images names should be: + meta_file = 'uid=%s-md'%uid + avg_img_file = 'uid=%s--img-avg-.png'%uid + ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid + qiq_file = 'uid=%s--Circular-Average-.png'%uid + ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid + + Iq_t_file = 'uid=%s--Iq-t-.png'%uid + img_sum_t_file = 'uid=%s--img-sum-t.png'%uid + wat_file= 'uid=%s--Waterfall-.png'%uid + Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid + + g2_file = 'uid=%s--g2-.png'%uid + g2_fit_file = 'uid=%s--g2--fit-.png'%uid + q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid + + two_time_file = 'uid=%s--Two-time-.png'%uid + two_g2_file = 'uid=%s--g2--two-g2-.png'%uid + + uid: the unique id + out_dir: the output directory + report_type: + 'saxs': report saxs results + 'gisaxs': report gisaxs results + + + Output: + A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder + """ + + def __init__( + self, + data_dir, + uid, + out_dir=None, + filename=None, + load=True, + user=None, + report_type="saxs", + md=None, + res_h5_filename=None, + ): + from datetime import datetime + + self.data_dir = data_dir + self.uid = uid + self.md = md + # print(md) + if user is None: + user = "chx" + self.user = user + if out_dir is None: + out_dir = data_dir + if not os.path.exists(out_dir): + os.makedirs(out_dir) + self.out_dir = out_dir + + self.styles = getSampleStyleSheet() + self.width, self.height = letter + + self.report_type = report_type + dt = datetime.now() + CurTime = "%02d/%02d/%s/-%02d/%02d/" % (dt.month, dt.day, dt.year, dt.hour, dt.minute) + self.CurTime = CurTime + if 
filename is None: + filename = "XPCS_Analysis_Report_for_uid=%s.pdf" % uid + filename = out_dir + filename + c = canvas.Canvas(filename, pagesize=letter) + self.filename = filename + self.res_h5_filename = res_h5_filename + # c.setTitle("XPCS Analysis Report for uid=%s"%uid) + c.setTitle(filename) + self.c = c + if load: + self.load_metadata() + + def load_metadata(self): + uid = self.uid + data_dir = self.data_dir + # load metadata + meta_file = "uid=%s_md" % uid + self.metafile = data_dir + meta_file + if self.md is None: + md = pload_obj(data_dir + meta_file) + self.md = md + else: + md = self.md + # print('Get md from giving md') + # print(md) + self.sub_title_num = 0 + uid_g2 = None + uid_c12 = None + if "uid_g2" in list(md.keys()): + uid_g2 = md["uid_g2"] + if "uid_c12" in list(md.keys()): + uid_c12 = md["uid_c12"] + + """global definition""" + + if "beg_OneTime" in list(md.keys()): + beg_OneTime = md["beg_OneTime"] + end_OneTime = md["end_OneTime"] + else: + beg_OneTime = None + end_OneTime = None + + if "beg_TwoTime" in list(md.keys()): + beg_TwoTime = md["beg_TwoTime"] + end_TwoTime = md["end_TwoTime"] + else: + beg_TwoTime = None + end_TwoTime = None + + try: + beg = md["beg"] + end = md["end"] + uid_ = uid + "_fra_%s_%s" % (beg, end) + if beg_OneTime is None: + uid_OneTime = uid + "_fra_%s_%s" % (beg, end) + else: + uid_OneTime = uid + "_fra_%s_%s" % (beg_OneTime, end_OneTime) + if beg_TwoTime is None: + uid_TwoTime = uid + "_fra_%s_%s" % (beg, end) + else: + uid_TwoTime = uid + "_fra_%s_%s" % (beg_TwoTime, end_TwoTime) + + except: + uid_ = uid + uid_OneTime = uid + if beg is None: + uid_ = uid + uid_OneTime = uid + + self.avg_img_file = "uid=%s_img_avg.png" % uid + self.ROI_on_img_file = "uid=%s_ROI_on_Image.png" % uid + + self.qiq_file = "uid=%s_q_Iq.png" % uid + self.qiq_fit_file = "uid=%s_form_factor_fit.png" % uid + # self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid + if self.report_type == "saxs" or self.report_type == "ang_saxs": + self.ROI_on_Iq_file = "uid=%s_ROI_on_Iq.png" % uid + + elif self.report_type == "gi_saxs": + self.ROI_on_Iq_file = "uid=%s_Qr_ROI.png" % uid + + self.Iq_t_file = "uid=%s_q_Iqt.png" % uid + self.img_sum_t_file = "uid=%s_img_sum_t.png" % uid + self.wat_file = "uid=%s_waterfall.png" % uid + self.Mean_inten_t_file = "uid=%s_t_ROIs.png" % uid + self.oavs_file = "uid=%s_OAVS.png" % uid + + if uid_g2 is None: + uid_g2 = uid_OneTime + self.g2_file = "uid=%s_g2.png" % uid_g2 + self.g2_fit_file = "uid=%s_g2_fit.png" % uid_g2 + # print( self.g2_fit_file ) + self.g2_new_page = False + self.g2_fit_new_page = False + if self.report_type == "saxs": + jfn = "uid=%s_g2.png" % uid_g2 + if os.path.exists(data_dir + jfn): + self.g2_file = jfn + else: + jfn = "uid=%s_g2__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): + self.g2_file = jfn + self.g2_new_page = True + # self.g2_new_page = True + jfn = "uid=%s_g2_fit.png" % uid_g2 + if os.path.exists(data_dir + jfn): + self.g2_fit_file = jfn + # self.g2_fit_new_page = True + else: + jfn = "uid=%s_g2_fit__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): + self.g2_fit_file = jfn + self.g2_fit_new_page = True + + else: + jfn = "uid=%s_g2__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): + self.g2_file = jfn + self.g2_new_page = True + jfn = "uid=%s_g2_fit__joint.png" % uid_g2 + if os.path.exists(data_dir + jfn): + self.g2_fit_file = jfn + self.g2_fit_new_page = True + + self.q_rate_file = "uid=%s_Q_Rate_fit.png" % uid_g2 + self.q_rate_loglog_file = "uid=%s_Q_Rate_loglog.png" % uid_g2 + 
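As a usage sketch, create_pdf_report can also be driven directly, mirroring the page-one call sequence that make_pdf_report (defined further below in this file) uses; the directory, uid, and user are placeholders, and data_dir is assumed to already contain the uid=<uid>_md metadata file and the plot PNGs named in the class docstring:

    from pyCHX.Create_Report import create_pdf_report   # assumed import path

    data_dir = '/XF11ID/analysis/2016_2/username/Results/af8f66/'   # hypothetical
    c = create_pdf_report(data_dir, uid='af8f66', out_dir=data_dir, user='username', report_type='saxs')
    c.report_header(page=1)     # title, page number, time stamp
    c.report_meta(top=730)      # metadata block
    c.report_static(top=540)    # average image and circular average
    c.report_ROI(top=290)       # ROI definitions
    c.save_page()
    c.done()                    # prints the output PDF filename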
self.g2_q_fitpara_file = "uid=%s_g2_q_fitpara_plot.png" % uid_g2 + + # print( self.q_rate_file ) + if uid_c12 is None: + uid_c12 = uid_ + self.q_rate_two_time_fit_file = "uid=%s_two_time_Q_Rate_fit.png" % uid_c12 + # print( self.q_rate_two_time_fit_file ) + + self.two_time_file = "uid=%s_Two_time.png" % uid_c12 + self.two_g2_file = "uid=%s_g2_two_g2.png" % uid_c12 + + if self.report_type == "saxs": + + jfn = "uid=%s_g2_two_g2.png" % uid_c12 + self.two_g2_new_page = False + if os.path.exists(data_dir + jfn): + # print( 'Here we go') + self.two_g2_file = jfn + # self.two_g2_new_page = True + else: + jfn = "uid=%s_g2_two_g2__joint.png" % uid_c12 + self.two_g2_new_page = False + if os.path.exists(data_dir + jfn): + # print( 'Here we go') + self.two_g2_file = jfn + self.two_g2_new_page = True + else: + jfn = "uid=%s_g2_two_g2__joint.png" % uid_c12 + self.two_g2_new_page = False + if os.path.exists(data_dir + jfn): + # print( 'Here we go') + self.two_g2_file = jfn + self.two_g2_new_page = True + + self.four_time_file = "uid=%s_g4.png" % uid_ + jfn = "uid=%s_g4__joint.png" % uid_ + self.g4_new_page = False + if os.path.exists(data_dir + jfn): + self.four_time_file = jfn + self.g4_new_page = True + + self.xsvs_fit_file = "uid=%s_xsvs_fit.png" % uid_ + self.contrast_file = "uid=%s_contrast.png" % uid_ + self.dose_file = "uid=%s_dose_analysis.png" % uid_ + + jfn = "uid=%s_dose_analysis__joint.png" % uid_ + self.dose_file_new_page = False + if os.path.exists(data_dir + jfn): + self.dose_file = jfn + self.dose_file_new_page = True + + # print( self.dose_file ) + if False: + self.flow_g2v = "uid=%s_1a_mqv_g2_v_fit.png" % uid_ + self.flow_g2p = "uid=%s_1a_mqp_g2_p_fit.png" % uid_ + self.flow_g2v_rate_fit = "uid=%s_v_fit_rate_Q_Rate_fit.png" % uid_ + self.flow_g2p_rate_fit = "uid=%s_p_fit_rate_Q_Rate_fit.png" % uid_ + + if True: + self.two_time = "uid=%s_pv_two_time.png" % uid_ + # self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ + + # self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_ + # self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ + self.flow_g2_g2b_p = "uid=%s_g2_two_g2_p.png" % uid_ + self.flow_g2_g2b_v = "uid=%s_g2_two_g2_v.png" % uid_ + + self.flow_g2bv_rate_fit = "uid=%s_vertb_Q_Rate_fit.png" % uid_ + self.flow_g2bp_rate_fit = "uid=%s_parab_Q_Rate_fit.png" % uid_ + + self.flow_g2v = "uid=%s_g2_v_fit.png" % uid_ + self.flow_g2p = "uid=%s_g2_p_fit.png" % uid_ + self.flow_g2v_rate_fit = "uid=%s_vert_Q_Rate_fit.png" % uid_ + self.flow_g2p_rate_fit = "uid=%s_para_Q_Rate_fit.png" % uid_ + + # self.report_header(page=1, top=730, new_page=False) + # self.report_meta(new_page=False) + + self.q2Iq_file = "uid=%s_q2_iq.png" % uid + self.iq_invariant_file = "uid=%s_iq_invariant.png" % uid + + def report_invariant(self, top=300, new_page=False): + """create the invariant analysis report + two images: + ROI on average intensity image + ROI on circular average + """ + uid = self.uid + c = self.c + # add sub-title, static images + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 230 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
I(q) Invariant Analysis" % self.sub_title_num) # add title + # add q2Iq + c.setFont("Helvetica", 14) + imgf = self.q2Iq_file + # print( imgf ) + label = "q^2*I(q)" + add_image_string( + c, + imgf, + self.data_dir, + img_left=60, + img_top=top - ds * 1.15, + img_height=180, + str1_left=110, + str1_top=top - 35, + str1=label, + str2_left=60, + str2_top=top - 320, + ) + + # add iq_invariant + imgf = self.iq_invariant_file + img_height = 180 + img_left, img_top = 320, top - ds * 1.15 + str1_left, str1_top, str1 = 420, top - 35, "I(q) Invariant" + str2_left, str2_top = 350, top - 320 + + # print ( imgf ) + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + if new_page: + c.showPage() + c.save() + + def report_header(self, page=1, new_page=False): + """create headers, including title/page number""" + c = self.c + CurTime = self.CurTime + uid = self.uid + user = self.user + c.setFillColor(black) + c.setFont("Helvetica", 14) + # add page number + c.drawString(250, 10, "Page--%s--" % (page)) + # add time stamp + + # c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) + s_ = "Created at %s@CHX-By-%s" % (CurTime, user) + add_one_line_string(c, s_, 10, left=350, fontsize=11) + + # add title + # c.setFont("Helvetica", 22) + title = "XPCS Analysis Report for uid=%s" % uid + c.setFont("Helvetica", 1000 / (len(title))) + # c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title + c.drawString(50, 760, "XPCS Analysis Report for uid=%s" % uid) # add title + # add a line under title + c.setStrokeColor(red) + c.setLineWidth(width=1.5) + c.line(50, 750, 550, 750) + if new_page: + c.showPage() + c.save() + + def report_meta(self, top=740, new_page=False): + """create the meta data report, + the meta data include: + uid + Sample: + Measurement + Wavelength + Detector-Sample Distance + Beam Center + Mask file + Data dir + Pipeline notebook + """ + + c = self.c + # load metadata + md = self.md + try: + uid = md["uid"] + except: + uid = self.uid + # add sub-title, metadata + c.setFont("Helvetica", 20) + ds = 15 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Metadata" % self.sub_title_num) # add title + top = top - 5 + fontsize = 11 + c.setFont("Helvetica", fontsize) + + nec_keys = [ + "sample", + "start_time", + "stop_time", + "Measurement", + "exposure time", + "incident_wavelength", + "cam_acquire_t", + "frame_time", + "detector_distance", + "feedback_x", + "feedback_y", + "shutter mode", + "beam_center_x", + "beam_center_y", + "beam_refl_center_x", + "beam_refl_center_y", + "mask_file", + "bad_frame_list", + "transmission", + "roi_mask_file", + ] + for key in nec_keys: + check_dict_keys(md, key) + + try: # try exp time from detector + exposuretime = md["count_time"] # exposure time in sec + except: + exposuretime = md["cam_acquire_time"] # exposure time in sec + + try: # try acq time from detector + acquisition_period = md["frame_time"] + except: + try: + acquisition_period = md["acquire period"] + except: + uid = md["uid"] + acquisition_period = float(db[uid]["start"]["acquire period"]) + + s = [] + s.append("UID: %s" % uid) ###line 1, for uid + s.append("Sample: %s" % md["sample"]) ####line 2 sample + s.append( + "Data Acquisition From: %s To: %s" % (md["start_time"], md["stop_time"]) + ) ####line 3 Data Acquisition time + s.append("Measurement: %s" % md["Measurement"]) ####line 4 'Measurement + + # print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) + # print(acquisition_period) + s.append( + "Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms" + % ( + md["incident_wavelength"], + int(md["number of images"]), + round(float(exposuretime) * 1000, 4), + round(float(acquisition_period) * 1000, 4), + ) + ) ####line 5 'lamda... + + s.append( + "Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s" + % (md["detector_distance"], md["feedback_x"], md["feedback_y"], md["shutter mode"]) + ) ####line 6 'Detector-Sample Distance.. + if self.report_type == "saxs": + s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) + elif self.report_type == "gi_saxs": + s7 = ( + "Incident Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) + + " || " + + "Reflect Center: [%s, %s] (pixel)" % (md["beam_refl_center_x"], md["beam_refl_center_y"]) + ) + elif self.report_type == "ang_saxs" or self.report_type == "gi_waxs": + s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) + else: + s7 = "" + + s7 += " || " + "BadLen: %s" % len(md["bad_frame_list"]) + s7 += " || " + "Transmission: %s" % md["transmission"] + s.append(s7) ####line 7 'Beam center... 
+ m = "Mask file: %s" % md["mask_file"] + " || " + "ROI mask file: %s" % md["roi_mask_file"] + # s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename + # s.append( ) ####line 8 mask filename + s.append(m) + + if self.res_h5_filename is not None: + self.data_dir_ = self.data_dir + self.res_h5_filename + else: + self.data_dir_ = self.data_dir + s.append("Analysis Results Dir: %s" % self.data_dir_) ####line 9 results folder + + s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) ####line 10 metadata folder + try: + s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) ####line 11 notebook folder + except: + pass + # print( 'here' ) + line = 1 + for s_ in s: + add_one_line_string(c, s_, top - ds * line, left=30, fontsize=fontsize) + line += 1 + + if new_page: + c.showPage() + c.save() + + def report_static(self, top=560, new_page=False, iq_fit=False): + """create the static analysis report + two images: + average intensity image + circular average + + """ + # add sub-title, static images + + c = self.c + c.setFont("Helvetica", 20) + uid = self.uid + + ds = 220 + self.sub_title_num += 1 + c.drawString(10, top, "%s. Static Analysis" % self.sub_title_num) # add title + + # add average image + c.setFont("Helvetica", 14) + + imgf = self.avg_img_file + + if self.report_type == "saxs": + ipos = 60 + dshift = 0 + elif self.report_type == "gi_saxs": + ipos = 200 + dshift = 140 + elif self.report_type == "ang_saxs": + ipos = 200 + dshift = 140 + else: + ipos = 200 + dshift = 140 + + add_image_string( + c, + imgf, + self.data_dir, + img_left=ipos, + img_top=top - ds, + img_height=180, + str1_left=90 + dshift, + str1_top=top - 35, + str1="Average Intensity Image", + str2_left=80 + dshift, + str2_top=top - 230, + ) + + # add q_Iq + if self.report_type == "saxs": + imgf = self.qiq_file + # print(imgf) + if iq_fit: + imgf = self.qiq_fit_file + label = "Circular Average" + lab_pos = 390 + fn_pos = 320 + add_image_string( + c, + imgf, + self.data_dir, + img_left=320, + img_top=top - ds, + img_height=180, + str1_left=lab_pos, + str1_top=top - 35, + str1=label, + str2_left=fn_pos, + str2_top=top - 230, + ) + else: + if False: + imgf = self.ROI_on_Iq_file # self.qr_1d_file + label = "Qr-1D" + lab_pos = 420 + fn_pos = 350 + + add_image_string( + c, + imgf, + self.data_dir, + img_left=320, + img_top=top - ds, + img_height=180, + str1_left=lab_pos, + str1_top=top - 35, + str1=label, + str2_left=fn_pos, + str2_top=top - 230, + ) + if new_page: + c.showPage() + c.save() + + def report_ROI(self, top=300, new_page=False): + """create the static analysis report + two images: + ROI on average intensity image + ROI on circular average + """ + uid = self.uid + c = self.c + # add sub-title, static images + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 230 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Define of ROI" % self.sub_title_num) # add title + # add ROI on image + c.setFont("Helvetica", 14) + imgf = self.ROI_on_img_file + label = "ROI on Image" + add_image_string( + c, + imgf, + self.data_dir, + img_left=60, + img_top=top - ds * 1.15, + img_height=240, + str1_left=110, + str1_top=top - 35, + str1=label, + str2_left=60, + str2_top=top - 260, + ) + + # add q_Iq + if self.report_type == "saxs" or self.report_type == "gi_saxs" or self.report_type == "ang_saxs": + imgf = self.ROI_on_Iq_file + img_height = 180 + img_left, img_top = 320, top - ds + str1_left, str1_top, str1 = 420, top - 35, "ROI on Iq" + str2_left, str2_top = 350, top - 260 + + # print ( imgf ) + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + if new_page: + c.showPage() + c.save() + + def report_time_analysis(self, top=720, new_page=False): + """create the time dependent analysis report + four images: + each image total intensity as a function of time + iq~t + waterfall + mean intensity of each ROI as a function of time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + top1 = top + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. Time Dependent Plot" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + + top = top1 - 160 + + # add img_sum_t + if self.report_type == "saxs": + ipos = 80 + elif self.report_type == "gi_saxs": + ipos = 200 + elif self.report_type == "ang_saxs": + ipos = 200 + else: + ipos = 200 + + imgf = self.img_sum_t_file + img_height = 140 + img_left, img_top = ipos, top + str1_left, str1_top, str1 = ipos + 60, top1 - 20, "img sum ~ t" + str2_left, str2_top = ipos, top - 5 + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + # plot iq~t + if self.report_type == "saxs": + imgf = self.Iq_t_file + image = self.data_dir + imgf + + img_height = 140 + img_left, img_top = 350, top + str1_left, str1_top, str1 = 420, top1 - 20, "iq ~ t" + str2_left, str2_top = 360, top - 5 + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + elif self.report_type == "gi_saxs": + pass + + top = top1 - 340 + # add waterfall plot + imgf = self.wat_file + + img_height = 160 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 140, top + img_height, "waterfall plot" + str2_left, str2_top = 80, top - 5 + + if self.report_type != "ang_saxs": + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + else: + pass + + # add mean-intensity of each roi + imgf = self.Mean_inten_t_file + + img_height = 160 + img_left, img_top = 360, top + str1_left, str1_top, str1 = 330, top + img_height, "Mean-intensity-of-each-ROI" + str2_left, str2_top = 310, top - 5 + if self.report_type != "ang_saxs": + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + else: + pass + + if new_page: + c.showPage() + c.save() + + def report_oavs(self, top=350, oavs_file=None, new_page=False): + """create the oavs images report""" + + c = self.c + uid = self.uid + # add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num += 1 + c.drawString(10, 
top, "%s. OAVS Images" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + # add g2 plot + if oavs_file is None: + imgf = self.oavs_file + else: + imgf = oavs_file + # print(self.data_dir + imgf) + + if os.path.exists(self.data_dir + imgf): + im = Image.open(self.data_dir + imgf) + ratio = float(im.size[1]) / im.size[0] + img_width = 600 + img_height = img_width * ratio # img_height + # width = height/ratio + + if not new_page: + # img_height= 550 + top = top - 600 + str2_left, str2_top = 80, top - 400 + img_left, img_top = 1, top + + if new_page: + # img_height= 150 + top = top - img_height - 50 + str2_left, str2_top = 80, top - 50 + img_left, img_top = 10, top + + str1_left, str1_top, str1 = 150, top + img_height, "OAVS images" + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + # print( imgf,self.data_dir ) + print(img_width, img_height) + + def report_one_time(self, top=350, g2_fit_file=None, q_rate_file=None, new_page=False): + """create the one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + """ + + c = self.c + uid = self.uid + # add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. One Time Correlation Function" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + # add g2 plot + if g2_fit_file is None: + imgf = self.g2_fit_file + else: + imgf = g2_fit_file + + if self.report_type != "ang_saxs": + img_height = 300 + top = top - 320 + str2_left, str2_top = 80, top - 0 + + else: + img_height = 550 + top = top - 600 + str2_left, str2_top = 80, top - 400 + # add one_time caculation + img_left, img_top = 1, top + if self.g2_fit_new_page or self.g2_new_page: + + img_height = 550 + top = top - 250 + str2_left, str2_top = 80, top - 0 + img_left, img_top = 60, top + + str1_left, str1_top, str1 = 150, top + img_height, "g2 fit plot" + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + # print( imgf,self.data_dir ) + # add g2 plot fit + # print(self.q_rate_file ) + if os.path.isfile(self.data_dir + self.q_rate_file): + # print('here') + # print(self.q_rate_file ) + top = top + 70 # + if q_rate_file is None: + imgf = self.q_rate_file + else: + imgf = q_rate_file + if self.report_type != "ang_saxs": + # print(img_width) + if img_width > 400: + img_height = 90 + else: + img_height = 180 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" + else: + img_height = 300 + img_left, img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 180, "q-rate fit plot" + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + img_height = 180 + img_left, img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + else: + top = top + 320 # + if q_rate_file is None: + imgf = self.q_rate_loglog_file + else: + imgf = q_rate_file + # print(imgf) + if self.report_type != "ang_saxs": + # 
print(img_width) + if img_width > 400: + img_height = 90 / 2 + else: + img_height = 180 / 2 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate loglog plot" + else: + img_height = 300 / 2 + img_left, img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 180, "q-rate loglog plot" + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + 50 + img_height = 180 / 1.5 + img_left, img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 120, "q-rate loglog plot" + + # print('here') + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + top = top - 100 # + if q_rate_file is None: + imgf = self.g2_q_fitpara_file + else: + imgf = q_rate_file + if self.report_type != "ang_saxs": + # print(img_width) + if img_width > 400: + img_height = 90 + else: + img_height = 180 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "g2 fit para" + else: + img_height = 300 + img_left, img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 180, "g2 fit para" + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + img_height = 180 * 1.5 + img_left, img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top, str1 = 450, top + 280, "g2 fit para" + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + if new_page: + c.showPage() + c.save() + + def report_mulit_one_time(self, top=720, new_page=False): + """create the mulit one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + """ + c = self.c + uid = self.uid + # add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
One Time Correlation Function" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + # add g2 plot + top = top - 320 + + imgf = self.g2_fit_file + image = self.data_dir + imgf + if not os.path.exists(image): + image = self.data_dir + self.g2_file + im = Image.open(image) + ratio = float(im.size[1]) / im.size[0] + height = 300 + c.drawImage(image, 1, top, width=height / ratio, height=height, mask="auto") + # c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None ) + c.setFont("Helvetica", 16) + c.setFillColor(blue) + c.drawString(150, top + height, "g2 fit plot") + + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(80, top - 0, "filename: %s" % imgf) + + # add g2 plot fit + top = top + 70 # + imgf = self.q_rate_file + image = self.data_dir + imgf + if os.path.exists(image): + im = Image.open(image) + ratio = float(im.size[1]) / im.size[0] + height = 180 + c.drawImage(image, 350, top, width=height / ratio, height=height, mask="auto") + + c.setFont("Helvetica", 16) + c.setFillColor(blue) + c.drawString(450, top + 230, "q-rate fit plot") + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(380, top - 5, "filename: %s" % imgf) + + if new_page: + c.showPage() + c.save() + + def report_two_time(self, top=720, new_page=False): + """create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. Two Time Correlation Function" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + + top1 = top + top = top1 - 330 + # add q_Iq_t + imgf = self.two_time_file + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 300, "two time correlation function" + str2_left, str2_top = 180, top - 10 + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + + top = top - 340 + # add q_Iq_t + imgf = self.two_g2_file + + if True: # not self.two_g2_new_page: + + img_height = 300 + img_left, img_top = 100 - 70, top + str1_left, str1_top, str1 = 210 - 70, top + 310, "compared g2" + str2_left, str2_top = 180 - 70, top - 10 + + if self.two_g2_new_page: + img_left, img_top = 100, top + print(imgf) + img_width = add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + return_=True, + ) + # print(imgf) + top = top + 50 + imgf = self.q_rate_two_time_fit_file + # print(imgf, img_width, top) + if img_width < 400: + img_height = 140 + img_left, img_top = 350, top + 30 + str2_left, str2_top = 380 - 80, top - 5 + str1_left, str1_top, str1 = 450 - 80, top + 230, "q-rate fit from two-time" + + else: + img_height = 90 + img_left, img_top = img_width - 10, top # 350, top + str2_left, str2_top = img_width + 50, top - 5 # 380, top - 5 + str1_left, str1_top, str1 = 450, top + 230, "q-rate fit plot" + + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + if new_page: + c.showPage() + c.save() + + def report_four_time(self, top=720, new_page=False): + """create the one time correlation function report + Two images: + Two Time Correlation Function + two 
one-time correlatoin function from multi-one-time and from diagonal two-time + """ + + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. Four Time Correlation Function" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + + top1 = top + top = top1 - 330 + # add q_Iq_t + imgf = self.four_time_file + + if not self.g4_new_page: + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 300, "four time correlation function" + str2_left, str2_top = 180, top - 10 + else: + img_height = 600 + top -= 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 300 - 250, "four time correlation function" + str2_left, str2_top = 180, top - 10 + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + if new_page: + c.showPage() + c.save() + + def report_dose(self, top=720, new_page=False): + + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. Dose Analysis" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + + top1 = top + top = top1 - 530 + # add q_Iq_t + imgf = self.dose_file + + img_height = 500 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 180, top + 500, "dose analysis" + str2_left, str2_top = 180, top - 10 + + # print( self.data_dir + self.dose_file) + if os.path.exists(self.data_dir + imgf): + # print( self.dose_file) + im = Image.open(self.data_dir + imgf) + ratio = float(im.size[1]) / im.size[0] + width = img_height / ratio + # print(width) + if width > 450: + img_height = 450 * ratio + + if self.dose_file_new_page: + # img_left,img_top = 180, top + img_left, img_top = 100, top + + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + if new_page: + c.showPage() + c.save() + + def report_flow_pv_g2(self, top=720, new_page=False): + """create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Flow One Time Analysis" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + + top1 = top + top = top1 - 330 + # add xsvs fit + + imgf = self.flow_g2v + image = self.data_dir + imgf + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Vertical Flow" + str2_left, str2_top = 180, top - 10 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2v_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + top = top - 340 + # add contrast fit + imgf = self.flow_g2p + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Parallel Flow" + str2_left, str2_top = 180, top - 10 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2p_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + if new_page: + c.showPage() + c.save() + + def report_flow_pv_two_time(self, top=720, new_page=False): + """create the two time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Flow One &Two Time Comparison" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + + top1 = top + top = top1 - 330 + # add xsvs fit + + if False: + imgf = self.two_time + image = self.data_dir + imgf + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "Two_time" + str2_left, str2_top = 180, top - 10 + add_image_string( + c, + imgf, + self.data_dir, + img_left, + img_top, + img_height, + str1_left, + str1_top, + str1, + str2_left, + str2_top, + ) + + imgf = self.flow_g2_g2b_p + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Vertical Flow by two-time" + str2_left, str2_top = 180, top - 10 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2bp_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + top = top - 340 + # add contrast fit + imgf = self.flow_g2_g2b_v + + img_height = 300 + img_left, img_top = 80, top + str1_left, str1_top, str1 = 210, top + 300, "XPCS Parallel Flow by two-time" + str2_left, str2_top = 180, top - 10 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + imgf = self.flow_g2bv_rate_fit + img_height = 200 + img_left, img_top = 350, top + 50 + str1_left, str1_top, str1 = 210, top + 300, "" + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( + c, imgf, self.data_dir, img_left, img_top, img_height, str1_left, str1_top, str1, str2_left, str2_top + ) + + if new_page: + c.showPage() + c.save() + + def report_xsvs(self, top=720, new_page=False): + """create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + """ + c = self.c + uid = self.uid + # add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Visibility Analysis" % self.sub_title_num) # add title + c.setFont("Helvetica", 14) + top = top - 330 + # add xsvs fit + imgf = self.xsvs_fit_file + add_image_string( + c, + imgf, + self.data_dir, + img_left=100, + img_top=top, + img_height=300, + str1_left=210, + str1_top=top + 300, + str1="XSVS_Fit_by_Negtive_Binomal Function", + str2_left=180, + str2_top=top - 10, + ) + + # add contrast fit + top = top - 340 + imgf = self.contrast_file + add_image_string( + c, + imgf, + self.data_dir, + img_left=100, + img_top=top, + img_height=300, + str1_left=210, + str1_top=top + 310, + str1="contrast get from xsvs and xpcs", + str2_left=180, + str2_top=top - 10, + ) + + if False: + top1 = top + top = top1 - 330 + # add xsvs fit + imgf = self.xsvs_fit_file + image = self.data_dir + imgf + im = Image.open(image) + ratio = float(im.size[1]) / im.size[0] + height = 300 + c.drawImage(image, 100, top, width=height / ratio, height=height, mask=None) + c.setFont("Helvetica", 16) + c.setFillColor(blue) + c.drawString(210, top + 300, "XSVS_Fit_by_Negtive_Binomal Function") + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(180, top - 10, "filename: %s" % imgf) + top = top - 340 + # add contrast fit + imgf = self.contrast_file + image = self.data_dir + imgf + im = Image.open(image) + ratio = float(im.size[1]) / im.size[0] + height = 300 + c.drawImage(image, 100, top, width=height / ratio, height=height, mask=None) + + c.setFont("Helvetica", 16) + c.setFillColor(blue) + c.drawString(210, top + 310, "contrast get from xsvs and xpcs") + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(180, top - 10, "filename: %s" % imgf) + + if new_page: + c.showPage() + c.save() + + def new_page(self): + c = self.c + c.showPage() + + def save_page(self): + c = self.c + c.save() + + def done(self): + out_dir = self.out_dir + uid = self.uid + + print() + print("*" * 40) + print("The pdf report is created with filename as: %s" % (self.filename)) + print("*" * 40) + + +def create_multi_pdf_reports_for_uids(uids, g2, data_dir, report_type="saxs", append_name=""): + """Aug 16, YG@CHX-NSLS-II + Create multi pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + """ + for key in list(g2.keys()): + i = 1 + for sub_key in list(g2[key].keys()): + uid_i = uids[key][sub_key] + data_dir_ = os.path.join(data_dir, "%s/" % uid_i) + if append_name != "": + uid_name = uid_i + append_name + else: + uid_name = uid_i + c = create_pdf_report( + data_dir_, + uid_i, + data_dir, + report_type=report_type, + filename="XPCS_Analysis_Report_for_uid=%s.pdf" % uid_name, + ) + # Page one: Meta-data/Iq-Q/ROI + c.report_header(page=1) + c.report_meta(top=730) + # c.report_one_time( top= 500 ) + # c.new_page() + if report_type == "flow": + c.report_flow_pv_g2(top=720) + c.save_page() + c.done() + + +def create_one_pdf_reports_for_uids(uids, g2, data_dir, filename="all_in_one", report_type="saxs"): + """Aug 16, YG@CHX-NSLS-II + Create one pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + """ + c = create_pdf_report(data_dir, uid=filename, out_dir=data_dir, load=False, report_type=report_type) + page = 1 + + for key in list(g2.keys()): + i = 1 + for sub_key in list(g2[key].keys()): + uid_i = uids[key][sub_key] + data_dir_ = os.path.join(data_dir, "%s/" % uid_i) + + c.uid = uid_i + c.data_dir = data_dir_ 
+ c.load_metadata() + + # Page one: Meta-data/Iq-Q/ROI + c.report_header(page=page) + c.report_meta(top=730) + c.report_one_time(top=500) + c.new_page() + page += 1 + c.uid = filename + c.save_page() + c.done() + + +def save_res_h5(full_uid, data_dir, save_two_time=False): + """ + YG. Nov 10, 2016 + save the results to a h5 file + will save meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo save multi-tau calculated one-time correlation function g2/taus + will also save two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will save two-time correaltion function + """ + with h5py.File(data_dir + "%s.h5" % full_uid, "w") as hf: + # write meta data + meta_data = hf.create_dataset("meta_data", (1,), dtype="i") + for key in md.keys(): + try: + meta_data.attrs[key] = md[key] + except: + pass + + shapes = md["avg_img"].shape + avg_h5 = hf.create_dataset("avg_img", data=md["avg_img"]) + mask_h5 = hf.create_dataset("mask", data=md["mask"]) + roi_h5 = hf.create_dataset("roi", data=md["ring_mask"]) + + g2_h5 = hf.create_dataset("g2", data=g2) + taus_h5 = hf.create_dataset("taus", data=taus) + + if save_two_time: + g12b_h5 = hf.create_dataset("g12b", data=g12b) + g2b_h5 = hf.create_dataset("g2b", data=g2b) + taus2_h5 = hf.create_dataset("taus2", data=taus2) + + +def printname(name): + print(name) + + +# f.visit(printname) +def load_res_h5(full_uid, data_dir): + """YG. Nov 10, 2016 + load results from a h5 file + will load meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo load multi-tau calculated one-time correlation function g2/taus + will also load two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will load two-time correaltion function + + """ + with h5py.File(data_dir + "%s.h5" % full_uid, "r") as hf: + meta_data_h5 = hf.get("meta_data") + meta_data = {} + for att in meta_data_h5.attrs: + meta_data[att] = meta_data_h5.attrs[att] + avg_h5 = np.array(hf.get("avg_img")) + mask_h5 = np.array(hf.get("mask")) + roi_h5 = np.array(hf.get("roi")) + g2_h5 = np.array(hf.get("g2")) + taus_h5 = np.array(hf.get("taus")) + g2b_h5 = np.array(hf.get("g2b")) + taus2_h5 = np.array(hf.get("taus2")) + if "g12b" in hf: + g12b_h5 = np.array(hf.get("g12b")) + + if "g12b" in hf: + return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b + else: + return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5 + + +def make_pdf_report( + data_dir, + uid, + pdf_out_dir, + pdf_filename, + username, + run_fit_form, + run_one_time, + run_two_time, + run_four_time, + run_xsvs, + run_dose=None, + oavs_report=False, + report_type="saxs", + md=None, + report_invariant=False, + return_class=False, + res_h5_filename=None, +): + + if uid.startswith("uid=") or uid.startswith("Uid="): + uid = uid[4:] + c = create_pdf_report( + data_dir, + uid, + pdf_out_dir, + filename=pdf_filename, + user=username, + report_type=report_type, + md=md, + res_h5_filename=res_h5_filename, + ) + # print( c.md) + # Page one: Meta-data/Iq-Q/ROI + c.report_header(page=1) + c.report_meta(top=730) + c.report_static(top=540, iq_fit=run_fit_form) + c.report_ROI(top=290) + page = 1 + ##Page Two for plot OVAS images if oavs_report is True + if oavs_report: + c.new_page() + c.report_header(page=2) + c.report_oavs(top=720, oavs_file=None, new_page=True) + page += 1 + + # Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q + c.new_page() + page += 1 + c.report_header(page=page) + + if c.report_type != "ang_saxs": + 
c.report_time_analysis(top=720) + if run_one_time: + if c.report_type != "ang_saxs": + top = 350 + else: + top = 500 + if c.g2_fit_new_page: + c.new_page() + page += 1 + top = 720 + c.report_one_time(top=top) + + # self.two_g2_new_page = True + # self.g2_fit_new_page = True + + # Page Three: two-time/two g2 + + if run_two_time: + c.new_page() + page += 1 + c.report_header(page=page) + c.report_two_time(top=720) + + if run_four_time: + c.new_page() + page += 1 + c.report_header(page=page) + c.report_four_time(top=720) + + if run_xsvs: + c.new_page() + page += 1 + c.report_header(page=page) + c.report_xsvs(top=720) + if run_dose: + c.new_page() + page += 1 + c.report_header(page=page) + c.report_dose(top=702) + if report_invariant: + c.new_page() + page += 1 + c.report_header(page=page) + c.report_invariant(top=702) + + else: + c.report_flow_pv_g2(top=720, new_page=True) + c.report_flow_pv_two_time(top=720, new_page=True) + + c.save_page() + c.done() + if return_class: + return c + + +###################################### +###Deal with saving dict to hdf5 file +def save_dict_to_hdf5(dic, filename): + """ + .... + """ + with h5py.File(filename, "w") as h5file: + recursively_save_dict_contents_to_group(h5file, "/", dic) + + +def load_dict_from_hdf5(filename): + """ + .... + """ + with h5py.File(filename, "r") as h5file: + return recursively_load_dict_contents_from_group(h5file, "/") + + +def recursively_save_dict_contents_to_group(h5file, path, dic): + """...""" + # argument type checking + if not isinstance(dic, dict): + raise ValueError("must provide a dictionary") + + if not isinstance(path, str): + raise ValueError("path must be a string") + if not isinstance(h5file, h5py._hl.files.File): + raise ValueError("must be an open h5py file") + # save items to the hdf5 file + for key, item in dic.items(): + # print(key,item) + key = str(key) + if isinstance(item, list): + item = np.array(item) + # print(item) + if not isinstance(key, str): + raise ValueError("dict keys must be strings to save to hdf5") + # save strings, numpy.int64, and numpy.float64 types + if isinstance( + item, (np.int64, np.float64, str, float, np.float32, int) + ): # removed depreciated np.float LW @06/11/2023 + # print( 'here' ) + h5file[path + key] = item + if not h5file[path + key].value == item: + raise ValueError("The data representation in the HDF5 file does not match the original dict.") + # save numpy arrays + elif isinstance(item, np.ndarray): + try: + h5file[path + key] = item + except: + item = np.array(item).astype("|S9") + h5file[path + key] = item + if not np.array_equal(h5file[path + key].value, item): + raise ValueError("The data representation in the HDF5 file does not match the original dict.") + # save dictionaries + elif isinstance(item, dict): + recursively_save_dict_contents_to_group(h5file, path + key + "/", item) + # other types cannot be saved and will result in an error + else: + # print(item) + raise ValueError("Cannot save %s type." % type(item)) + + +def recursively_load_dict_contents_from_group(h5file, path): + """...""" + ans = {} + for key, item in h5file[path].items(): + if isinstance(item, h5py._hl.dataset.Dataset): + ans[key] = item.value + elif isinstance(item, h5py._hl.group.Group): + ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + "/") + return ans + + +def export_xpcs_results_to_h5(filename, export_dir, export_dict): + """ + YG. May 10, 2017 + save the results to a h5 file + + YG. 
Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. + """ + + fout = export_dir + filename + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p"] + dict_nest = ["taus_uids", "g2_uids"] + + with h5py.File(fout, "w") as hf: + flag = False + for key in list(export_dict.keys()): + # print( key ) + if key in dicts: # =='md' or key == 'qval_dict': + md = export_dict[key] + meta_data = hf.create_dataset(key, (1,), dtype="i") + for key_ in md.keys(): + try: + meta_data.attrs[str(key_)] = md[key_] + except: + pass + elif key in dict_nest: + # print(key) + try: + recursively_save_dict_contents_to_group(hf, "/%s/" % key, export_dict[key]) + except: + print("Can't export the key: %s in this dataset." % key) + + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + try: + export_dict[key].to_hdf( + fout, + key=key, + mode="a", + ) + except: + flag = True + else: + data = hf.create_dataset(key, data=export_dict[key]) + # add this fill line at Octo 27, 2017 + data.set_fill_value = np.nan + if flag: + for key in list(export_dict.keys()): + if key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + export_dict[key].to_hdf( + fout, + key=key, + mode="a", + ) + + print("The xpcs analysis results are exported to %s with filename as %s" % (export_dir, filename)) + + +def extract_xpcs_results_from_h5_debug(filename, import_dir, onekey=None, exclude_keys=None): + """ + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. + """ + + import numpy as np + import pandas as pds + + extract_dict = {} + fp = import_dir + filename + pds_type_keys = [] + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p", "taus_uids", "g2_uids"] + if exclude_keys is None: + exclude_keys = [] + if onekey is None: + for k in dicts: + extract_dict[k] = {} + with h5py.File(fp, "r") as hf: + # print (list( hf.keys()) ) + for key in list(hf.keys()): + if key not in exclude_keys: + if key in dicts: + extract_dict[key] = recursively_load_dict_contents_from_group(hf, "/" + key + "/") + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + pds_type_keys.append(key) + else: + extract_dict[key] = np.array(hf.get(key)) + for key in pds_type_keys: + if key not in exclude_keys: + extract_dict[key] = pds.read_hdf(fp, key=key) + else: + if onekey == "md": + with h5py.File(fp, "r") as hf: + md = hf.get("md") + for key in list(md.attrs): + extract_dict["md"][key] = md.attrs[key] + elif onekey in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + extract_dict[onekey] = pds.read_hdf(fp, key=onekey) + else: + try: + with h5py.File(fp, "r") as hf: + extract_dict[onekey] = np.array(hf.get(onekey)) + except: + print("The %s dosen't have this %s value" % (fp, onekey)) + return extract_dict + + +def export_xpcs_results_to_h5_old(filename, export_dir, export_dict): + """ + YG. Dec 22, 2016 + save the results to a h5 file + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. 
+ """ + import h5py + + fout = export_dir + filename + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p"] # {k1: { }} + dict_nest = ["taus_uids", "g2_uids"] # {k1: {k2:}} + with h5py.File(fout, "w") as hf: + for key in list(export_dict.keys()): + # print( key ) + if key in dicts: # =='md' or key == 'qval_dict': + md = export_dict[key] + meta_data = hf.create_dataset(key, (1,), dtype="i") + for key_ in md.keys(): + try: + meta_data.attrs[str(key_)] = md[key_] + except: + pass + elif key in dict_nest: + k1 = export_dict[key] + v1 = hf.create_dataset(key, (1,), dtype="i") + for k2 in k1.keys(): + + v2 = hf.create_dataset(k1, (1,), dtype="i") + + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + export_dict[key].to_hdf( + fout, + key=key, + mode="a", + ) + else: + data = hf.create_dataset(key, data=export_dict[key]) + print("The xpcs analysis results are exported to %s with filename as %s" % (export_dir, filename)) + + +def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex=None): + """ + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. + """ + + import numpy as np + import pandas as pds + + extract_dict = {} + fp = import_dir + filename + pds_type_keys = [] + dicts = ["md", "qval_dict", "qval_dict_v", "qval_dict_p", "taus_uids", "g2_uids"] + if exclude_keys is None: + exclude_keys = [] + if onekey is None: + for k in dicts: + extract_dict[k] = {} + with h5py.File(fp, "r") as hf: + # print (list( hf.keys()) ) + for key in list(hf.keys()): + if key not in exclude_keys: + if key in dicts: + md = hf.get(key) + for key_ in list(md.attrs): + # print(key, key_) + if key == "qval_dict": + extract_dict[key][int(key_)] = md.attrs[key_] + else: + extract_dict[key][key_] = md.attrs[key_] + + elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + pds_type_keys.append(key) + else: + if key == "g12b": + if two_time_qindex is not None: + extract_dict[key] = hf.get(key)[:, :, two_time_qindex] + else: + extract_dict[key] = hf.get(key)[:] + else: + extract_dict[key] = hf.get(key)[:] # np.array( hf.get( key )) + + for key in pds_type_keys: + if key not in exclude_keys: + extract_dict[key] = pds.read_hdf(fp, key=key) + else: + if onekey == "md": + with h5py.File(fp, "r") as hf: + md = hf.get("md") + for key in list(md.attrs): + extract_dict["md"][key] = md.attrs[key] + elif onekey in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: + extract_dict[onekey] = pds.read_hdf(fp, key=onekey) + else: + try: + with h5py.File(fp, "r") as hf: + if key == "g12b": + if two_time_qindex is not None: + extract_dict[key] = hf.get(key)[:, :, two_time_qindex] + else: + extract_dict[key] = hf.get(key)[:] + else: + extract_dict[key] = hf.get(key)[:] # np.array( hf.get( key )) + # extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) + except: + print("The %s dosen't have this %s value" % (fp, onekey)) + return extract_dict + + +def read_contrast_from_multi_csv(uids, path, times=None, unit=20): + """Y.G. 
2016, Dec 23, load contrast from multi csv file"""
+
+    N = len(uids)
+    if times is None:
+        times = np.array([0] + [2**i for i in range(N)]) * unit
+    for i, uid in enumerate(uids):
+        fp = path + uid + "/uid=%s--contrast_factorL.csv" % uid
+        contri = pds.read_csv(fp)
+        qs = np.array(contri[contri.columns[0]])
+        contri_ = np.array(contri[contri.columns[1]])
+        if i == 0:
+            contr = np.zeros([N, len(qs)])
+        contr[i] = contri_
+    # contr[0,:] = np.nan
+    return times, contr
+
+
+def read_contrast_from_multi_h5(
+    uids,
+    path,
+):
+    """Y.G. 2016, Dec 23, load contrast from multi h5 file"""
+    N = len(uids)
+    times_xsvs = np.zeros(N)
+    for i, uid in enumerate(uids):
+        t = extract_xpcs_results_from_h5(
+            filename="%s_Res.h5" % uid, import_dir=path + uid + "/", onekey="times_xsvs"
+        )
+        times_xsvs[i] = t["times_xsvs"][0]
+        contri = extract_xpcs_results_from_h5(
+            filename="%s_Res.h5" % uid, import_dir=path + uid + "/", onekey="contrast_factorL"
+        )
+        if i == 0:
+            contr = np.zeros([N, contri["contrast_factorL"].shape[0]])
+        contr[i] = contri["contrast_factorL"][:, 0]
+    return times_xsvs, contr
diff --git a/pyCHX/backups/pyCHX-backup/DEVs.py b/pyCHX/backups/pyCHX-backup/DEVs.py
new file mode 100644
index 0000000..6e89cda
--- /dev/null
+++ b/pyCHX/backups/pyCHX-backup/DEVs.py
@@ -0,0 +1,578 @@
+# simple brute force multitau
+# from pyCHX.chx_generic_functions import average_array_withNan
+import numpy as np
+import skbeam.core.roi as roi
+from numpy.fft import fft, ifft
+from tqdm import tqdm
+
+# plot1D (used by plot_xy_with_fit below) is defined in chx_generic_functions
+from pyCHX.chx_generic_functions import plot1D
+
+
+def fit_one_peak_curve(x, y, fit_range):
+    """YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape
+    Parameters:
+        x: one-d array, x-axis data
+        y: one-d array, y-axis data
+        fit_range: [x1, x2], a list of index, to define the x-range for fit
+    Return:
+        center: float, center of the peak
+        center_std: float, error bar of center in the fitting
+        fwhm: float, full width at half max intensity of the peak, 2*sigma
+        fwhm_std: float, error bar of the full width at half max intensity of the peak
+        xf: the x in the fit
+        out: the fitting class resulted from lmfit
+
+    """
+    from lmfit.models import LinearModel, LorentzianModel
+
+    peak = LorentzianModel()
+    background = LinearModel()
+    model = peak + background
+    x1, x2 = fit_range
+    xf = x[x1:x2]
+    yf = y[x1:x2]
+    model.set_param_hint("slope", value=5)
+    model.set_param_hint("intercept", value=0)
+    model.set_param_hint("center", value=0.005)
+    model.set_param_hint("amplitude", value=0.1)
+    model.set_param_hint("sigma", value=0.003)
+    # out=model.fit(yf, x=xf)#, method='nelder')
+    out = model.fit(yf, x=xf, method="leastsq")
+    cen = out.params["center"].value
+    cen_std = out.params["center"].stderr
+    wid = out.params["sigma"].value * 2
+    wid_std = out.params["sigma"].stderr * 2
+    return cen, cen_std, wid, wid_std, xf, out
+
+
+def plot_xy_with_fit(x, y, xf, out, xlim=[1e-3, 0.01], xlabel="q (" r"$\AA^{-1}$)", ylabel="I(q)", filename=None):
+    """YG Dev@Aug 10, 2019 to plot x,y with fit,
+    currently this code is dedicated to plot q-Iq with fit and show the fitting parameters, peak pos, peak wid"""
+
+    yf2 = out.model.eval(params=out.params, x=xf)
+    # read the peak position and width back from the fit result so they can be annotated below
+    cen = out.params["center"].value
+    cen_std = out.params["center"].stderr
+    wid = out.params["sigma"].value * 2
+    wid_std = out.params["sigma"].stderr * 2
+    fig, ax = plt.subplots()
+    plot1D(x=x, y=y, ax=ax, m="o", ls="", c="k", legend="data")
+    plot1D(x=xf, y=yf2, ax=ax, m="", ls="-", c="r", legend="fit", logy=True)
+    ax.set_xlim(xlim)
+    # ax.set_ylim( 0.1, 4)
+    # ax.set_title(uid+'--t=%.2f'%tt)
+    ax.set_xlabel(xlabel)
+    ax.set_ylabel(ylabel)
+    txts = r"peak" + r" = %.5f +/- %.5f " % (cen, cen_std)
+    ax.text(x=0.02, y=0.2, s=txts, fontsize=14, transform=ax.transAxes)
+    txts = 
r"wid" + r" = %.4f +/- %.4f" % (wid, wid_std) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) + plt.tight_layout() + if filename is not None: + plt.savefig(filename) + return ax + + +#############For APD detector +def get_pix_g2_fft(time_inten): + """YG Dev@CHX 2018/12/4 get g2 for oneD intensity + g2 = G/(P*F) + G = + P = + F = + Input: + time_inten: 1d-array, + a time dependent intensity for one pixel + Return: + G/(P*F) + """ + G = get_pix_g2_G(time_inten) + P, F = get_pix_g2_PF(time_inten) + return G / (P * F) + + +def get_pix_g2_G(time_inten): + """YG Dev@CHX 2018/12/4 get G for oneD intensity + g2 = G/(P*F) + G = + P = + F = + Input: + time_inten: 1d-array, + a time dependent intensity for one pixel + Return: + G + """ + L = len(time_inten) + norm = np.arange(L, 0, -1) + return np.correlate(time_inten, time_inten, mode="full")[L - 1 :] / norm + + +def get_pix_g2_PF(time_inten): + """YG Dev@CHX 2018/12/4 get past and future intensity in the normalization of g2 + g2 = G/(P*F) + G = + P = + F = + Input: + time_inten: 1d-array, + a time dependent intensity for one pixel + Return: + P, F + """ + + cum_sum = np.cumsum(time_inten)[::-1] + cum_Norm = np.arange(time_inten.shape[0], 0, -1) + P = cum_sum / cum_Norm + + cum_sum2 = np.cumsum(time_inten[::-1], axis=0)[::-1] + cum_Norm2 = np.arange(time_inten.shape[0], 0, -1) + F = cum_sum2 / cum_Norm2 + return P, F + + +################### + + +def get_ab_correlation(a, b): + """YG 2018/11/05/ derived from pandas.frame corrwith method + Get correlation of two one-d array, formula--> + A = ( ( a-a.mean() ) * (b-b.mean()) ).sum + B = ( len(a)-1 ) * a.std() * b.std() + Cor = A/B + Input: + a: one-d array + b: one-d array + Output: + c: one-d array, correlation + + """ + a = np.array(a) + b = np.array(b) + return ((a - a.mean()) * (b - b.mean())).sum() / ((len(a) - 1) * a.std() * b.std()) + + +def get_oneQ_g2_fft(time_inten_oneQ, axis=0): + """YG Dev@CHX 2018/10/15 get g2 for one Q by giving time_inten for that Q + g2 = G/(P*F) + G = + P = + F = + Input: + time_inten_oneQ: 2d-array, shape=[time, pixel number in the ROI], + a time dependent intensity for a list of pixels + ( the equivilent pixels belongs to one Q ) + Return: + G/(P*F) + """ + L = time_inten_oneQ.shape[0] + P, F = get_g2_PF(time_inten_oneQ) + G = auto_correlation_fft(time_inten_oneQ, axis=axis) + G2f = np.average(G, axis=1) / L + Pf = np.average(P, axis=1) + Ff = np.average(F, axis=1) + g2f = G2f / (Pf * Ff) + return g2f + + +def get_g2_PF(time_inten): + """YG Dev@CHX 2018/10/15 get past and future intensity in the normalization of g2 + g2 = G/(P*F) + G = + P = + F = + Input: + time_inten: 2d-array, shape=[time, pixel number in the ROI], + a time dependent intensity for a list of pixels + Return: + P, F + """ + + cum_sum = np.cumsum(time_inten, axis=0)[::-1] + cum_Norm = np.arange(time_inten.shape[0], 0, -1) + P = cum_sum / cum_Norm[:, np.newaxis] + + cum_sum2 = np.cumsum(time_inten[::-1], axis=0)[::-1] + cum_Norm2 = np.arange(time_inten.shape[0], 0, -1) + F = cum_sum2 / cum_Norm2[:, np.newaxis] + return P, F + + +def auto_correlation_fft_padding_zeros(a, axis=-1): + """Y.G. 
Dev@CHX, 2018/10/15 Do autocorelation of ND array by fft + Math: + Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) + In numpy form + auto_cor(arr) = ifft( + fft( arr, n=2N-1, axis=axis ) ##padding enough zeros + ## for axis + * np.conjugate( ## conju for reverse array + fft(arr , n=2N-1, axis=axis) ) + ) #do reverse fft + Input: + a: 2d array, shape=[time, pixel number in the ROI], + a time dependent intensity for a list of pixels + axis: the axis for doing autocor + Output: + return: the autocorrelation array + if a is one-d and two-d, return the cor with the same length of a + else: return the cor with full length defined by 2*N-1 + + """ + a = np.asarray(a) + M = a.shape + N = M[axis] + # print(M, N, 2*N-1) + cor = np.real( + ifft(fft(a, n=N * 2 - 1, axis=axis) * np.conjugate(fft(a, n=N * 2 - 1, axis=axis)), n=N * 2 - 1, axis=axis) + ) + + if len(M) == 1: + return cor[:N] + elif len(M) == 2: + if axis == -1 or axis == 1: + return cor[:, :N] + elif axis == 0 or axis == -2: + return cor[:N] + else: + return cor + + +def auto_correlation_fft(a, axis=-1): + """Y.G. Dev@CHX, 2018/10/15 Do autocorelation of ND array by fft + Math: + Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) + In numpy form + auto_cor(arr) = ifft( + fft( arr, n=2N-1, axis=axis ) ##padding enough zeros + ## for axis + * np.conjugate( ## conju for reverse array + fft(arr , n=2N-1, axis=axis) ) + ) #do reverse fft + Input: + a: 2d array, shape=[time, pixel number in the ROI], + a time dependent intensity for a list of pixels + axis: the axis for doing autocor + Output: + return: the autocorrelation array + if a is one-d and two-d, return the cor with the same length of a + else: return the cor with full length defined by 2*N-1 + + """ + a = np.asarray(a) + cor = np.real(ifft(fft(a, axis=axis) * np.conjugate(fft(a, axis=axis)), axis=axis)) + return cor + + +def multitau(Ipix, bind, lvl=12, nobuf=8): + """ + tt,g2=multitau(Ipix,bind,lvl=12,nobuf=8) + Ipix is matrix of no-frames by no-pixels + bind is bin indicator, one per pixel. All pixels + in the same bin have the same value of bind (bin indicator). + This number of images save at each level is nobf(8) and the + number of level is lvl(12). + returns: + tt is the offsets of each g2 point. + tt*timeperstep is time of points + g2 is max(bind)+1,time steps. + plot(tt[1:],g2[1:,i]) will plot each g2. 
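+
+    A minimal sketch with toy inputs (not from a real measurement):
+        import numpy as np
+        Ipix = np.random.poisson(5.0, size=(1024, 100))  # 1024 frames x 100 pixels
+        bind = np.zeros(100, dtype=int)                  # put every pixel in bin 0
+        tt, g2 = multitau(Ipix, bind, lvl=8, nobuf=8)
+        # plot(tt[1:], g2[1:, 0]) then shows the g2 of that single bin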
+ """ + # if num_lev is None: + # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + # print(nobuf,nolvl) + nobins = bind.max() + 1 + nobufov2 = nobuf // 2 + # t0=time.time() + noperbin = np.bincount(bind) + G2 = np.zeros((nobufov2 * (1 + lvl), nobins)) + tt = np.zeros(nobufov2 * (1 + lvl)) + dII = Ipix.copy() + t = np.bincount(bind, np.mean(dII, axis=0)) ** 2 / noperbin + G2[0, :] = np.bincount(bind, np.mean(dII * dII, axis=0)) / t + for j in np.arange(1, nobuf): + tt[j] = j + print(j, tt[j]) + # t=noperbin*np.bincount(bind,np.mean(dII,axis=0))**2 + t = ( + np.bincount(bind, np.mean(dII[j:, :], axis=0)) + * np.bincount(bind, np.mean(dII[:-j, :], axis=0)) + / noperbin + ) + G2[j, :] = np.bincount(bind, np.mean(dII[j:, :] * dII[:-j, :], axis=0)) / t + for l in tqdm(np.arange(1, lvl), desc="Calcuate g2..."): + nn = dII.shape[0] // 2 * 2 # make it even + dII = (dII[0:nn:2, :] + dII[1:nn:2, :]) / 2.0 # sum in pairs + nn = nn // 2 + if nn < nobuf: + break + for j in np.arange(nobufov2, min(nobuf, nn)): + ind = nobufov2 + nobufov2 * l + (j - nobufov2) + tt[ind] = 2**l * j + t = ( + np.bincount(bind, np.mean(dII[j:, :], axis=0)) + * np.bincount(bind, np.mean(dII[:-j, :], axis=0)) + / noperbin + ) + G2[ind, :] = np.bincount(bind, np.mean(dII[j:, :] * dII[:-j, :], axis=0)) / t + # print(ind) + # print(time.time()-t0) + return (tt[: ind + 1], G2[: ind + 1, :]) + + +def average_array_withNan(array, axis=0, mask=None): + """YG. Jan 23, 2018 + Average array invovling np.nan along axis + + Input: + array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + avg: averaged array along axis + """ + shape = array.shape + if mask is None: + mask = np.isnan(array) + # mask = np.ma.masked_invalid(array).mask + array_ = np.ma.masked_array(array, mask=mask) + try: + sums = np.array(np.ma.sum(array_[:, :], axis=axis)) + except: + sums = np.array(np.ma.sum(array_[:], axis=axis)) + + cts = np.sum(~mask, axis=axis) + # print(cts) + return sums / cts + + +def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=None, multi_tau_method=True): + """YG Feb 20, 2018@CHX + Do correlation for pixel_time type data with tau as defined as dly + Input: + pix_time_data: 2D array, shape as [number of frame (time), pixel list (each as one q) ] + dly_dict: the taus dict, (use multi step e.g.) + multi_tau_method: if True, using multi_tau_method to average data for higher level + pixel_norm: if not None, should be array with shape as pixel number + frame_norm: if not None, should be array with shape as frame number + + return: + g2: with shape as [ pixel_list number, dly number ] + + """ + Nt, Np = pix_time_data.shape + Ntau = len(np.concatenate(list(dly_dict.values()))) + G2 = np.zeros([Ntau, Np]) + Gp = np.zeros([Ntau, Np]) + Gf = np.zeros([Ntau, Np]) + # mask_pix = np.isnan(pix_time_data) + # for tau_ind, tau in tqdm( enumerate(dly), desc= 'Calcuate g2...' 
): + tau_ind = 0 + # if multi_tau_method: + pix_time_datac = pix_time_data.copy() + # else: + if pixel_norm is not None: + pix_time_datac /= pixel_norm + if frame_norm is not None: + pix_time_datac /= frame_norm + + for tau_lev, tau_key in tqdm(enumerate(list(dly_dict.keys())), desc="Calcuate g2..."): + # print(tau_key) + taus = dly_dict[tau_key] + if multi_tau_method: + if tau_lev > 0: + nobuf = len(dly_dict[1]) + nn = pix_time_datac.shape[0] // 2 * 2 # make it even + pix_time_datac = (pix_time_datac[0:nn:2, :] + pix_time_datac[1:nn:2, :]) / 2.0 # sum in pairs + nn = nn // 2 + if nn < nobuf: + break + # print(nn) + # if(nn<1): break + else: + nn = pix_time_datac.shape[0] + for tau in taus: + if multi_tau_method: + IP = pix_time_datac[: nn - tau // 2**tau_lev, :] + IF = pix_time_datac[tau // 2**tau_lev : nn, :] + # print( tau_ind, nn , tau//2**tau_lev, tau_lev+1,tau, IP.shape) + else: + IP = pix_time_datac[: Nt - tau, :] + IF = pix_time_datac[tau:Nt, :] + # print( tau_ind, tau_lev+1,tau, IP.shape) + + # IP_mask = mask_pix[: Nt - tau,:] + # IF_mask = mask_pix[tau: Nt,: ] + # IPF_mask = IP_mask | IF_mask + # IPFm = average_array_withNan(IP*IF, axis = 0, )#mask= IPF_mask ) + # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) + # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) + G2[tau_ind] = average_array_withNan( + IP * IF, + axis=0, + ) # IPFm + Gp[tau_ind] = average_array_withNan( + IP, + axis=0, + ) # IPm + Gf[tau_ind] = average_array_withNan( + IF, + axis=0, + ) # IFm + tau_ind += 1 + # for i in range(G2.shape[0]-1, 0, -1): + # if np.isnan(G2[i,0]): + # gmax = i + gmax = tau_ind + return G2[:gmax, :], Gp[:gmax, :], Gf[:gmax, :] + + +def autocor_xytframe(self, n): + """Do correlation for one xyt frame--with data name as n""" + + data = read_xyt_frame(n) # load data + N = len(data) + crl = correlate(data, data, "full")[N - 1 :] + FN = arange(1, N + 1, dtype=float)[::-1] + IP = cumsum(data)[::-1] + IF = cumsum(data[::-1])[::-1] + + return crl / (IP * IF) * FN + + +###################For Fit + +import matplotlib.pyplot as plt +import numpy as np +from scipy.optimize import leastsq + +# duplicate my curfit function from yorick, except use sigma and not w +# notice the main feature is an adjust list. + + +def curfit(x, y, a, sigy=None, function_name=None, adj=None): + a = np.array(a) + if adj is None: + adj = np.arange(len(a), dtype="int32") + if function_name is None: + function_name = funct + # print( a, adj, a[adj] ) + # print(x,y,a) + afit, cv, idt, m, ie = leastsq(_residuals, a[adj], args=(x, y, sigy, a, adj, function_name), full_output=True) + a[adj] = afit + realcv = np.identity(afit.size) + realcv[np.ix_(adj, adj)] = cv + nresids = idt["fvec"] + chisq = np.sum(nresids**2) / (len(y) - len(adj)) + # print( cv ) + # yfit=y-yfit*sigy + sigmaa = np.zeros(len(a)) + sigmaa[adj] = np.sqrt(np.diag(cv)) + return (chisq, a, sigmaa, nresids) + + +# hidden residuals for leastsq +def _residuals(p, x, y, sigy, pall, adj, fun): + # print(p, pall, adj ) + pall[adj] = p + # print(pall) + if sigy is None: + return y - fun(x, pall) + else: + return (y - fun(x, pall)) / sigy + + +# print out fit result nicely +def fitpr(chisq, a, sigmaa, title=None, lbl=None): + """nicely print out results of a fit""" + # get fitted results. + if lbl == None: + lbl = [] + for i in xrange(a.size): + lbl.append("A%(#)02d" % {"#": i}) + # print resuls of a fit. 
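+    # chisq here is the reduced chi-square computed by curfit: the sum of squared
+    # residuals (normalized by sigy when it was given) divided by N - n_fitted.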
+ if title != None: + print(title) + print(" chisq=%(c).4f" % {"c": chisq}) + for i in range(a.size): + print(" %(lbl)8s =%(m)10.4f +/- %(s).4f" % {"lbl": lbl[i], "m": a[i], "s": sigmaa[i]}) + + +# easy plot for fit +def fitplot(x, y, sigy, yfit, pl=plt): + pl.plot(x, yfit) + pl.errorbar(x, y, fmt="o", yerr=sigy) + + +# define a default function, a straight line. +def funct(x, p): + return p[0] * x + p[1] + + +# A 1D Gaussian +def Gaussian(x, p): + """ + One-D Gaussian Function + xo, amplitude, sigma, offset = p + + + """ + xo, amplitude, sigma, offset = p + g = offset + amplitude * 1.0 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-1 / 2.0 * (x - xo) ** 2 / sigma**2) + return g + + +###########For ellipse shaped sectors by users +def elps_r(a, b, theta): + """ + Returns the radius of an ellipse with semimajor/minor axes a/b + at angle theta (in radians)""" + return a * b / np.sqrt(((b * np.cos(theta)) ** 2) + ((a * np.sin(theta)) ** 2)) + + +def place_in_interval(val, interval_list): + """ + For a sorted list interval_list, returns the bin index val belongs to (0-indexed) + Returns -1 if outside of bins""" + if val < interval_list[0] or val >= interval_list[-1]: + return -1 + else: + return np.argmax(val < interval_list) - 1 + + +def gen_elps_sectors(a, b, r_min, r_n, th_n, c_x, c_y, th_min=0, th_max=360): + """ + Returns a list of x/y coordinates of ellipsoidal sectors ROI. Cuts th_max - th_min degrees into + th_n number of angular bins, and r_n number of radial bins, starting from r_min*r to r where + r is the radius of the ellipsoid at that angle. The ROIs are centered around c_x, c_y. Defaults to 360 deg. + + Example: + + roi_mask = np.zeros_like( avg_img , dtype = np.int32) + sectors = gen_elps_sectors(110,55,0.2,5,24,579,177) + for ii,sector in enumerate(sectors): + roi_mask[sector[1],sector[0]] = ii + 1 + + """ + th_list = np.linspace(th_min, th_max, th_n + 1) + r_list = np.linspace(r_min, 1, r_n + 1) + regions_list = [ + [[np.array([], dtype=np.int_), np.array([], dtype=np.int_)] for _ in range(r_n)] for _ in range(th_n) + ] + w = int(np.ceil(a * 2)) + h = int(np.ceil(b * 2)) + x_offset = c_x - w // 2 + y_offset = c_y - h // 2 + for ii in range(w): + cur_x = ii - (w - 1) // 2 + for jj in range(h): + cur_y = jj - (h - 1) // 2 + cur_theta = np.arctan2(cur_y, cur_x) % (np.pi * 2) + cur_r = np.sqrt(cur_x**2 + cur_y**2) + cur_elps_r = elps_r(a, b, cur_theta) + cur_r_list = r_list * cur_elps_r + cur_theta = np.rad2deg(cur_theta) # Convert to degrees to compare with th_list + r_ind = place_in_interval(cur_r, cur_r_list) + th_ind = place_in_interval(cur_theta, th_list) + if (r_ind != -1) and (th_ind != -1): + regions_list[th_ind][r_ind][0] = np.append(regions_list[th_ind][r_ind][0], ii + x_offset) + regions_list[th_ind][r_ind][1] = np.append(regions_list[th_ind][r_ind][1], jj + y_offset) + sectors = [] + for th_reg_list in regions_list: + for sector in th_reg_list: + sectors += [(sector[0], sector[1])] + return sectors diff --git a/pyCHX/backups/pyCHX-backup/DataGonio.py b/pyCHX/backups/pyCHX-backup/DataGonio.py new file mode 100644 index 0000000..b8603ef --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/DataGonio.py @@ -0,0 +1,758 @@ +# import sys +import os +import re # Regular expressions +import sys + +import matplotlib as mpl +import numpy as np + +# from scipy.optimize import leastsq +# import scipy.special +import PIL # Python Image Library (for opening PNG, etc.) 
+import pylab as plt +import skbeam.core.correlation as corr +import skbeam.core.roi as roi +import skbeam.core.utils as utils +from skbeam.core.accumulators.binned_statistic import BinnedStatistic1D, BinnedStatistic2D + +from pyCHX.chx_generic_functions import average_array_withNan + + +def convert_Qmap(img, qx_map, qy_map=None, bins=None, rangeq=None, origin=None, mask=None, statistic="mean"): + """Y.G. Nov 3@CHX + Convert a scattering image to a qmap by giving qx_map and qy_map + Return converted qmap, x-coordinates and y-coordinates + """ + if qy_map is not None: + if rangeq is None: + qx_min, qx_max = qx_map.min(), qx_map.max() + qy_min, qy_max = qy_map.min(), qy_map.max() + rangeq = [[qx_min, qx_max], [qy_min, qy_max]] + # rangeq = [qx_min,qx_max , qy_min,qy_max] + if bins is None: + bins = qx_map.shape + + b2d = BinnedStatistic2D( + qx_map.ravel(), qy_map.ravel(), statistic=statistic, bins=bins, mask=mask.ravel(), range=rangeq + ) + remesh_data, xbins, ybins = b2d(img.ravel()), b2d.bin_centers[0], b2d.bin_centers[1] + + else: + if rangeq is None: + qx_min, qx_max = qx_map.min(), qx_map.max() + rangeq = [qx_min, qx_max] + if bins is None: + bins = [qx_map.size] + # print( rangeq, bins ) + if mask is not None: + m = mask.ravel() + else: + m = None + b1d = BinnedStatistic1D(qx_map.ravel(), bins=bins, mask=m) + + remesh_data = b1d(img.ravel()) + # print('Here') + xbins = b1d.bin_centers + ybins = None + + return remesh_data, xbins, ybins + + +def qphiavg(image, q_map=None, phi_map=None, mask=None, bins=None, origin=None, range=None, statistic="mean"): + """Octo 20, 2017 Yugang According to Julien's Suggestion + Get from https://github.com/CFN-softbio/SciStreams/blob/master/SciStreams/processing/qphiavg.py + With a small revision --> return three array rather than dict + + quick qphi average calculator. + ignores bins for now + """ + # TODO : replace with method that takes qphi maps + # TODO : also return q and phi of this... + # print("In qphi average stream") + shape = image.shape + if bins is None: + bins = shape + # print(bins) + if origin is None: + origin = (shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0 + + from skbeam.core.utils import angle_grid, radial_grid + + if q_map is None: + q_map = radial_grid(origin, shape) + if phi_map is None: + phi_map = angle_grid(origin, shape) + + expected_shape = tuple(shape) + if mask is not None: + if mask.shape != expected_shape: + raise ValueError( + '"mask" has incorrect shape. 
' + " Expected: " + str(expected_shape) + " Received: " + str(mask.shape) + ) + mask = mask.reshape(-1) + + rphibinstat = BinnedStatistic2D( + q_map.reshape(-1), phi_map.reshape(-1), statistic=statistic, bins=bins, mask=mask, range=range + ) + + sqphi = rphibinstat(image.ravel()) + qs = rphibinstat.bin_centers[0] + phis = rphibinstat.bin_centers[1] + return sqphi, qs, phis + + +def get_QPhiMap(img_shape, center): + """Y.G., Dev Nov 10, 2018 Get q_map and phi_map by giving image shape and center + e.g., + q_map, phi_map = get_QPhiMap( mask.shape, center[::-1]) + + """ + q_map = utils.radial_grid(center, img_shape, pixel_size=[1, 1]) + phi_map = np.degrees( + utils.angle_grid( + center, + img_shape, + ) + ) + + return q_map, phi_map + + +def get_img_qphimap(img, q_map, phi_map, mask, bins, center, qang_range=None, statistic="mean"): + """Y.G., Dev Nov 10, 2018 Get phi_map by giving image + e.g., + q_map, phi_map = get_QPhiMap( mask.shape, center[::-1]) + sqphi, qs, phis = get_img_qphimap( avg, q_map, phi_map, mask, + bins=[ 1500, 1800],center=center[::-1], + qang_range=None, statistic='mean') + + """ + sqphi, qs, phis = qphiavg( + img, + q_map=q_map, + phi_map=phi_map, + mask=mask, + bins=bins, + origin=center, + range=qang_range, + statistic=statistic, + ) + return sqphi, qs, phis + + +def get_iq_from_sqphi(sqphi): + """Y.G., Dev Nov 10, 2018 Get iq from a q-phi map + e.g., + iqPHI = get_iq_from_sqphi( Iqphi )""" + return np.nan_to_num(average_array_withNan(sqphi, axis=1)) + + +def get_phi_from_sqphi(sqphi): + """Y.G., Dev Nov 10, 2018 Get Iphi from a q-phi map + e.g., + iqPHI = get_iq_from_sqphi( Iqphi ) + qc= np.argmax( iqPHI ) + qw = 5 + iphiQ = get_phi_from_sqphi( Iqphi[qc-qw:qc+qw] ) + + """ + return np.nan_to_num(average_array_withNan(sqphi, axis=0)) + + +def convert_Qmap_old(img, qx_map, qy_map=None, bins=None, rangeq=None): + """Y.G. Nov 3@CHX + Convert a scattering image to a qmap by giving qx_map and qy_map + Return converted qmap, x-coordinates and y-coordinates + """ + if qy_map is not None: + if rangeq is None: + qx_min, qx_max = qx_map.min(), qx_map.max() + qy_min, qy_max = qy_map.min(), qy_map.max() + rangeq = [[qx_min, qx_max], [qy_min, qy_max]] + if bins is None: + bins = qx_map.shape + + remesh_data, xbins, ybins = np.histogram2d( + qx_map.ravel(), qy_map.ravel(), bins=bins, range=rangeq, normed=False, weights=img.ravel() + ) + + else: + if rangeq is None: + qx_min, qx_max = qx_map.min(), qx_map.max() + rangeq = [qx_min, qx_max] + if bins is None: + bins = qx_map.size + else: + if isinstance(bins, list): + bins = bins[0] + print(rangeq, bins) + remesh_data, xbins = np.histogram( + qx_map.ravel(), bins=bins, range=rangeq, normed=False, weights=img.ravel() + ) + ybins = None + return remesh_data, xbins, ybins + + +# Mask +################################################################################ +class Mask(object): + """Stores the matrix of pixels to be excluded from further analysis.""" + + def __init__(self, infile=None, format="auto"): + """Creates a new mask object, storing a matrix of the pixels to be + excluded from further analysis.""" + + self.data = None + + if infile is not None: + self.load(infile, format=format) + + def load(self, infile, format="auto", invert=False): + """Loads a mask from a a file. If this object already has some masking + defined, then the new mask is 'added' to it. 
Thus, one can load multiple + masks to exlude various pixels.""" + + if format == "png" or infile[-4:] == ".png": + self.load_png(infile, invert=invert) + + elif format == "hdf5" or infile[-3:] == ".h5" or infile[-4:] == ".hd5": + self.load_hdf5(infile, invert=invert) + + else: + print("Couldn't identify mask format for %s." % (infile)) + + def load_blank(self, width, height): + """Creates a null mask; i.e. one that doesn't exlude any pixels.""" + + # TODO: Confirm that this is the correct order for x and y. + self.data = np.ones((height, width)) + + def load_png(self, infile, threshold=127, invert=False): + """Load a mask from a PNG image file. High values (white) are included, + low values (black) are exluded.""" + + # Image should be black (0) for excluded pixels, white (255) for included pixels + img = PIL.Image.open(infile).convert("L") # black-and-white + img2 = img.point(lambda p: p > threshold and 255) + data = np.asarray(img2) / 255 + data = data.astype(int) + + if invert: + data = -1 * (data - 1) + + if self.data is None: + self.data = data + else: + self.data *= data + + def load_hdf5(self, infile, invert=False): + with h5py.File(infile, "r") as f: + data = np.asarray(f["mask"]) + + if invert: + data = -1 * (data - 1) + + if self.data is None: + self.data = data + else: + self.data *= data + + def invert(self): + """Inverts the mask. Can be used if the mask file was written using the + opposite convention.""" + self.data = -1 * (self.data - 1) + + # End class Mask(object) + ######################################## + + +# Calibration +################################################################################ +class Calibration(object): + """Stores aspects of the experimental setup; especially the calibration + parameters for a particular detector. That is, the wavelength, detector + distance, and pixel size that are needed to convert pixel (x,y) into + reciprocal-space (q) value. + + This class may also store other information about the experimental setup + (such as beam size and beam divergence). 
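+
+    A minimal usage sketch (all numbers are illustrative placeholders, not a real
+    detector geometry):
+        calib = Calibration(wavelength_A=1.0, distance_m=5.0, pixel_size_um=75.0)
+        calib.set_image_size(2070, 2167)
+        calib.set_beam_position(1000.0, 1100.0)
+        q_map = calib.q_map()        # 2D array of |q| (in 1/Angstrom) per pixel
+        angles = calib.angle_map()   # matching map of azimuthal angle (degrees)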
+ """ + + def __init__(self, wavelength_A=None, distance_m=None, pixel_size_um=None): + self.wavelength_A = wavelength_A + self.distance_m = distance_m + self.pixel_size_um = pixel_size_um + + # Data structures will be generated as needed + # (and preserved to speedup repeated calculations) + self.clear_maps() + + # Experimental parameters + ######################################## + + def set_wavelength(self, wavelength_A): + """Set the experimental x-ray wavelength (in Angstroms).""" + + self.wavelength_A = wavelength_A + + def get_wavelength(self): + """Get the x-ray beam wavelength (in Angstroms) for this setup.""" + + return self.wavelength_A + + def set_energy(self, energy_keV): + """Set the experimental x-ray beam energy (in keV).""" + + energy_eV = energy_keV * 1000.0 + energy_J = energy_eV / 6.24150974e18 + + h = 6.626068e-34 # m^2 kg / s + c = 299792458 # m/s + + wavelength_m = (h * c) / energy_J + self.wavelength_A = wavelength_m * 1e10 + + def get_energy(self): + """Get the x-ray beam energy (in keV) for this setup.""" + + h = 6.626068e-34 # m^2 kg / s + c = 299792458 # m/s + + wavelength_m = self.wavelength_A * 1e-10 # m + E = h * c / wavelength_m # Joules + + E *= 6.24150974e18 # electron volts + + E /= 1000.0 # keV + + return E + + def get_k(self): + """Get k = 2*pi/lambda for this setup, in units of inverse Angstroms.""" + + return 2.0 * np.pi / self.wavelength_A + + def set_distance(self, distance_m): + """Sets the experimental detector distance (in meters).""" + + self.distance_m = distance_m + + def set_pixel_size(self, pixel_size_um=None, width_mm=None, num_pixels=None): + """Sets the pixel size (in microns) for the detector. Pixels are assumed + to be square.""" + + if pixel_size_um is not None: + self.pixel_size_um = pixel_size_um + + else: + if num_pixels is None: + num_pixels = self.width + pixel_size_mm = width_mm * 1.0 / num_pixels + self.pixel_size_um = pixel_size_mm * 1000.0 + + def set_beam_position(self, x0, y0): + """Sets the direct beam position in the detector images (in pixel + coordinates).""" + + self.x0 = x0 + self.y0 = y0 + + def set_image_size(self, width, height=None): + """Sets the size of the detector image, in pixels.""" + + self.width = width + if height is None: + # Assume a square detector + self.height = width + else: + self.height = height + + def get_q_per_pixel(self): + """Gets the delta-q associated with a single pixel. This is computed in + the small-angle limit, so it should only be considered approximate. 
+ For instance, wide-angle detectors will have different delta-q across + the detector face.""" + + if self.q_per_pixel is not None: + return self.q_per_pixel + + c = (self.pixel_size_um / 1e6) / self.distance_m + twotheta = np.arctan(c) # radians + + self.q_per_pixel = 2.0 * self.get_k() * np.sin(twotheta / 2.0) + + return self.q_per_pixel + + # Maps + ######################################## + + def clear_maps(self): + self.r_map_data = None + self.q_per_pixel = None + self.q_map_data = None + self.angle_map_data = None + + self.qx_map_data = None + self.qy_map_data = None + self.qz_map_data = None + self.qr_map_data = None + + def r_map(self): + """Returns a 2D map of the distance from the origin (in pixel units) for + each pixel position in the detector image.""" + + if self.r_map_data is not None: + return self.r_map_data + + x = np.arange(self.width) - self.x0 + y = np.arange(self.height) - self.y0 + X, Y = np.meshgrid(x, y) + R = np.sqrt(X**2 + Y**2) + + self.r_map_data = R + + return self.r_map_data + + def q_map(self): + """Returns a 2D map of the q-value associated with each pixel position + in the detector image.""" + + if self.q_map_data is not None: + return self.q_map_data + + c = (self.pixel_size_um / 1e6) / self.distance_m + twotheta = np.arctan(self.r_map() * c) # radians + + self.q_map_data = 2.0 * self.get_k() * np.sin(twotheta / 2.0) + + return self.q_map_data + + def angle_map(self): + """Returns a map of the angle for each pixel (w.r.t. origin). + 0 degrees is vertical, +90 degrees is right, -90 degrees is left.""" + + if self.angle_map_data is not None: + return self.angle_map_data + + x = np.arange(self.width) - self.x0 + y = np.arange(self.height) - self.y0 + X, Y = np.meshgrid(x, y) + # M = np.degrees(np.arctan2(Y, X)) + # Note intentional inversion of the usual (x,y) convention. + # This is so that 0 degrees is vertical. + # M = np.degrees(np.arctan2(X, Y)) + + # TODO: Lookup some internal parameter to determine direction + # of normal. (This is what should befine the angle convention.) 
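+        # np.arctan2(X, -Y) realizes the convention stated in the docstring:
+        # 0 degrees for pixels straight "above" the origin (smaller y index),
+        # +90 degrees at larger x (right), -90 degrees at smaller x (left).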
+ M = np.degrees(np.arctan2(X, -Y)) + + self.angle_map_data = M + + return self.angle_map_data + + def qx_map(self): + if self.qx_map_data is not None: + return self.qx_map_data + + self._generate_qxyz_maps() + + return self.qx_map_data + + def qy_map(self): + if self.qy_map_data is not None: + return self.qy_map_data + + self._generate_qxyz_maps() + + return self.qy_map_data + + def qz_map(self): + if self.qz_map_data is not None: + return self.qz_map_data + + self._generate_qxyz_maps() + + return self.qz_map_data + + def qr_map(self): + if self.qr_map_data is not None: + return self.qr_map_data + + self._generate_qxyz_maps() + + return self.qr_map_data + + def _generate_qxyz_maps(self): + # Conversion factor for pixel coordinates + # (where sample-detector distance is set to d = 1) + c = (self.pixel_size_um / 1e6) / self.distance_m + + x = np.arange(self.width) - self.x0 + y = np.arange(self.height) - self.y0 + X, Y = np.meshgrid(x, y) + R = np.sqrt(X**2 + Y**2) + + # twotheta = np.arctan(self.r_map()*c) # radians + theta_f = np.arctan2(X * c, 1) # radians + # alpha_f_prime = np.arctan2( Y*c, 1 ) # radians + alpha_f = np.arctan2(Y * c * np.cos(theta_f), 1) # radians + + self.qx_map_data = self.get_k() * np.sin(theta_f) * np.cos(alpha_f) + self.qy_map_data = self.get_k() * (np.cos(theta_f) * np.cos(alpha_f) - 1) # TODO: Check sign + self.qz_map_data = -1.0 * self.get_k() * np.sin(alpha_f) + + self.qr_map_data = np.sign(self.qx_map_data) * np.sqrt( + np.square(self.qx_map_data) + np.square(self.qy_map_data) + ) + + # End class Calibration(object) + ######################################## + + +# CalibrationGonio +################################################################################ +class CalibrationGonio(Calibration): + """ + The geometric claculations used here are described: + http://gisaxs.com/index.php/Geometry:WAXS_3D + + """ + + # Experimental parameters + ######################################## + + def set_angles( + self, det_phi_g=0.0, det_theta_g=0.0, sam_phi=0, sam_chi=0, sam_theta=0, offset_x=0, offset_y=0, offset_z=0 + ): + """ + YG. 
Add sample rotation angles that convert qmap from lab frame to sample frame + All the angles are given in degrees + + sam_phi, rotate along lab-frame x, CHX phi + sam_chi, rotate along lab-frame z, CHX chi + sam_theta, rotate along lab-frame y, CHX theta + + YG add offset corrections at Sep 21, 2017 + + det_phi_g, rotate along y-axis, delta at CHX + det_theta_g, away from z-plane, gamma at CHX + + For SMI, because only rotate along y-axis, (det_theta_g=0.), only care about + offset_x, offset_z""" + # print('Set angles here') + + self.det_phi_g = det_phi_g + self.det_theta_g = det_theta_g + self.offset_x = offset_x + self.offset_y = offset_y + self.offset_z = offset_z + self.sam_phi = sam_phi + self.sam_chi = sam_chi + self.sam_theta = sam_theta + + def rotation_matix(self, sam_phi, sam_theta, sam_chi, degrees=True): + """ + sam_phi, rotate along lab-frame x, CHX phi + sam_chi, rotate along lab-frame z, CHX chi + sam_theta, rotate along lab-frame y, CHX theta + """ + + if degrees: + sam_phi, sam_chi, sam_theta = np.radians(sam_phi), np.radians(sam_chi), np.radians(sam_theta) + + Rx = np.array([[1, 0, 0], [0, np.cos(sam_phi), np.sin(sam_phi)], [0, -np.sin(sam_phi), np.cos(sam_phi)]]) + + Rz = np.array([[np.cos(sam_chi), np.sin(sam_chi), 0], [-np.sin(sam_chi), np.cos(sam_chi), 0], [0, 0, 1]]) + + Ry = np.array( + [[np.cos(sam_theta), 0, np.sin(sam_theta)], [0, 1, 0], [-np.sin(sam_theta), 0, np.cos(sam_theta)]] + ) + Rxy = np.dot(Rx, Ry) + return np.dot(Rxy, Rz) + + def _generate_qxyz_map_SF_from_Lab(self, qx, qy, qz, sam_phi, sam_theta, sam_chi, degrees=True): + """ + Convert qmap from Lab frame to sample frame + """ + self.Rot = self.rotation_matix(sam_phi, sam_theta, sam_chi, degrees=degrees) + qsx, qsy, qsz = np.dot(self.Rot, [np.ravel(qx), np.ravel(qy), np.ravel(qz)]) + return qsx.reshape(qx.shape), qsy.reshape(qy.shape), qsz.reshape(qz.shape) + + def _generate_qxyz_maps_samFrame(self, degrees=True): + """ + Get lab frame qmap + """ + self._generate_qxyz_maps() + self.qx_map_lab_data, self.qy_map_lab_data, self.qz_map_lab_data = self._generate_qxyz_map_SF_from_Lab( + self.qx_map_data, + self.qy_map_data, + self.qz_map_data, + self.sam_phi, + self.sam_theta, + self.sam_chi, + degrees=degrees, + ) + self.qr_map_lab_data = np.sqrt(np.square(self.qx_map_lab_data) + np.square(self.qy_map_lab_data)) + + self.q_map_lab_data = np.sqrt( + np.square(self.qx_map_lab_data) + np.square(self.qy_map_lab_data) + np.square(self.qz_map_lab_data) + ) + + def get_ratioDw(self): + width_mm = self.width * self.pixel_size_um / 1000.0 + return self.distance_m / (width_mm / 1000.0) + + # Maps + ######################################## + + def q_map(self): + if self.q_map_data is None: + self._generate_qxyz_maps() + + return self.q_map_data + + def angle_map(self): + if self.angle_map_data is not None: + self._generate_qxyz_maps() + + return self.angle_map_data + + def _generate_qxyz_maps_no_offest(self): + """ + The geometric claculations used here are described: + http://gisaxs.com/index.php/Geometry:WAXS_3D + + """ + + d = self.distance_m + pix_size = self.pixel_size_um / 1e6 + phi_g = np.radians(self.det_phi_g) + theta_g = np.radians(self.det_theta_g) + + xs = (np.arange(self.width) - self.x0) * pix_size + ys = (np.arange(self.height) - self.y0) * pix_size + # ys = ys[::-1] + + X_c, Y_c = np.meshgrid(xs, ys) + Dprime = np.sqrt(np.square(d) + np.square(X_c) + np.square(Y_c)) + k_over_Dprime = self.get_k() / Dprime + + qx_c = k_over_Dprime * ( + X_c * np.cos(phi_g) - np.sin(phi_g) * (d * np.cos(theta_g) - Y_c * 
np.sin(theta_g)) + ) + qy_c = k_over_Dprime * ( + X_c * np.sin(phi_g) + np.cos(phi_g) * (d * np.cos(theta_g) - Y_c * np.sin(theta_g)) - Dprime + ) + qz_c = -1 * k_over_Dprime * (d * np.sin(theta_g) + Y_c * np.cos(theta_g)) + + qr_c = np.sqrt(np.square(qx_c) + np.square(qy_c)) + q_c = np.sqrt(np.square(qx_c) + np.square(qy_c) + np.square(qz_c)) + + # Conversion factor for pixel coordinates + # (where sample-detector distance is set to d = 1) + c = (self.pixel_size_um / 1e6) / self.distance_m + + x = np.arange(self.width) - self.x0 + y = np.arange(self.height) - self.y0 + X, Y = np.meshgrid(x, y) + R = np.sqrt(X**2 + Y**2) + + # twotheta = np.arctan(self.r_map()*c) # radians + theta_f = np.arctan2(X * c, 1) # radians + # alpha_f_prime = np.arctan2( Y*c, 1 ) # radians + alpha_f = np.arctan2(Y * c * np.cos(theta_f), 1) # radians + + self.qx_map_data = self.get_k() * np.sin(theta_f) * np.cos(alpha_f) + self.qy_map_data = self.get_k() * (np.cos(theta_f) * np.cos(alpha_f) - 1) # TODO: Check sign + self.qz_map_data = -1.0 * self.get_k() * np.sin(alpha_f) + + self.qr_map_data = np.sign(self.qx_map_data) * np.sqrt( + np.square(self.qx_map_data) + np.square(self.qy_map_data) + ) + + self.qx_map_data = qx_c + self.qy_map_data = qy_c + self.qz_map_data = qz_c + self.q_map_data = q_c + + def _generate_qxyz_maps(self): + """ + The geometric claculations used here are described: + http://gisaxs.com/index.php/Geometry:WAXS_3D + + YG add offset corrections at Sep 21, 2017 + """ + + # print('Here to get qmap without offset.') + + d = self.distance_m # + pix_size = self.pixel_size_um / 1e6 # in meter + phi_g = np.radians(self.det_phi_g) + theta_g = np.radians(self.det_theta_g) + + offset_x = self.offset_x * pix_size # in meter + offset_y = self.offset_y * pix_size + offset_z = self.offset_z * pix_size + + xs = (np.arange(self.width) - self.x0) * pix_size + ys = (np.arange(self.height) - self.y0) * pix_size + + xsprime = xs - offset_x + dprime = d - offset_y + ysprime = ys - offset_z + # ys = ys[::-1] + + X_c, Y_c = np.meshgrid(xsprime, ysprime) + # Dprime = np.sqrt( np.square(d) + np.square(X_c) + np.square(Y_c) ) + # k_over_Dprime = self.get_k()/Dprime + yprime = dprime * np.cos(theta_g) - Y_c * np.sin(theta_g) + Dprime = np.sqrt( + np.square(dprime) + + np.square(X_c) + + np.square(Y_c) + + offset_x**2 + + offset_y**2 + + offset_z**2 + + 2 * offset_x * (X_c * np.cos(phi_g) - np.sin(phi_g) * yprime) + + 2 * offset_y * (X_c * np.sin(phi_g) + np.cos(phi_g) * yprime) + + 2 * offset_z * (dprime * np.sin(theta_g) + Y_c * np.cos(theta_g)) + ) + + k_over_Dprime = self.get_k() / Dprime + + qx_c = k_over_Dprime * (X_c * np.cos(phi_g) - np.sin(phi_g) * yprime + offset_x) + qy_c = k_over_Dprime * (X_c * np.sin(phi_g) + np.cos(phi_g) * yprime + offset_y - Dprime) + qz_c = -1 * k_over_Dprime * (dprime * np.sin(theta_g) + Y_c * np.cos(theta_g) + offset_z) + + qr_c = np.sqrt(np.square(qx_c) + np.square(qy_c)) + q_c = np.sqrt(np.square(qx_c) + np.square(qy_c) + np.square(qz_c)) + + self.qx_map_data = qx_c + self.qy_map_data = qy_c + self.qz_map_data = qz_c + self.q_map_data = q_c + self.qr_map_data = qr_c + + if False: # True: + # Conversion factor for pixel coordinates + # (where sample-detector distance is set to d = 1) + c = (self.pixel_size_um / 1e6) / self.distance_m + + x = np.arange(self.width) - self.x0 + y = np.arange(self.height) - self.y0 + X, Y = np.meshgrid(x, y) + R = np.sqrt(X**2 + Y**2) + + # twotheta = np.arctan(self.r_map()*c) # radians + theta_f = np.arctan2(X * c, 1) # radians + # alpha_f_prime = 
np.arctan2( Y*c, 1 ) # radians + alpha_f = np.arctan2(Y * c * np.cos(theta_f), 1) # radians + + self.qx_map_data1 = self.get_k() * np.sin(theta_f) * np.cos(alpha_f) + self.qy_map_data1 = self.get_k() * (np.cos(theta_f) * np.cos(alpha_f) - 1) # TODO: Check sign + self.qz_map_data1 = -1.0 * self.get_k() * np.sin(alpha_f) + + self.qr_map_data1 = np.sign(self.qx_map_data1) * np.sqrt( + np.square(self.qx_map_data1) + np.square(self.qy_map_data1) + ) diff --git a/pyCHX/backups/pyCHX-backup/SAXS.py b/pyCHX/backups/pyCHX-backup/SAXS.py new file mode 100644 index 0000000..fc2f54a --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/SAXS.py @@ -0,0 +1,1104 @@ +""" +Sep 10 Developed by Y.G.@CHX +yuzhang@bnl.gov +This module is for the static SAXS analysis, such as fit form factor +""" + +# import numpy as np +from lmfit import Model, Parameter, Parameters, fit_report, minimize, report_fit +from scipy.optimize import curve_fit, least_squares, leastsq +from scipy.special import gamma, gammaln + +from pyCHX.chx_generic_functions import find_index, plot1D, show_img + +# import matplotlib as mpl +# import matplotlib.pyplot as plt +# from matplotlib.colors import LogNorm +from pyCHX.chx_libs import * + + +def mono_sphere_form_factor_intensity(x, radius, delta_rho=100, fit_func="G"): + """ + Input: + x/q: in A-1, array or a value + radius/R: in A + + delta_rho: Scattering Length Density(SLD) difference between solvent and the scatter, A-2 + Output: + The form factor intensity of the mono dispersed scatter + """ + q = x + R = radius + qR = q * R + volume = (4.0 / 3.0) * np.pi * (R**3) + prefactor = 36 * np.pi * ((delta_rho * volume) ** 2) / (4 * np.pi) + P = (np.sin(qR) - qR * np.cos(qR)) ** 2 / (qR**6) + P *= prefactor + P = P.real + return P + + +def gaussion(x, u, sigma): + return 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-((x - u) ** 2) / (2 * (sigma**2))) + + +def Schultz_Zimm(x, u, sigma): + """http://sasfit.ingobressler.net/manual/Schultz-Zimm + See also The size distribution of ‘gold standard’ nanoparticles + Anal Bioanal Chem (2009) 395:1651–1660 + DOI 10.1007/s00216-009-3049-5 + """ + k = 1.0 / (sigma) ** 2 + return 1.0 / u * (x / u) ** (k - 1) * k**k * np.exp(-k * x / u) / gamma(k) + + +def distribution_func(radius=1.0, sigma=0.1, num_points=20, spread=3, func="G"): + """ + radius: the central radius + sigma: sqrt root of variance in percent + """ + + if 1 - spread * sigma <= 0: + spread = (1 - sigma) / sigma - 1 + # print( num_points ) + x, rs = np.linspace( + radius - radius * spread * sigma, radius + radius * spread * sigma, int(num_points), retstep=True + ) + # print(x) + if func == "G": + func = gaussion + elif func == "S": + func = Schultz_Zimm + + return x, rs, func(x, radius, radius * sigma) + + +def poly_sphere_form_factor_intensity( + x, radius, sigma=0.1, delta_rho=1.00, background=0, num_points=20, spread=5, fit_func="G" +): + """ + Input: + x/q: in A-1, array or a value + radius/R: in A + sigma:sqrt root of variance in percent + delta_rho: Scattering Length Density(SLD) difference between solvent and the scatter, A-2 + fit_func: G: Guassian;S: Flory–Schulz distribution + Output: + The form factor intensity of the polydispersed scatter + """ + q = x + R = radius + if not hasattr(q, "__iter__"): + q = np.array([q]) + v = np.zeros((len(q))) + if sigma == 0: + v = mono_sphere_form_factor_intensity(q, R, delta_rho) + else: + r, rs, wt = distribution_func(radius=R, sigma=sigma, num_points=num_points, spread=spread, func=fit_func) + for i, Ri in enumerate(r): + # print(Ri, wt[i],delta_rho, rs) + v 
+= mono_sphere_form_factor_intensity(q, Ri, delta_rho) * wt[i] * rs + return v + background # * delta_rho + + +def poly_sphere_form_factor_intensity_q2( + x, radius, sigma=0.1, delta_rho=1, fit_func="G" +): # , scale=1, baseline=0): + """ + Input: + x/q: in A-1, array or a value + radius/R: in A + sigma:sqrt root of variance in percent + delta_rho: Scattering Length Density(SLD) difference between solvent and the scatter, A-2 + Output: + The form factor intensity of the polydispersed scatter + """ + + return poly_sphere_form_factor_intensity(x, radius, sigma, delta_rho, fit_func) * x**2 # * scale + baseline + + +def find_index_old(x, x0, tolerance=None): + # find the position of P in a list (plist) with tolerance + + N = len(x) + i = 0 + position = None + if tolerance == None: + tolerance = (x[1] - x[0]) / 2.0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + for item in x: + if abs(item - x0) <= tolerance: + position = i + # print 'Found Index!!!' + break + i += 1 + return position + + +def form_factor_residuals(p, iq, q, num_points=20, spread=5, fit_func="G", form_model="poly_sphere"): + """Residuals for fit iq by spheical form factor using leastsq. + p: parameters for radius, sigma, delta_rho, background + + """ + + radius, sigma, delta_rho, background = p + fiq = poly_sphere_form_factor_intensity( + q, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + radius, sigma, delta_rho, background = abs(radius), abs(sigma), abs(delta_rho), abs(background) + err = np.log(iq / fiq) + return np.sqrt(np.abs(err)) + + +def form_factor_residuals_bg( + p, iq, q, num_points=20, spread=5, fit_func="G", form_model="poly_sphere" +): # , qpower=-4.0, ): + """Residuals for fit iq by spheical form factor using leastsq. + p: parameters for radius, sigma, delta_rho, background + """ + + radius, sigma, delta_rho, background, q4_scale, qpower = p + bk = q4_scale * q ** (qpower) + background + fiq = ( + poly_sphere_form_factor_intensity( + q, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=0, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + + bk + ) + radius, sigma, delta_rho, background, q4_scale = ( + abs(radius), + abs(sigma), + abs(delta_rho), + abs(background), + abs(q4_scale), + ) + err = np.log(iq / fiq) # iq- (fiq + bk ) + return np.sqrt(np.abs(err)) + + +def form_factor_residuals_lmfit(p, iq, q, num_points=20, spread=5, fit_func="G", form_model="poly_sphere"): + """Residuals for fit iq by spheical form factor using leastsq. + p: parameters for radius, sigma, delta_rho, background + """ + radius, sigma, delta_rho, background = ( + p["radius"], + p["sigma"], + p["delta_rho"], + p["background"], + ) + + fiq = poly_sphere_form_factor_intensity( + q, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + err = np.log(iq / fiq) + return err + + +def form_factor_residuals_bg_lmfit(p, iq, q, num_points=20, spread=5, fit_func="G", form_model="poly_sphere"): + """Residuals for fit iq by spheical form factor using leastsq. 
+ p: parameters for radius, sigma, delta_rho, background + """ + radius, sigma, delta_rho, background, qpower_scale, qpower = ( + p["radius"], + p["sigma"], + p["delta_rho"], + p["background"], + p["qpower_scale"], + p["qpower"], + ) + bk = qpower_scale * q ** (qpower) + background + fiq = ( + poly_sphere_form_factor_intensity( + q, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=0, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + + bk + ) + err = np.log(iq / fiq) + return err + + +def get_form_factor_fit_lmfit( + q, + iq, + guess_values, + guess_limit=None, + fit_range=None, + fit_variables=None, + function="poly_sphere", + fit_func="G", + qpower_bg=False, + num_points=20, + spread=5, + *argv, + **kwargs +): + """ + YG Dev@CHX 2019/8/1 + + Fit form factor + The support fitting functions include + poly_sphere (poly_sphere_form_factor_intensity), + mono_sphere (mono_sphere_form_factor_intensity) + Parameters + ---------- + q: q vector + iq: form factor + qpower_bg: if True, consider a q**(-power) background in the fitting + guess_values:a dict, contains keys + radius: the initial guess of spherecentral radius + sigma: the initial guess of sqrt root of variance in percent + + function: + mono_sphere (mono_sphere_form_factor_intensity): fit by mono dispersed sphere model + poly_sphere (poly_sphere_form_factor_intensity): fit by poly dispersed sphere model + + Returns + ------- + fit resutls: + radius + sigma + an example: + result = fit_form_factor( q, iq, res_pargs=None,function='poly_sphere' + """ + # print(q4_bg) + if fit_range is not None: + x1, x2 = fit_range + q1 = find_index(q, x1, tolerance=None) + q2 = find_index(q, x2, tolerance=None) + else: + q1 = 0 + q2 = len(q) + + q_ = q[q1:q2] + iq_ = iq[q1:q2] + pars = Parameters() + for var in list(guess_values.keys()): + pars.add(var, value=guess_values[var]) + if not qpower_bg: + mod = form_factor_residuals_lmfit + else: + # print('here') + mod = form_factor_residuals_bg_lmfit + + if guess_limit is not None: + for var in list(guess_limit.keys()): + m, M = guess_limit[var] + pars[var].min = m + pars[var].max = M + if fit_variables is not None: + for var in list(fit_variables.keys()): + pars[var].vary = fit_variables[var] + # print( pars ) + result = minimize( + mod, pars, args=(iq_, q_), kws={"num_points": num_points, "spread": spread, "fit_func": fit_func} + ) + fitp = {} + fitpe = {} + rp = result.params + for var in list(rp.keys()): + fitp[var] = rp[var].value + fitpe[var] = rp[var].stderr + if not qpower_bg: + radius, sigma, delta_rho, background = ( + fitp["radius"], + fitp["sigma"], + fitp["delta_rho"], + fitp["background"], + ) + fitq = poly_sphere_form_factor_intensity( + q_, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + else: + radius, sigma, delta_rho, background, qpower_scale, qpower = ( + fitp["radius"], + fitp["sigma"], + fitp["delta_rho"], + fitp["background"], + fitp["qpower_scale"], + fitp["qpower"], + ) + bk = qpower_scale * q_ ** (qpower) + background + fitq = ( + poly_sphere_form_factor_intensity( + q_, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=0, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + + bk + ) + + # yf= result.model.eval(params=result.params, x=q_) + # print( result.best_values ) + return fitp, fitpe, q_, fitq, result + + +def get_form_factor_fit2( + q, + iq, + guess_values, + fit_range=None, + fit_variables=None, + 
function="poly_sphere", + fit_func="G", + q4_bg=False, + num_points=20, + spread=5, + bounds=None, + *argv, + **kwargs +): + """ + Fit form factor + The support fitting functions include + poly_sphere (poly_sphere_form_factor_intensity), + mono_sphere (mono_sphere_form_factor_intensity) + Parameters + ---------- + q: q vector + iq: form factor + q4_bg: if True, consider a q**(-4) background in the fitting + guess_values:a dict, contains keys + radius: the initial guess of spherecentral radius + sigma: the initial guess of sqrt root of variance in percent + + function: + mono_sphere (mono_sphere_form_factor_intensity): fit by mono dispersed sphere model + poly_sphere (poly_sphere_form_factor_intensity): fit by poly dispersed sphere model + + Returns + ------- + fit resutls: + radius + sigma + an example: + result = fit_form_factor( q, iq, res_pargs=None,function='poly_sphere' + """ + # print(q4_bg) + if fit_range is not None: + x1, x2 = fit_range + q1 = find_index(q, x1, tolerance=None) + q2 = find_index(q, x2, tolerance=None) + else: + q1 = 0 + q2 = len(q) + + q_ = q[q1:q2] + iq_ = iq[q1:q2] + if not q4_bg: + fit_funcs = form_factor_residuals + radius, sigma, delta_rho, background = ( + guess_values["radius"], + guess_values["sigma"], + guess_values["delta_rho"], + guess_values["background"], + ) + p = [radius, sigma, delta_rho, background] + pfit, pcov, infodict, errmsg, success = leastsq( + fit_funcs, + [p], + args=(iq_, q_, num_points, spread, fit_func, function), + full_output=1, + ftol=1.49012e-38, + xtol=1.49012e-10, + factor=100, + ) + else: + # print('here') + fit_funcs = form_factor_residuals_bg + radius, sigma, delta_rho, background, q4_scale, qpower = ( + guess_values["radius"], + guess_values["sigma"], + guess_values["delta_rho"], + guess_values["background"], + guess_values["q4_scale"], + guess_values["qpower"], + ) + p = [radius, sigma, delta_rho, background, q4_scale, qpower] + if bounds is None: + bounds = (-np.inf, np.inf) + print(p) # , qpower) + pfit, pcov, infodict, errmsg, success = leastsq( + fit_funcs, + [p], + args=(iq_, q_, num_points, spread, fit_func, function), + ftol=1.49012e-38, + xtol=1.49012e-10, + ) + + # print(q4_bg) + # resL = leastsq( fit_funcs, [ p ], args=( iq_, q_, num_points, spread, fit_func, function ), + # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) + + # radius, sigma, delta_rho, background = np.abs(pfit) + if not q4_bg: + radius, sigma, delta_rho, background = pfit + fitq = poly_sphere_form_factor_intensity( + q_, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + else: + radius, sigma, delta_rho, background, q4_scale, qpower = pfit + bk = q4_scale * q_ ** (qpower) + background + fitq = ( + poly_sphere_form_factor_intensity( + q_, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=0, + num_points=num_points, + spread=spread, + fit_func=fit_func, + ) + + bk + ) + + if (len(iq_) > len(p)) and pcov is not None: + s_sq = (fit_funcs(pfit, iq_, q_, num_points, spread, fit_func, function)).sum() / (len(iq_) - len(p)) + pcov = pcov * s_sq + else: + pcov = np.inf + # print(pcov) + error = [] + for i in range(len(pfit)): + try: + error.append(np.absolute(pcov[i][i]) ** 0.5) + except: + error.append(None) + pfit_leastsq = pfit + perr_leastsq = np.array(error) + + return pfit_leastsq, perr_leastsq, q_, fitq # , resL + + +def get_form_factor_fit( + q, + iq, + guess_values, + fit_range=None, + fit_variables=None, + 
function="poly_sphere", + fit_func="G", + fit_power=0, + *argv, + **kwargs +): + """ + Fit form factor for GUI + + The support fitting functions include + poly_sphere (poly_sphere_form_factor_intensity), + mono_sphere (mono_sphere_form_factor_intensity) + Parameters + ---------- + q: q vector + iq: form factor + + guess_values:a dict, contains keys + radius: the initial guess of spherecentral radius + sigma: the initial guess of sqrt root of variance in percent + + function: + mono_sphere (mono_sphere_form_factor_intensity): fit by mono dispersed sphere model + poly_sphere (poly_sphere_form_factor_intensity): fit by poly dispersed sphere model + + Returns + ------- + fit resutls: + radius + sigma + an example: + result = fit_form_factor( q, iq, res_pargs=None,function='poly_sphere' + """ + + if function == "poly_sphere": + mod = Model(poly_sphere_form_factor_intensity) # _q2 ) + elif function == "mono_sphere": + mod = Model(mono_sphere_form_factor_intensity) + else: + print("The %s is not supported.The supported functions include poly_sphere and mono_sphere" % function) + + if fit_range is not None: + x1, x2 = fit_range + q1 = find_index(q, x1, tolerance=None) + q2 = find_index(q, x2, tolerance=None) + else: + q1 = 0 + q2 = len(q) + + q_ = q[q1:q2] + iq_ = iq[q1:q2] + + _r = guess_values["radius"] + _sigma = guess_values["sigma"] + _delta_rho = guess_values["delta_rho"] + _background = guess_values["background"] + # _scale = guess_values['scale'] + # _baseline = guess_values['baseline'] + + mod.set_param_hint("radius", min=_r / 10, max=_r * 10) + mod.set_param_hint("sigma", min=_sigma / 10, max=_sigma * 10) + # mod.set_param_hint( 'scale', min= _scale/1E3, max= _scale*1E3 ) + # mod.set_param_hint( 'baseline', min= 0 ) + # mod.set_param_hint( 'delta_rho', min= 0 ) + # mod.set_param_hint( 'delta_rho', min= _delta_rho/1E6, max= _delta_rho*1E6 ) + pars = mod.make_params( + radius=_r, sigma=_sigma, delta_rho=_delta_rho, background=_background + ) # scale= _scale, baseline =_baseline ) + + if fit_variables is not None: + for var in list(fit_variables.keys()): + pars[var].vary = fit_variables[var] + # pars['delta_rho'].vary =False + # fit_power = 0 + result = mod.fit(iq_ * q_**fit_power, pars, x=q_) # , fit_func=fit_func ) + if function == "poly_sphere": + sigma = result.best_values["sigma"] + elif function == "mono_sphere": + sigma = 0 + r = result.best_values["radius"] + # scale = result.best_values['scale'] + # baseline = result.best_values['baseline'] + delta_rho = result.best_values["delta_rho"] + print(result.best_values) + return result, q_ + + +def plot_form_factor_with_fit(q, iq, q_, result, fit_power=0, res_pargs=None, return_fig=False, *argv, **kwargs): + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + + # fig = Figure() + # ax = fig.add_subplot(111) + fig, ax = plt.subplots() + + title_qr = "form_factor_fit" + plt.title("uid= %s:--->" % uid + title_qr, fontsize=20, y=1.02) + + r = result.best_values["radius"] + delta_rho = result.best_values["delta_rho"] + sigma = result.best_values["sigma"] + + ax.semilogy(q, iq, "ro", label="Form Factor") + ax.semilogy(q_, result.best_fit / q_**fit_power, "-b", lw=3, label="Fit") + + txts = r"radius" + r" = %.2f " % (r / 10.0) + r"$ nm$" + ax.text(x=0.02, y=0.35, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"sigma" + r" = %.3f" % (sigma) + # txts = 
r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.25, s=txts, fontsize=14, transform=ax.transAxes) + # txts = r'delta_rho' + r' = %.3e'%( delta_rho) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + # ax.text(x =0.02, y=.35, s=txts, fontsize=14, transform=ax.transAxes) + ax.legend(loc="best") + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + fp = path + "uid=%s--form_factor--fit-" % (uid) + ".png" + plt.savefig(fp, dpi=fig.dpi) + # fig.tight_layout() + plt.show() + + if return_fig: + return fig + + +def fit_form_factor( + q, + iq, + guess_values, + fit_range=None, + fit_variables=None, + res_pargs=None, + function="poly_sphere", + fit_func="G", + return_fig=False, + *argv, + **kwargs +): + """ + Fit form factor + + The support fitting functions include + poly_sphere (poly_sphere_form_factor_intensity), + mono_sphere (mono_sphere_form_factor_intensity) + Parameters + ---------- + q: q vector + iq: form factor + res_pargs: a dict, contains keys, such path, uid... + + guess_values:a dict, contains keys + radius: the initial guess of spherecentral radius + sigma: the initial guess of sqrt root of variance in percent + + function: + mono_sphere (mono_sphere_form_factor_intensity): fit by mono dispersed sphere model + poly_sphere (poly_sphere_form_factor_intensity): fit by poly dispersed sphere model + + Returns + ------- + fit resutls: + radius + sigma + an example: + result = fit_form_factor( q, iq, res_pargs=None,function='poly_sphere' + """ + + result, q_ = get_form_factor_fit( + q, iq, guess_values, fit_range=fit_range, fit_variables=fit_variables, function=function, fit_func=fit_func + ) + plot_form_factor_with_fit(q, iq, q_, result, fit_power=0, res_pargs=res_pargs, return_fig=return_fig) + + return result + + +def fit_form_factor2( + q, + iq, + guess_values, + fit_range=None, + fit_variables=None, + res_pargs=None, + function="poly_sphere", + fit_func="G", + *argv, + **kwargs +): + """ + Fit form factor + + The support fitting functions include + poly_sphere (poly_sphere_form_factor_intensity), + mono_sphere (mono_sphere_form_factor_intensity) + Parameters + ---------- + q: q vector + iq: form factor + res_pargs: a dict, contains keys, such path, uid... 
+ + guess_values:a dict, contains keys + radius: the initial guess of spherecentral radius + sigma: the initial guess of sqrt root of variance in percent + + function: + mono_sphere (mono_sphere_form_factor_intensity): fit by mono dispersed sphere model + poly_sphere (poly_sphere_form_factor_intensity): fit by poly dispersed sphere model + + Returns + ------- + fit resutls: + radius + sigma + an example: + result = fit_form_factor( q, iq, res_pargs=None,function='poly_sphere' + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + + if function == "poly_sphere": + mod = Model(poly_sphere_form_factor_intensity) # _q2 ) + elif function == "mono_sphere": + mod = Model(mono_sphere_form_factor_intensity) + else: + print("The %s is not supported.The supported functions include poly_sphere and mono_sphere" % function) + + if fit_range is not None: + x1, x2 = fit_range + q1 = find_index(q, x1, tolerance=None) + q2 = find_index(q, x2, tolerance=None) + else: + q1 = 0 + q2 = len(q) + + q_ = q[q1:q2] + iq_ = iq[q1:q2] + + _r = guess_values["radius"] + _sigma = guess_values["sigma"] + _delta_rho = guess_values["delta_rho"] + if "background" in list(guess_values.keys()): + _bk = guess_values["background"] + else: + _bk = 0 + # _scale = guess_values['scale'] + # _baseline = guess_values['baseline'] + + mod.set_param_hint("radius", min=_r / 10, max=_r * 10) + mod.set_param_hint("sigma", min=_sigma / 10, max=_sigma * 10) + # mod.set_param_hint( 'scale', min= _scale/1E3, max= _scale*1E3 ) + mod.set_param_hint("background", min=0) + # mod.set_param_hint( 'delta_rho', min= 0 ) + mod.set_param_hint("delta_rho", min=_delta_rho / 1e6, max=_delta_rho * 1e6) + pars = mod.make_params( + radius=_r, sigma=_sigma, delta_rho=_delta_rho, background=_bk + ) # scale= _scale, baseline =_baseline ) + + if fit_variables is not None: + for var in list(fit_variables.keys()): + pars[var].vary = fit_variables[var] + # pars['delta_rho'].vary =False + + fig = plt.figure(figsize=(8, 6)) + title_qr = "form_factor_fit" + plt.title("uid= %s:--->" % uid + title_qr, fontsize=20, y=1.02) + + fit_power = 0 # 2 + + result = mod.fit(iq_ * q_**fit_power, pars, x=q_) # ,fit_func= fit_func ) + + if function == "poly_sphere": + sigma = result.best_values["sigma"] + elif function == "mono_sphere": + sigma = 0 + + r = result.best_values["radius"] + # scale = result.best_values['scale'] + # baseline = result.best_values['baseline'] + delta_rho = result.best_values["delta_rho"] + + # report_fit( result ) + + ax = fig.add_subplot(1, 1, 1) + + ax.semilogy(q, iq, "ro", label="Form Factor") + ax.semilogy(q_, result.best_fit / q_**fit_power, "-b", lw=3, label="Fit") + + txts = r"radius" + r" = %.2f " % (r / 10.0) + r"$ nm$" + ax.text(x=0.02, y=0.35, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"sigma" + r" = %.3f" % (sigma) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.25, s=txts, fontsize=14, transform=ax.transAxes) + # txts = r'delta_rho' + r' = %.3e'%( delta_rho) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + # ax.text(x =0.02, y=.35, s=txts, fontsize=14, transform=ax.transAxes) + ax.legend(loc="best") + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + 
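+ # optional x-axis limits, passed through kwargs as xlim=(xmin, xmax)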
ax.set_xlim(kwargs["xlim"]) + + fp = path + "%s_form_factor_fit" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + result = dict(radius=r, sigma=sigma, delta_rho=delta_rho) + + return result + + +def show_saxs_qmap( + img, + pargs, + width=200, + vmin=0.1, + vmax=300, + logs=True, + image_name="", + show_colorbar=True, + file_name="", + show_time=False, + save=False, + show_pixel=False, + aspect=1, + save_format="png", + cmap="viridis", +): + """ + Show a SAXS q-map by giving + Parameter: + image: the frame + setup pargs, a dictionary, including + dpix #in mm, eiger 4m is 0.075 mm + lambda_ # wavelegth of the X-rays in Angstroms + Ldet # detector to sample distance (mm) + path where to save data + center: beam center in pixel, center[0] (x), should be image-y, and should be python-x + width: the showed area centered at center + Return: + None + """ + + Ldet = pargs["Ldet"] + dpix = pargs["dpix"] + lambda_ = pargs["lambda_"] + center = pargs["center"] + cx, cy = center + path = pargs["path"] + lx, ly = img.shape + # center = [ center[1], center[0] ] #due to python conventions + w = width + + img_ = np.zeros([w, w]) + minW, maxW = min(center[0] - w, center[1] - w), max(center[0] - w, center[1] - w) + if w < minW: + img_ = img[cx - w // 2 : cx + w // 2, cy + w // 2 : cy + w // 2] + # elif w > maxW: + # img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = + + ROI = [max(0, center[0] - w), min(center[0] + w, lx), max(0, center[1] - w), min(ly, center[1] + w)] + # print( ROI ) + ax = plt.subplots() + if not show_pixel: + # print( 'here' ) + two_theta = utils.radius_to_twotheta( + Ldet, + np.array( + [ + (ROI[0] - cx) * dpix, + (ROI[1] - cx) * dpix, + (ROI[2] - cy) * dpix, + (ROI[3] - cy) * dpix, + ] + ), + ) + qext = utils.twotheta_to_q(two_theta, lambda_) + # print( two_theta, qext ) + + show_img( + 1e-15 + img[ROI[0] : ROI[1], ROI[2] : ROI[3]], + ax=ax, + xlabel=r"$q_x$" + "(" + r"$\AA^{-1}$" + ")", + ylabel=r"$q_y$" + "(" + r"$\AA^{-1}$" + ")", + extent=[qext[3], qext[2], qext[0], qext[1]], + vmin=vmin, + vmax=vmax, + logs=logs, + image_name=image_name, + file_name=file_name, + show_time=show_time, + save_format=save_format, + cmap=cmap, + show_colorbar=show_colorbar, + save=save, + path=path, + aspect=aspect, + ) + else: + # qext = w + show_img( + 1e-15 + img[ROI[0] : ROI[1], ROI[2] : ROI[3]], + ax=ax, + xlabel="pixel", + ylabel="pixel", + extent=[ROI[0], ROI[1], ROI[2], ROI[3]], + vmin=vmin, + vmax=vmax, + logs=logs, + image_name=image_name, + save_format=save_format, + cmap=cmap, + show_colorbar=show_colorbar, + file_name=file_name, + show_time=show_time, + save=save, + path=path, + aspect=aspect, + ) + return ax + + +######################## +##Fit sphere by scipy.leastsq fit + + +def fit_sphere_form_factor_func(parameters, ydata, xdata, yerror=None, nonvariables=None): + """##Develop by YG at July 28, 2017 @CHX + This function is for fitting form factor of polyderse spherical particles by using scipy.leastsq fit + + radius, sigma, delta_rho, background = parameters + """ + radius, sigma, delta_rho, background = parameters + fit = poly_sphere_form_factor_intensity( + xdata, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + num_points=10, + spread=3, + fit_func="G", + ) + error = np.abs(ydata - fit) + return np.sqrt(error) + + +def fit_sphere_form_factor_by_leastsq( + p0, + q, + pq, + fit_range=None, +): + """##Develop by YG at July 28, 2017 @CHX + Fitting form factor of polyderse spherical particles by using scipy.leastsq fit + 
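+ A minimal usage sketch (assuming q and pq are already-loaded 1-D arrays of wave vector and measured form factor; the numbers are purely illustrative):
+ p0 = [250.0, 0.1, 1.0, 0.0] # initial guess: radius (Angstrom), sigma, delta_rho, background
+ res = fit_sphere_form_factor_by_leastsq(p0, q, pq, fit_range=[0.005, 0.05])
+ radius_fit, sigma_fit, delta_rho_fit, background_fit = res[0]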
Input: + radius, sigma, delta_rho, background = p0 + Return + fit res, res[0] is the fitting parameters + """ + if fit_range is not None: + x1, x2 = fit_range + q1, q2 = find_index(q, x1), find_index(q, x2) + res = leastsq( + fit_sphere_form_factor_func, + [p0], + args=( + pq[q1:q2], + q[q1:q2], + ), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + return res + + +def plot_fit_sphere_form_factor(q, pq, res, p0=None, xlim=None, ylim=None): + """##Develop by YG at July 28, 2017 @CHX""" + + if p0 is not None: + radius, sigma, delta_rho, background = p0 + fit_init = poly_sphere_form_factor_intensity( + q, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + ) + radius, sigma, delta_rho, background = res[0] + fit = poly_sphere_form_factor_intensity( + q, + radius=radius, + sigma=sigma, + delta_rho=delta_rho, + background=background, + ) + + fig, ax = plt.subplots() + if p0 is not None: + plot1D(x=q, y=fit_init, c="b", m="", ls="-", lw=3, ax=ax, logy=True, legend="Init_Fitting") + plot1D(x=q, y=fit, c="r", m="", ls="-", lw=3, ax=ax, logy=True, legend="Fitting") + plot1D( + x=q, + y=pq, + c="k", + m="X", + ax=ax, + markersize=3, + ls="", + legend="data", + xlim=xlim, + ylim=ylim, + logx=True, + xlabel="Q (A-1)", + ylabel="P(Q)", + ) + + txts = r"radius" + r" = %.2f " % (res[0][0] / 10.0) + r"$ nm$" + ax.text(x=0.02, y=0.25, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"sigma" + r" = %.3f" % (res[0][1]) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.15, s=txts, fontsize=14, transform=ax.transAxes) + + +def exm_plot(): + fig, ax = plt.subplots() + + ax.semilogy(q, iq, "ro", label="data") + ax.semilogy(q, ff, "-b", label="fit") + ax.set_xlim([0.0001, 0.01]) + ax.set_ylim([1e-2, 1e4]) + ax.legend(loc="best") + # plot1D( iq, q, logy=True, xlim=[0.0001, .01], ylim=[1E-3,1E4], ax=ax, legend='data') + # plot1D( ff, q, logy=True, xlim=[0.0001, .01], ax=ax, legend='cal') + + # %run /XF11ID/analysis/Analysis_Pipelines/Develop/pyCHX/pyCHX/XPCS_SAXS.py + # %run /XF11ID/analysis/Analysis_Pipelines/Develop/pyCHX/pyCHX/chx_generic_functions.py + # %run /XF11ID/analysis/Analysis_Pipelines/Develop/pyCHX/pyCHX/SAXS.py diff --git a/pyCHX/backups/pyCHX-backup/Stitching.py b/pyCHX/backups/pyCHX-backup/Stitching.py new file mode 100644 index 0000000..e78bdd3 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/Stitching.py @@ -0,0 +1,527 @@ +import os +import re +import sys + +import matplotlib.pyplot as plt +import numpy as np +import PIL +from scipy.signal import savgol_filter as sf + +from pyCHX.chx_generic_functions import plot1D, show_img +from pyCHX.DataGonio import convert_Qmap + + +def get_base_all_filenames(inDir, base_filename_cut_length=-7): + """YG Sep 26, 2017 @SMI + Get base filenames and their related all filenames + Input: + inDir, str, input data dir + base_filename_cut_length: to which length the base name is unique + Output: + dict: keys, base filename + vales, all realted filename + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifsc = list(tifs.copy()) + utifs = np.sort(np.unique(np.array([f[:base_filename_cut_length] for f in tifs])))[::-1] + files = {} + for uf in utifs: + files[uf] = [] + i = 0 + reName = [] + for i in range(len(tifsc)): + if uf in tifsc[i]: + files[uf].append(inDir + tifsc[i]) + reName.append(tifsc[i]) + for fn in reName: + tifsc.remove(fn) + return files + + +def 
check_stitch_two_imgs(img1, img2, overlap_width, badpixel_width=10): + """YG Check the stitched two imgs + For SMI, overlap_width = 85 #for calibration data--Kevin gives one, 4 degree + overlap_width = 58 #for TWD data, 5 degree step + For SMI: + d0 = np.array( PIL.Image.open(infiles[0]).convert('I') ).T + d1 = np.array( PIL.Image.open(infiles[1]).convert('I') ).T + Example: + + img1 = np.array( PIL.Image.open(infiles[0]).convert('I') ).T + img2 = np.array( PIL.Image.open(infiles[1]).convert('I') ).T + img12 = check_stitch_two_imgs( img1, img2, overlap_width=58, badpixel_width =10 ) + show_img(img12[200:800, 120:250], logs = False, cmap = cmap_vge_hdr, vmin=0.8*img12.min(), + vmax= 1.2*img12.max(), aspect=1,image_name = 'Check_Overlap') + + """ + w = overlap_width + ow = badpixel_width + M, N = img1.shape[0], img1.shape[1] + d0 = img1 + d1 = img2 + d01 = np.zeros([M, N * 2 - w * (2 - 1)]) + d01[:, :N] = d0 + i = 1 + a1, a2, b1, b2 = N * i - w * (i - 1) - ow, N * (i + 1) - w * i, w - ow, N + d01[:, a1:a2] = d1[:, b1:b2] + return d01 + + +def Correct_Overlap_Images_Intensities( + infiles, window_length=101, polyorder=5, overlap_width=58, badpixel_width=10 +): + """YG Correct WAXS Images intensities by using overlap area intensity + Image intensity keep same for the first image + Other image intensity is scaled by a pixel-width intensity array, which is averaged in the overlap area and then smoothed by + scipy.signal import savgol_filter with parameters as window_length=101, polyorder=5, + + from scipy.signal import savgol_filter as sf + + Return: data: array, stitched image with corrected intensity + dataM: dict, each value is the image with correted intensity + scale: scale for each image, the first scale=1 by defination + scale_smooth: smoothed scale + + Exampe: + data, dataM, scale,scale_smooth = Correct_Overlap_Images_Intensities( infiles, window_length=101, polyorder=5, + overlap_width=58, badpixel_width =10 ) + + show_img(data, logs = True, vmin=0.8* data.min(), vmax= 1.2*data.max() + cmap = cmap_vge_hdr, aspect=1, image_name = 'Merged_Sca_Img') + + + fig = plt.figure()# figsize=[2,8]) + for i in range(len(infiles)): + #print(i) + ax = fig.add_subplot(1,8, i+1) + d = process.load( infiles[i] ) + show_img( dataM[i], logs = True, show_colorbar= False,show_ticks =False, + ax= [fig, ax], image_name= '%02d'%(i+1), cmap = cmap_vge_hdr, vmin=100, vmax=2e3, + aspect=1, save=True, path=ResDir) + + + + """ + + w = overlap_width + ow = badpixel_width + Nf = len(infiles) + dataM = {} + + for i in range(len(infiles)): + d = np.array(PIL.Image.open(infiles[i]).convert("I")).T / 1.0 + if i == 0: + M, N = d.shape[0], d.shape[1] + data = np.zeros([M, N * Nf - w * (Nf - 1)]) + data[:, :N] = d + scale = np.zeros([len(infiles), M]) + scale_smooth = np.zeros([len(infiles), M]) + overlap_int = np.zeros([2 * len(infiles) - 1, M]) + overlap_int[0] = np.average(d[:, N - w : N - ow], axis=1) + scale[0] = 1 + scale_smooth[0] = 1 + dataM[0] = d + else: + a1, a2, b1, b2 = N * i - w * (i - 1) - ow, N * (i + 1) - w * i, w - ow, N + overlap_int[2 * i - 1] = np.average(d[:, 0 : w - ow], axis=1) + overlap_int[2 * i] = np.average(d[:, N - w : N - ow], axis=1) + scale[i] = overlap_int[2 * i - 2] / overlap_int[2 * i - 1] * scale[i - 1] + scale_smooth[i] = sf( + scale[i], + window_length=window_length, + polyorder=polyorder, + deriv=0, + delta=1.0, + axis=-1, + mode="mirror", + cval=0.0, + ) + data[:, a1:a2] = d[:, b1:b2] * np.repeat(scale_smooth[i], b2 - b1, axis=0).reshape([M, b2 - b1]) + dataM[i] = np.zeros_like(dataM[i 
- 1]) + dataM[i][:, 0 : w - ow] = dataM[i - 1][:, N - w : N - ow] + dataM[i][:, w - ow :] = data[:, a1:a2] + return data, dataM, scale, scale_smooth + + +def check_overlap_scaling_factor(scale, scale_smooth, i=1, filename=None, save=False): + """check_overlap_scaling_factor( scale,scale_smooth, i=1 )""" + fig, ax = plt.subplots() + plot1D(scale[i], m="o", c="k", ax=ax, title="Scale_averaged_line_intensity_%s" % i, ls="", legend="Data") + plot1D( + scale_smooth[i], + ax=ax, + title="Scale_averaged_line_intensity_%s" % i, + m="", + c="r", + ls="-", + legend="Smoothed", + ) + if save: + fig.savefig(filename) + + +def stitch_WAXS_in_Qspace(dataM, phis, calibration, dx=0, dy=22, dz=0, dq=0.015, mask=None): + """YG Octo 11, 2017 stitch waxs scattering images in qspace + dataM: the data (with corrected intensity), dict format (todolist, make array also avialable) + phis: for SMI, the rotation angle around z-aixs + For SMI + dx= 0 #in pixel unit + dy = 22 #in pixel unit + dz = 0 + calibration: class, for calibration + + Return: Intensity_map, qxs, qzs + + Example: + phis = np.array( [get_phi(infile, + phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data + + calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV + #calibration.set_image_size( data.shape[1], data.shape[0] ) + calibration.set_image_size(195, height=1475) # Pilatus300kW vertical + calibration.set_pixel_size(pixel_size_um=172.0) + calibration.set_beam_position(97.0, 1314.0) + calibration.set_distance(0.275) + + Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) + #Get center of the qmap + bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) + print( bx, by ) + + """ + + q_range = get_qmap_range(calibration, phis[0], phis[-1]) + if q_range[0] > 0: + q_range[0] = 0 + if q_range[2] > 0: + q_range[2] = 0 + qx_min = q_range[0] + qx_max = q_range[1] + qxs = np.arange(q_range[0], q_range[1], dq) + qzs = np.arange(q_range[2], q_range[3], dq) + QXs, QZs = np.meshgrid(qxs, qzs) + num_qx = len(qxs) + qz_min = q_range[2] + qz_max = q_range[3] + num_qz = len(qzs) + phi = phis[0] + + Intensity_map = np.zeros((len(qzs), len(qxs))) + count_map = np.zeros((len(qzs), len(qxs))) + # Intensity_mapN = np.zeros( (8, len(qzs), len(qxs)) ) + for i in range(len(phis)): + dM = np.rot90(dataM[i].T) + D = dM.ravel() + phi = phis[i] + calibration.set_angles(det_phi_g=phi, det_theta_g=0.0, offset_x=dx, offset_y=dy, offset_z=dz) + calibration.clear_maps() + QZ = calibration.qz_map().ravel() # [pixel_list] + QX = calibration.qx_map().ravel() # [pixel_list] + bins = [num_qz, num_qx] + rangeq = [[qz_min, qz_max], [qx_min, qx_max]] + # Nov 7,2017 using new func to qmap + remesh_data, zbins, xbins = convert_Qmap(dM, QZ, QX, bins=bins, range=rangeq, mask=mask) + # Normalize by the binning + num_per_bin, zbins, xbins = convert_Qmap(np.ones_like(dM), QZ, QX, bins=bins, range=rangeq, mask=mask) + + # remesh_data, zbins, xbins = np.histogram2d(QZ, QX, bins=bins, range=rangeq, normed=False, weights=D) + # Normalize by the binning + # num_per_bin, zbins, xbins = np.histogram2d(QZ, QX, bins=bins, range=rangeq, normed=False, weights=None) + Intensity_map += remesh_data + count_map += num_per_bin + # Intensity_mapN[i] = np.nan_to_num( remesh_data/num_per_bin ) + Intensity_map = np.nan_to_num(Intensity_map / count_map) + return Intensity_map, qxs, qzs + + +def plot_qmap_in_folder(inDir): + """YG. 
Sep 27@SMI + Plot Qmap data from inDir, which contains qmap data and extent data + """ + import pickle as cpl + + from pyCHX.chx_generic_functions import show_img + from pyCHX.chx_libs import cmap_vge_hdr, plt + + fp = get_base_all_filenames(inDir, base_filename_cut_length=-10) + print( + "There will %s samples and totally %s files to be analyzed." + % (len(fp.keys()), len(np.concatenate(list(fp.values())))) + ) + for k in list(fp.keys()): + for s in fp[k]: + if "npy" in s: + d = np.load(s) # * qmask + if "pkl" in s: + xs, zs = cpl.load(open(s, "rb")) + show_img( + d, + logs=False, + show_colorbar=True, + show_ticks=True, + xlabel="$q_x \, (\AA^{-1})$", + ylabel="$q_z \, (\AA^{-1})$", + cmap=cmap_vge_hdr, # vmin= np.min(d), vmax = np.max(d), + aspect=1, + vmin=-1, + vmax=np.max(d) * 0.5, + extent=[xs[0], xs[-1], zs[0], zs[-1]], + image_name=k[:-1], + path=inDir, + save=True, + ) + plt.close("all") + + +def get_qmap_range(calibration, phi_min, phi_max): + """YG Sep 27@SMI + Get q_range, [ qx_start, qx_end, qz_start, qz_end ] for SMI WAXS qmap + (only rotate around z-axis, so det_theta_g=0.,actually being the y-axis for beamline conventional defination) + based on calibration on Sep 22, offset_x= 0, offset_y= 22 + Input: + calibration: class, See SciAnalysis.XSAnalysis.DataGonio.CalibrationGonio + phi_min: min of phis + phi_max: max of phis + Output: + qrange: np.array([ qx_start, qx_end, qz_start, qz_end ]) + """ + calibration.set_angles(det_phi_g=phi_max, det_theta_g=0.0, offset_x=0, offset_y=22) + calibration._generate_qxyz_maps() + qx_end = np.max(calibration.qx_map_data) + qz_start = np.min(calibration.qz_map_data) + qz_end = np.max(calibration.qz_map_data) + + calibration.set_angles(det_phi_g=phi_min, det_theta_g=0.0, offset_x=0, offset_y=22) + calibration._generate_qxyz_maps() + qx_start = np.min(calibration.qx_map_data) + return np.array([qx_start, qx_end, qz_start, qz_end]) + + +def get_phi(filename, phi_offset=0, phi_start=4.5, phi_spacing=4.0, polarity=-1, ext="_WAXS.tif"): + pattern_re = "^.+\/?([a-zA-Z0-9_]+_)(\d\d\d\d\d\d)(\%s)$" % ext + # print( pattern_re ) + # pattern_re='^.+\/?([a-zA-Z0-9_]+_)(\d\d\d)(\.tif)$' + phi_re = re.compile(pattern_re) + phi_offset = phi_offset + m = phi_re.match(filename) + + if m: + idx = float(m.groups()[1]) + # print(idx) + # phi_c = polarity*( phi_offset + phi_start + (idx-1)*phi_spacing ) + phi_c = polarity * (phi_offset + phi_start + (idx - 0) * phi_spacing) + + else: + print("ERROR: File {} doesn't match phi_re".format(filename)) + phi_c = 0.0 + + return phi_c + + +############For CHX beamline + + +def get_qmap_qxyz_range( + calibration, + det_theta_g_lim, + det_phi_g_lim, + sam_phi_lim, + sam_theta_lim, + sam_chi_lim, + offset_x=0, + offset_y=0, + offset_z=0, +): + """YG Nov 8, 2017@CHX + Get q_range, [ qx_start, qx_end, qz_start, qz_end ] for SMI WAXS qmap + (only rotate around z-axis, so det_theta_g=0.,actually being the y-axis for beamline conventional defination) + based on calibration on Sep 22, offset_x= 0, offset_y= 22 + Input: + calibration: class, See SciAnalysis.XSAnalysis.DataGonio.CalibrationGonio + phi_min: min of phis + phi_max: max of phis + Output: + qrange: np.array([ qx_start, qx_end, qz_start, qz_end ]) + """ + + i = 0 + calibration.set_angles( + det_theta_g=det_theta_g_lim[i], + det_phi_g=det_phi_g_lim[i], + sam_phi=sam_phi_lim[i], + sam_theta=sam_theta_lim[i], + sam_chi=sam_chi_lim[i], + offset_x=offset_x, + offset_y=offset_y, + offset_z=offset_z, + ) + calibration._generate_qxyz_maps() + qx_start = 
np.min(calibration.qx_map_data) + qy_start = np.min(calibration.qy_map_data) + qz_start = np.min(calibration.qz_map_data) + + i = 1 + + calibration.set_angles( + det_theta_g=det_theta_g_lim[i], + det_phi_g=det_phi_g_lim[i], + sam_phi=sam_phi_lim[i], + sam_theta=sam_theta_lim[i], + sam_chi=sam_chi_lim[i], + offset_x=offset_x, + offset_y=offset_y, + offset_z=offset_z, + ) + + calibration._generate_qxyz_maps() + qx_end = np.min(calibration.qx_map_data) + qy_end = np.min(calibration.qy_map_data) + qz_end = np.min(calibration.qz_map_data) + + return np.array([qx_start, qx_end]), np.array([qy_start, qy_end]), np.array([qz_start, qz_end]) + + +def stitch_WAXS_in_Qspace_CHX( + data, + angle_dict, + calibration, + vary_angle="phi", + qxlim=None, + qylim=None, + qzlim=None, + det_theta_g=0, + det_phi_g=0.0, + sam_phi=0, + sam_theta=0, + sam_chi=0, + dx=0, + dy=0, + dz=0, + dq=0.0008, +): + """YG Octo 11, 2017 stitch waxs scattering images in qspace + dataM: the data (with corrected intensity), dict format (todolist, make array also avialable) + phis: for SMI, the rotation angle around z-aixs + For SMI + dx= 0 #in pixel unit + dy = 22 #in pixel unit + dz = 0 + calibration: class, for calibration + + Return: Intensity_map, qxs, qzs + + Example: + phis = np.array( [get_phi(infile, + phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data + + calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV + #calibration.set_image_size( data.shape[1], data.shape[0] ) + calibration.set_image_size(195, height=1475) # Pilatus300kW vertical + calibration.set_pixel_size(pixel_size_um=172.0) + calibration.set_beam_position(97.0, 1314.0) + calibration.set_distance(0.275) + + Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) + #Get center of the qmap + bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) + print( bx, by ) + """ + qx_min, qx_max = qxlim[0], qxlim[1] + qy_min, qy_max = qylim[0], qylim[1] + qz_min, qz_max = qzlim[0], qzlim[1] + + qxs = np.arange(qxlim[0], qxlim[1], dq) + qys = np.arange(qylim[0], qylim[1], dq) + qzs = np.arange(qzlim[0], qzlim[1], dq) + + QXs, QYs = np.meshgrid(qxs, qys) + QZs, QYs = np.meshgrid(qzs, qys) + QZs, QXs = np.meshgrid(qzs, qxs) + + num_qx = len(qxs) + num_qy = len(qys) + num_qz = len(qzs) + + Intensity_map_XY = np.zeros((len(qxs), len(qys))) + count_map_XY = np.zeros((len(qxs), len(qys))) + + Intensity_map_ZY = np.zeros((len(qzs), len(qys))) + count_map_ZY = np.zeros((len(qzs), len(qys))) + + Intensity_map_ZX = np.zeros((len(qzs), len(qxs))) + count_map_ZX = np.zeros((len(qzs), len(qxs))) + + N = len(data) + N = len(angle_dict[vary_angle]) + print(N) + # Intensity_mapN = np.zeros( (8, len(qzs), len(qxs)) ) + for i in range(N): + dM = data[i] + D = dM.ravel() + sam_phi = angle_dict[vary_angle][i] + print(i, sam_phi) + calibration.set_angles( + det_theta_g=det_theta_g, + det_phi_g=det_phi_g, + sam_phi=sam_phi, + sam_theta=sam_theta, + sam_chi=sam_chi, + offset_x=dx, + offset_y=dy, + offset_z=dz, + ) + calibration.clear_maps() + calibration._generate_qxyz_maps() + + QZ = calibration.qz_map_lab_data.ravel() # [pixel_list] + QX = calibration.qx_map_lab_data.ravel() # [pixel_list] + QY = calibration.qy_map_lab_data.ravel() # [pixel_list] + + bins_xy = [num_qx, num_qy] + bins_zy = [num_qz, num_qy] + bins_zx = [num_qz, num_qx] + + rangeq_xy = [[qx_min, qx_max], [qy_min, qy_max]] + rangeq_zy = [[qz_min, qz_max], [qy_min, qy_max]] + rangeq_zx = [[qz_min, qz_max], [qx_min, qx_max]] + print(rangeq_xy, rangeq_zy, 
rangeq_zx) + + remesh_dataxy, xbins, ybins = np.histogram2d( + QX, QY, bins=bins_xy, range=rangeq_xy, normed=False, weights=D + ) + # Normalize by the binning + num_per_binxy, xbins, ybins = np.histogram2d( + QX, QY, bins=bins_xy, range=rangeq_xy, normed=False, weights=None + ) + Intensity_map_XY += remesh_dataxy + count_map_XY += num_per_binxy + + remesh_datazy, zbins, ybins = np.histogram2d( + QZ, QY, bins=bins_zy, range=rangeq_zy, normed=False, weights=D + ) + # Normalize by the binning + num_per_binzy, zbins, ybins = np.histogram2d( + QZ, QY, bins=bins_zy, range=rangeq_zy, normed=False, weights=None + ) + Intensity_map_ZY += remesh_datazy + count_map_ZY += num_per_binzy + + remesh_datazx, zbins, xbins = np.histogram2d( + QZ, QX, bins=bins_zx, range=rangeq_zx, normed=False, weights=D + ) + # Normalize by the binning + num_per_binzx, zbins, xbins = np.histogram2d( + QZ, QX, bins=bins_zx, range=rangeq_zx, normed=False, weights=None + ) + Intensity_map_ZX += remesh_datazx + count_map_ZX += num_per_binzx + + # Intensity_mapN[i] = np.nan_to_num( remesh_data/num_per_bin ) + Intensity_map_XY = np.nan_to_num(Intensity_map_XY / count_map_XY) + Intensity_map_ZY = np.nan_to_num(Intensity_map_ZY / count_map_ZY) + Intensity_map_ZX = np.nan_to_num(Intensity_map_ZX / count_map_ZX) + + return Intensity_map_XY, Intensity_map_ZY, Intensity_map_ZX, qxs, qys, qzs diff --git a/pyCHX/backups/pyCHX-backup/Two_Time_Correlation_Function.py b/pyCHX/backups/pyCHX-backup/Two_Time_Correlation_Function.py new file mode 100644 index 0000000..a110211 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/Two_Time_Correlation_Function.py @@ -0,0 +1,1305 @@ +###################################################################################### +########Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL################ +########Time correlation function, include one-time, two-time, four-time############## +########Muli-tau method, array-operation method####################################### +###################################################################################### + + +import itertools +import sys +import time +from datetime import datetime + +import matplotlib.pyplot as plt +import numpy as np +import skbeam.core.roi as roi +from matplotlib import gridspec +from matplotlib.colors import LogNorm +from modest_image import ModestImage, imshow +from tqdm import tqdm + +# from pyCHX.chx_libs import colors_ as mcolors, markers_ as markers +from pyCHX.chx_libs import RUN_GUI, Figure +from pyCHX.chx_libs import colors +from pyCHX.chx_libs import colors as colors_array +from pyCHX.chx_libs import lstyles +from pyCHX.chx_libs import markers +from pyCHX.chx_libs import markers as markers_array +from pyCHX.chx_libs import markers_copy, mcolors, multi_tau_lags + + +def delays(num_lev=3, num_buf=4, time=1): + """DOCUMENT delays(time=) + return array of delays. 
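+ For example (a minimal sketch): dly, dict_dly = delays(num_lev=3, num_buf=4, time=1)
+ gives the full multi-tau delay array and a dict of the delays at each level, in frame units when time=1.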
+ KEYWORD: time: scale delays by time ( should be time between frames) + """ + if num_buf % 2 != 0: + print("nobuf must be even!!!") + dly = np.zeros((num_lev + 1) * int(num_buf / 2) + 1) + dict_dly = {} + for i in range(1, num_lev + 1): + if i == 1: + imin = 1 + else: + imin = int(num_buf / 2) + 1 + ptr = (i - 1) * int(num_buf / 2) + np.arange(imin, num_buf + 1) + dly[ptr] = np.arange(imin, num_buf + 1) * 2 ** (i - 1) + dict_dly[i] = dly[ptr - 1] + dly *= time + # print (i, ptr, imin) + return dly, dict_dly + + +class Get_Pixel_Array(object): + """ + Dec 16, 2015, Y.G.@CHX + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + """ + + def __init__(self, indexable, pixelist): + """ + indexable: a images sequences + pixelist: 1-D array, interest pixel list + """ + self.indexable = indexable + self.pixelist = pixelist + # self.shape = indexable.shape + try: + self.length = len(indexable) + except: + self.length = indexable.length + + def get_data(self): + """ + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + """ + + # print (self.length) + data_array = np.zeros([self.length, len(self.pixelist)]) + for key in tqdm(range(self.length)): + data_array[key] = np.ravel(self.indexable[key])[self.pixelist] + return data_array + + +class Reverse_Coordinate(object): + """obsolete codes""" + + def __init__(self, indexable, mask): + self.indexable = indexable + self.mask = mask + try: + self.shape = indexable.shape + except: + # if + self.shape = [len(indexable), indexable[0].shape[0], indexable[0].shape[1]] + # self.shape = indexable.shape + self.length = len(indexable) + + def __getitem__(self, key): + if self.mask is not None: + img = self.indexable[key] * self.mask + else: + img = self.indexable[key] + + if len(img.shape) == 3: + img_ = img[:, ::-1, :] + if len(img.shape) == 2: + img_ = img[::-1, :] + return img_ + + +def get_mean_intensity(data_pixel, qind): + """ + Dec 16, 2015, Y.G.@CHX + a function to get mean intensity as a function of time (image number) + + Parameters: + data_pixel: 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + qind: 1-D int array, a index list of interest pixel, values change from 1 to int number + + Return: + mean_inten: a dict, with keys as the unique values of qind, + each dict[key]: 1-D array, with shape as data_pixel.shape[0],namely, len(images) + + One example: + mean_inten = get_mean_intensity( data_pixel, qind) + """ + + noqs = len(np.unique(qind)) + mean_inten = {} + + for qi in range(1, noqs + 1): + pixelist_qi = np.where(qind == qi)[0] + # print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:, pixelist_qi] + mean_inten[qi] = data_pixel_qi.mean(axis=1) + return mean_inten + + +def run_time(t0): + """Calculate running time of a program + Dec 16, 2015, Y.G.@CHX + Parameters + ---------- + t0: time_string, t0=time.time() + The start time + Returns + ------- + Print the running time + + One usage + --------- + t0=time.time() + .....(the running code) + run_time(t0) + """ + + elapsed_time = time.time() - t0 + print("Total time: %.2f min" % (elapsed_time / 60.0)) + + +def get_each_frame_ROI_intensity(data_pixel, bad_pixel_threshold=1e10, plot_=False, *argv, **kwargs): + """ + Dec 16, 2015, Y.G.@CHX + Get the ROI intensity of each frame + Also get bad_frame_list by 
check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity( data_pixel, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + imgsum = np.array([np.sum(img) for img in tqdm(data_series[::sampling], leave=True)]) + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + bad_frame_list = np.where(np.array(imgsum) > bad_pixel_threshold)[0] + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list + + +def auto_two_Array(data, rois, data_pixel=None): + """ + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + """ + + start_time = time.time() + + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + + if data_pixel is None: + data_pixel = Get_Pixel_Array(data, pixelist).get_data() + # print (data_pixel.shape) + + noframes = data_pixel.shape[0] + g12b = np.zeros([noframes, noframes, noqs]) + Unitq = noqs / 10 + proi = 0 + + for qi in tqdm(range(1, noqs + 1)): + pixelist_qi = np.where(qind == qi)[0] + # print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:, pixelist_qi] + + sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes) + sum2 = sum1.T + + g12b[:, :, qi - 1] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] + # print ( proi, int( qi //( Unitq) ) ) + # if int( qi //( Unitq) ) == proi: + # sys.stdout.write("#") + # sys.stdout.flush() + # proi += 1 + + elapsed_time = time.time() - start_time + print("Total time: %.2f min" % (elapsed_time / 60.0)) + + return g12b + + +#################################### +##Derivation of Two time correlation +##################################### + + +##################################### +# get one-time @different age +##################################### + + +def get_qedge2(qstart, qend, qwidth, noqs, return_int=False): + """DOCUMENT make_qlist( ) + give qstart,qend,qwidth,noqs + return a qedge by giving the noqs, qstart,qend,qwidth. 
+ a qcenter, which is center of each qedge + KEYWORD: None""" + import numpy as np + + qcenter = np.linspace(qstart, qend, noqs) + # print ('the qcenter is: %s'%qcenter ) + qedge = np.zeros(2 * noqs) + qedge[::2] = qcenter - (qwidth / 2) # +1 #render even value + qedge[1::2] = qcenter + qwidth / 2 # render odd value + if not return_int: + return qedge, qcenter + else: + return np.int(qedge), np.int(qcenter) + + +def get_qedge(qstart, qend, qwidth, noqs, return_int=False): + """DOCUMENT make_qlist( ) + give qstart,qend,qwidth,noqs + return a qedge by giving the noqs, qstart,qend,qwidth. + a qcenter, which is center of each qedge + KEYWORD: None""" + import numpy as np + + qcenter = np.linspace(qstart, qend, noqs) + # print ('the qcenter is: %s'%qcenter ) + qedge = np.zeros([noqs, 2]) + qedge[:, 0] = qcenter - (qwidth / 2) # +1 #render even value + qedge[:, 1] = qcenter + qwidth / 2 # render odd value + if not return_int: + return qedge, qcenter + else: + return np.int(qedge), np.int(qcenter) + + +def get_time_edge(tstart, tend, twidth, nots, return_int=False): + """Get time edge and time center by giving tstart, tend, twidth,nots + Return: + tedge: array, [ [ tedge1_start, tedge1_end], [ tedge2_start, tedge2_end], ... ] + tcenter: array, [tcenter1, tcenter2, ...] + if return_int = True, return tedge, tcenter in integer + """ + import numpy as np + + tcenter = np.linspace(tstart, tend, nots) + # print ('the qcenter is: %s'%qcenter ) + tedge = np.zeros([nots, 2]) + tedge[:, 0] = tcenter - (twidth / 2) # +1 #render even value + tedge[:, 1] = tcenter + twidth / 2 # render odd value + if not return_int: + return tedge, tcenter + else: + return np.int(tedge), np.int(tcenter) + + +def rotate_g12q_to_rectangle(g12q): + """ + Dec 16, 2015, Y.G.@CHX + Rotate anti clockwise 45 of a one-q two correlation function along diagonal to a masked array + the shape ( imgs_length, imgs_length ) of g12q will change to ( imgs_length, 2*imgs_length -1) + + + Parameters: + g12q: a 2-D array, one-q two correlation function, shape as ( imgs_length, imgs_length ) + + + Return: + g12qr: a masked 2-D array, shape as ( imgs_length, 2*imgs_length -1 ) + x-axis: taus, from 0 to imgs_length + y-axis: ages, from 0 to imgs_length( the middle of y) to 2imgs_length-1 (top) + One example: + g12qr = rotate_g12q_to_rectangle(g12bm[:,:,0] ) + """ + M, N = g12q.shape + g12qr = np.ma.empty((2 * N - 1, N)) + g12qr.mask = True + for i in range(N): + g12qr[i : (2 * N - 1 - i) : 2, i] = g12q.diagonal(i) + return g12qr + + +def get_aged_g2_from_g12(g12, age_edge, age_center): + """ + Dec 16, 2015, Y.G.@CHX + Get one-time correlation function of different age from two correlation function + namely, calculate the different aged mean of each diag line of g12 to get one-time correlation fucntion + + Parameters: + g12: a 3-D array, a two correlation function, shape as ( imgs_length, imgs_length, noqs ) + + Options: + slice_num: int, the slice number of the diagonal of g12 + slice_width: int, each slice width in unit of pixel + slice start: int, can start from 0 + slice end: int, can end at 2*imgs_length -1 + + + Return: + g2_aged: a dict, one time correlation function at different age + the keys of dict is ages in unit of pixel + dict[key]: + a two-D array, shape as ( imgs_length ), + a multi-q one-time correlation function + One example: + g2_aged = get_aged_g2_from_g12( g12, slice_num =3, slice_width= 500, + slice_start=4000, slice_end= 20000-4000 ) + """ + + m, n, noqs = g12.shape + g2_aged = {} + for q in range(noqs): + g12q = g12[:, :, q] 
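+ # reduce the q-th two-time plane to aged one-time g2 curves within each age window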
+ g2q_aged = get_aged_g2_from_g12q(g12q, age_edge, age_center) + if q == 0: + keys = list(g2q_aged.keys()) + for key in keys: + if q == 0: + g2_aged[key] = np.zeros([len(g2q_aged[key]), noqs]) + g2_aged[key][:, q] = g2q_aged[key] + # print( q, key ) + + return g2_aged + + +def get_aged_g2_from_g12q(g12q, age_edge, age_center=None, timeperframe=1, time_sampling="log", num_bufs=8): + """ + + + Revised at Octo 20, 2017, correct age, should be (t1+t2)/2, namely, age_edge will *2, age_center will keep same + + Revised at Sep 28, 2017 add time_sampling='log', num_bufs=8 options + Dec 16, 2015, Y.G.@CHX + Revised at April 19, 2017 + Get one-time correlation function of different age from 1q-two correlation function + namely, calculate the different aged mean of each diag line of g12 to get one-time correlation fucntion + + Parameters: + g12q: a 2-D array, one-q two correlation function, shape as ( imgs_length, imgs_length ) + age_edge, list, e.g., [[0, 500], [2249, 2749], [4500, 5000]] + can be obtained by function: + age_edge = create_time_slice( len(imgsa), slice_num= 3, slice_width= 500, edges = None ) + Options: + timeperframe: in unit of sec + age_center: None, will use the center of age_edge + time_sampling: 'log', log sampling of taus; Or 'linear', namely, delta tau = 1 + num_bufs: 8, buf number for log sampling tau + + + Return: + + g2_aged: a dict, one time correlation function at different age + the keys of dict is ages in unit of pixel + dict[key]: + a 1-D array, shape as ( imgs_length ), + a one-q one-time correlation function + One example: + g2_aged = get_aged_g2_from_g12q( g12q, age_edge ) + """ + + arr = rotate_g12q_to_rectangle(g12q) + m, n = arr.shape # m should be 2*n-1 + # age_edge, age_center = get_qedge( qstart=slice_start,qend= slice_end, + # qwidth = slice_width, noqs =slice_num ) + # print(arr.shape) + age_edge = np.int_(age_edge) + if age_center is None: + age_center = (age_edge[:, 0] + age_edge[:, 1]) // 2 + + age_edge_ = age_edge * 2 + age_center_ = age_center * timeperframe + g2_aged = {} + lag_dict = {} + # print( age_edge, age_center) + for i, age in enumerate(age_center_): + age_edges_0, age_edges_1 = age_edge_[i][0], age_edge_[i][1] + # print(i, age, age_edges_0, age_edges_1) + g2i = arr[age_edges_0:age_edges_1].mean(axis=0) + # print('here') + g2i_ = np.array(g2i) + g2_aged[age] = g2i_[np.nonzero(g2i_)[0]] + N = len(g2_aged[age]) + lag_dict[age] = np.arange(N) * 1.0 + if time_sampling == "log": + num_levels = int(np.log(N / (num_bufs - 1)) / np.log(2) + 1) + 1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + # max_taus= lag_steps[age].max() + lag_steps_ = lag_steps[lag_steps < N] + # print(i, age, lag_steps, N, lag_steps_, len(g2_aged[age])) + g2_aged[age] = g2_aged[age][lag_steps_] + lag_dict[age] = lag_steps_ * 1.0 + # print( lag_dict[age] ) + lag_dict[age] *= timeperframe + + return lag_dict, g2_aged + + +def get_aged_g2_from_g12q2(g12q, slice_num=6, slice_width=5, slice_start=0, slice_end=1): + """ + Dec 16, 2015, Y.G.@CHX + Get one-time correlation function of different age from two correlation function + namely, calculate the different aged mean of each diag line of g12 to get one-time correlation fucntion + + Parameters: + g12q: a 2-D array, one-q two correlation function, shape as ( imgs_length, imgs_length ) + + Options: + slice_num: int, the slice number of the diagonal of g12 + slice_width: int, each slice width in unit of pixel + slice start: int, can start from 0 + slice end: int, can end at 2*imgs_length -1 + + + Return: 
+ g2_aged: a dict, one time correlation function at different age + the keys of dict is ages in unit of pixel + dict[key]: + a 1-D array, shape as ( imgs_length ), + a one-q one-time correlation function + One example: + g2_aged = get_aged_g2_from_g12q( g12q, slice_num =3, slice_width= 500, + slice_start=4000, slice_end= 20000-4000 ) + """ + + arr = rotate_g12q_to_rectangle(g12q) + m, n = arr.shape # m should be 2*n-1 + age_edge, age_center = get_qedge(qstart=slice_start, qend=slice_end, qwidth=slice_width, noqs=slice_num) + age_edge, age_center = np.int_(age_edge), np.int_(age_center) + # print (age_edge, age_center) + g2_aged = {} + for i, age in enumerate(age_center): + age_edges_0, age_edges_1 = age_edge[i * 2 : 2 * i + 2] + g2i = arr[age_edges_0:age_edges_1].mean(axis=0) + g2i_ = np.array(g2i) + g2_aged[age] = g2i_[np.nonzero(g2i_)[0]] + + return g2_aged + + +def show_g12q_aged_g2( + g12q, + g2_aged, + taus_aged=None, + slice_width=10, + timeperframe=1, + vmin=1, + vmax=1.25, + save=True, + uid="uid", + path="", + *argv, + **kwargs +): + """ + Octo 20, 2017, add taus_aged option + + Dec 16, 2015, Y.G.@CHX + Plot one-time correlation function of different age with two correlation function + Parameters: + g12q: a 2-D array, one-q two correlation function, shape as ( imgs_length, imgs_length ) + tau_aged: a dict, taus for different age + g2_aged: a dict, one time correlation function at different age + obtained by: for example, + g2_aged = get_aged_g2_from_g12q( g12q, slice_num =3, slice_width= 500, + slice_start=4000, slice_end= 20000-4000 ) + the keys of dict is ages in unit of pixel + dict[key]: + a 1-D array, shape as ( imgs_length ), + a one-q one-time correlation function + + Options: + slice_width: int, each slice width in unit of pixel, for line width of a plot + timeperframe: float, time per frame for axis unit + vmin, float, matplot vmin + vmax, float, matplot vmax + + Return: + two plots, one for the two-time correlation, g12q, + + One example: + show_g12q_aged_g2( g12q, g2_aged,timeperframe=1,vmin= 1, vmax= 1.22 ) + """ + + age_center = np.array(list(sorted(g2_aged.keys()))) + print("the cut age centers are: " + str(age_center)) + age_center = np.int_(np.array(list(sorted(g2_aged.keys()))) / timeperframe) * 2 # in pixel + M, N = g12q.shape + + # fig, ax = plt.subplots( figsize = (8,8) ) + + figw = 10 + figh = 8 + fig = plt.figure(figsize=(figw, figh)) + + # gs = gridspec.GridSpec(1, 2, width_ratios=[10, 8],height_ratios=[8,8] ) + gs = gridspec.GridSpec(1, 2) + ax = plt.subplot(gs[0]) + im = imshow(ax, g12q, origin="lower", cmap="viridis", norm=LogNorm(vmin, vmax), extent=[0, N, 0, N]) + + # plt.gca().set_xticks(ticks) + ticks = np.round(plt.gca().get_xticks() * timeperframe, 2) + # print( ticks ) + ax.set_xticklabels(ticks) + ax.set_yticklabels(ticks) + # plt.xticks(ticks, fontsize=9) + + # ), extent=[0, g12q.shape[0]*timeperframe, 0, g12q.shape[0]*timeperframe ] ) + + ax1 = plt.subplot(gs[1]) + linS1 = [[0] * len(age_center), np.int_(age_center - slice_width // 2)] + linS2 = [[0] * len(age_center), np.int_(age_center + slice_width // 2)] + linE1 = [np.int_(age_center - slice_width // 2), [0] * len(age_center)] + linE2 = [np.int_(age_center + slice_width // 2), [0] * len(age_center)] + linC = [[0] * len(age_center), np.int_(age_center)] + + for i in range(len(age_center)): + ps = linS1[1][i] + pe = linE1[0][i] + if ps >= N: + s0 = ps - N + s1 = N + else: + s0 = 0 + s1 = ps + e0 = s1 + e1 = s0 + # if pe>=N:e0=N;e1=pe - N + # else:e0=pe;e1=0 + + ps = linS2[1][i] + pe = 
linE2[0][i] + if ps >= N: + S0 = ps - N + S1 = N + else: + S0 = 0 + S1 = ps + # if pe>=N:e0=N;E1=pe - N + # else:E0=pe;E1=0 + E0 = S1 + E1 = S0 + + ps = linC[1][i] + if ps >= N: + C0 = ps - N + C1 = N + else: + C0 = 0 + C1 = ps + # if pe>=N:e0=N;E1=pe - N + # else:E0=pe;E1=0 + D0 = C1 + D1 = C0 + + lined = slice_width / 2.0 # in data width + linewidthc = (lined * (figh * 72.0 / N)) * 0.5 + # print( s0,e0, s1,e1, S0,E0, S1, E1) + + # lined= slice_width/2. #in data width + # linewidth= (lined * (figh*72./N)) * 0.8 + linewidth = 1 + ax.plot([s0, e0], [s1, e1], linewidth=linewidth, ls="--", alpha=1, color=colors_array[i]) + ax.plot([S0, E0], [S1, E1], linewidth=linewidth, ls="--", alpha=1, color=colors_array[i]) + # print( i, [s0,e0],[s1,e1], [S0,E0],[S1,E1], colors_array[i] ) + ax.plot([C0, D0], [C1, D1], linewidth=linewidthc, ls="-", alpha=0.0, color=colors_array[i]) + + # ax.set_title( '%s_frames'%(N) ) + ax.set_title("%s_two_time" % uid) + ax.set_xlabel(r"$t_1$ $(s)$", fontsize=18) + ax.set_ylabel(r"$t_2$ $(s)$", fontsize=18) + fig.colorbar(im) + ax1.set_title("%s_aged_g2" % uid) + ki = 0 + for i in sorted(g2_aged.keys()): + # ax = fig.add_subplot(sx,sy,sn+1 ) + if taus_aged is None: + gx = np.arange(len(g2_aged[i])) * timeperframe + else: + gx = taus_aged[i] + # marker = next(markers) + # print( g2_aged[i], marker ) + # print(i) + ax1.plot( + gx, + g2_aged[i], + marker="%s" % markers_array[ki], + ls="-", + color=colors_array[ki], + label=r"$t_a= %.1f s$" % i, + ) + # print( i, ki, colors_array[ki] ) + ki += 1 + ax1.set_ylim(vmin, vmax) + ax1.set_xlabel(r"$\tau $ $(s)$", fontsize=18) + ax1.set_ylabel("g2") + ax1.set_xscale("log") + ax1.legend(fontsize="small", loc="best") + if save: + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "%s_aged_g2" % uid + ".png" + # print( fp ) + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + + +def plot_aged_g2(g2_aged, tau=None, timeperframe=1, ylim=None, xlim=None): + """'A plot of g2 calculated from two-time""" + fig = plt.figure(figsize=(8, 10)) + age_center = list(sorted(g2_aged.keys())) + gs = gridspec.GridSpec(len(age_center), 1) + for n, i in enumerate(age_center): + ax = plt.subplot(gs[n]) + if tau is None: + gx = np.arange(len(g2_aged[i])) * timeperframe + else: + gx = tau[i] + marker = markers[n] + c = colors[n] + ax.plot(gx, g2_aged[i], "-%s" % marker, c=c, label=r"$age= %.1f s$" % (i * timeperframe)) + ax.set_xscale("log") + ax.legend(fontsize="large", loc="best") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=18) + ax.set_ylabel("g2") + if ylim is not None: + ax.set_ylim(ylim) + if xlim is not None: + ax.set_ylim(xlim) + + +##################################### +# get fout-time + + +def get_tau_from_g12q(g12q, slice_num=6, slice_width=1, slice_start=None, slice_end=None): + """ + Dec 16, 2015, Y.G.@CHX + Get tau lines from two correlation function + namely, get diag line of g12 as a function of ages + + Parameters: + g12q: a 2-D array, one-q two correlation function, shape as ( imgs_length, imgs_length ) + + Options: + slice_num: int, the slice number of the diagonal of g12 + slice_width: int, each slice width in unit of pixel + slice start: int, can start from 0 + slice end: int, can end at imgs_length -1 + + + Return: + return: tau, a dict, tau lines + the keys of dict is tau(slice center) in unit of pixel + dict[key]: + a 1-D array, shape as ( tau_line-length ), + + One example: + taus = get_aged_g2_from_g12q( g12q, slice_num =3, slice_width= 500, + slice_start=4000, slice_end= 20000-4000 ) + """ + + arr = 
rotate_g12q_to_rectangle(g12q) + m, n = arr.shape # m should be 2*n-1 + + age_edge, age_center = get_qedge(qstart=slice_start, qend=slice_end, qwidth=slice_width, noqs=slice_num) + age_edge, age_center = np.int_(age_edge), np.int_(age_center) + # print (age_edge, age_center) + tau = {} + for i, age in enumerate(age_center): + age_edges_0, age_edges_1 = age_edge[i * 2 : 2 * i + 2] + # print (age_edges_0, age_edges_1) + g2i = arr[:, age_edges_0:age_edges_1].mean(axis=1) + g2i_ = np.array(g2i) + tau[age] = g2i_[np.nonzero(g2i_)[0]] + + return tau + + +def show_g12q_taus(g12q, taus, slice_width=10, timeperframe=1, vmin=1, vmax=1.25): + """ + Dec 16, 2015, Y.G.@CHX + Plot tau-lines as a function of age with two correlation function + + + Parameters: + g12q: a 2-D array, one-q two correlation function, shape as ( imgs_length, imgs_length ) + tau, a dict, tau lines + the keys of dict is tau(slice center) in unit of pixel + dict[key]: + a 1-D array, shape as ( tau_line-length ), + obtained by: for example, + taus = get_tau_from_g12q( g12b_norm[:,:,0], slice_num = 5, slice_width=1, + slice_start=3, slice_end= 5000-1 )) + + + Options: + slice_width: int, each slice width in unit of pixel, for line width of a plot + timeperframe: float, time per frame for axis unit + vmin, float, matplot vmin + vmax, float, matplot vmax + + Return: + two plots, one for tau lines~ages, g12q, + + One example: + show_g12q_taus( g12b_norm[:,:,0], taus, slice_width=50, + timeperframe=1,vmin=1.01,vmax=1.55 ) + """ + + age_center = list(taus.keys()) + print("the cut tau centers are: " + str(age_center)) + M, N = g12q.shape + + # fig, ax = plt.subplots( figsize = (8,8) ) + + figw = 10 + figh = 10 + fig = plt.figure(figsize=(figw, figh)) + + gs = gridspec.GridSpec(1, 2, width_ratios=[10, 8], height_ratios=[8, 8]) + ax = plt.subplot(gs[0]) + ax1 = plt.subplot(gs[1]) + im = imshow(ax, g12q, origin="lower", cmap="viridis", norm=LogNorm(vmin=vmin, vmax=vmax), extent=[0, N, 0, N]) + + linS = [] + linE = [] + linS.append(zip(np.int_(age_center) - 1, [0] * len(age_center))) + linE.append(zip([N - 1] * len(age_center), N - np.int_(age_center))) + for i, [ps, pe] in enumerate(zip(linS[0], linE[0])): + lined = slice_width # /2. 
*draw_scale_tau #in data width + linewidth = (lined * (figh * 72.0 / N)) * 0.8 + # print (ps,pe) + ax.plot([ps[0], pe[0]], [ps[1], pe[1]], linewidth=linewidth) # , color= ) + + ax.set_title("%s_frames" % (N)) + ax.set_xlabel(r"$t_1$ $(s)$", fontsize=18) + ax.set_ylabel(r"$t_2$ $(s)$", fontsize=18) + fig.colorbar(im) + + ax1.set_title("Tau_Cuts_in_G12") + for i in sorted(taus.keys()): + gx = np.arange(len(taus[i])) * timeperframe + marker = next(markers) + ax1.plot(gx, taus[i], "-%s" % marker, label=r"$tau= %.1f s$" % (i * timeperframe)) + ax1.set_ylim(vmin, vmax) + ax1.set_xlabel(r"$t (s)$", fontsize=5) + ax1.set_ylabel("g2") + ax1.set_xscale("log") + ax1.legend(fontsize="small", loc="best") + # plt.show() + + +def histogram_taus(taus, hisbin=20, plot=True, timeperframe=1): + """ + Dec 16, 2015, Y.G.@CHX + Do histogram and plot of tau-lines + + + Parameters: + taus, a dict, tau lines + the keys of dict is tau(slice center) in unit of pixel + dict[key]: + a 1-D array, shape as ( tau_line-length ), + obtained by: for example, + taus = get_tau_from_g12q( g12b_norm[:,:,0], slice_num = 5, slice_width=1, + slice_start=3, slice_end= 5000-1 )) + + Options: + bins: int, bins number for the histogram + plot: if True, show the histogram plot + timeperframe: float, time per frame for axis unit + + + Return: + his: a dict, his[key], the histogram of tau-lines + if plot, plot the histogram of tau-lines + + One example: + his = histogram_taus(taus, hisbin=30, plot=True, timeperframe=timeperframe) + """ + + his = {} + for key in list(taus.keys()): + his[key] = np.histogram(taus[key], bins=hisbin) + + if plot: + fig, ax1 = plt.subplots(figsize=(8, 8)) + ax1.set_title("Tau_histgram") + for key in sorted(his.keys()): + tx = 0.5 * (his[key][1][:-1] + his[key][1][1:]) + marker = next(markers) + ax1.plot(tx, his[key][0], "-%s" % marker, label=r"$tau= %.1f s$" % (key * timeperframe)) + # ax1.set_ylim( 1.05,1.35 ) + ax1.set_xlim(1.05, 1.35) + ax1.set_xlabel(r"$g_2$", fontsize=19) + ax1.set_ylabel(r"histgram of g2 @ tau", fontsize=15) + # ax1.set_xscale('log') + ax1.legend(fontsize="large", loc="best") + # plt.show() + + return his + + +##################################### +# get one-time +##################################### + + +def get_one_time_from_two_time_old(g12, norms=None, nopr=None): + """ + Dec 16, 2015, Y.G.@CHX + Get one-time correlation function from two correlation function + namely, calculate the mean of each diag line of g12 to get one-time correlation fucntion + + Parameters: + g12: a 3-D array, two correlation function, shape as ( imgs_length, imgs_length, q) + + Options: + norms: if not None, a 2-D array, shape as ( imgs_length, q), a normalization for further get one-time from two time, get by: g12b_norm, g12b_not_norm, norms = auto_two_Array_g1_norm( imgsr, ring_mask, data_pixel = data_pixel ) + nopr: if not None, 1-D array, shape as [q], the number of interested pixel of each q + + + Return: + g2f12: a 2-D array, shape as ( imgs_length, q), + a one-time correlation function + + One example: + g2b_norm = get_one_time_from_two_time(g12b_norm, norms=None, nopr=None ) + g2b_not_norm = get_one_time_from_two_time(g12b_not_norm, norms=norms, nopr=nopr) + """ + + m, n, noqs = g12.shape + g2f12 = np.zeros([m, noqs]) + for q in range(noqs): + y = g12[:, :, q] + for tau in range(m): + if norms is None: + g2f12[tau, q] = np.nanmean(np.diag(y, k=int(tau))) + else: + yn = norms[:, q] + yn1 = np.average(yn[tau:]) + yn2 = np.average(yn[: m - tau]) + g2f12[tau, q] = np.nanmean(np.diag(y, k=int(tau))) / 
(yn1 * yn2 * nopr[q]) + + return g2f12 + + +def get_one_time_from_two_time(g12, norms=None, nopr=None): + """ + Dec 16, 2015, Y.G.@CHX + Get one-time correlation function from two correlation function + namely, calculate the mean of each diag line of g12 to get one-time correlation fucntion + + Parameters: + g12: a 3-D array, two correlation function, shape as ( imgs_length, imgs_length, q) + + Options: + norms: if not None, a 2-D array, shape as ( imgs_length, q), a normalization for further get one-time from two time, get by: g12b_norm, g12b_not_norm, norms = auto_two_Array_g1_norm( imgsr, ring_mask, data_pixel = data_pixel ) + nopr: if not None, 1-D array, shape as [q], the number of interested pixel of each q + + + Return: + g2f12: a 2-D array, shape as ( imgs_length, q), + a one-time correlation function + + One example: + g2b_norm = get_one_time_from_two_time(g12b_norm, norms=None, nopr=None ) + g2b_not_norm = get_one_time_from_two_time(g12b_not_norm, norms=norms, nopr=nopr) + """ + + m, n, noqs = g12.shape + if norms is None: + g2f12 = np.array([np.nanmean(g12.diagonal(i), axis=1) for i in range(m)]) + else: + g2f12 = np.zeros([m, noqs]) + for q in range(noqs): + yn = norms[:, q] + g2f12[i, q] = np.array( + [ + np.nanmean(g12[:, :, q].diagonal(i)) / (np.average(yn[i:]) * np.average(yn[: m - i]) * nopr[q]) + for i in range(m) + ] + ) + return g2f12 + + +def get_four_time_from_two_time(g12, g2=None, rois=None): + """ + Dec 16, 2015, Y.G.@CHX + Get four-time correlation function from two correlation function + namely, calculate the deviation of each diag line of g12 to get four-time correlation fucntion + TOBEDONE: deal with bad frames + + Parameters: + g12: a 3-D array, two correlation function, shape as ( imgs_length, imgs_length, q) + + Options: + g2: if not None, a 2-D array, shape as ( imgs_length, q), or (tau, q) + one-time correlation fucntion, for normalization of the four-time + rois: if not None, a list, [x-slice-start, x-slice-end, y-slice-start, y-slice-end] + + Return: + g4f12: a 2-D array, shape as ( imgs_length, q), + a four-time correlation function + + One example: + s1,s2 = 0,2000 + g4 = get_four_time_from_two_time( g12bm, g2b, roi=[s1,s2,s1,s2] ) + + """ + m, n, noqs = g12.shape + if g2 is not None: + norm = (g2[0] - 1) ** 2 + else: + norm = 1.0 + if rois is None: + g4f12 = np.array([(np.nanstd(g12.diagonal(i), axis=1)) ** 2 / norm for i in range(m)]) + + else: + x1, x2, y1, y2 = rois + g4f12 = np.array([(np.nanstd(g12[x1:x2, y1:y2, :].diagonal(i), axis=1)) ** 2 / norm for i in range(m)]) + + return g4f12 + + +###### +def make_g12_mask(badframes_list, g12_shape): + """ + Dec 16, 2015, Y.G.@CHX + make g12 mask by badlines + + Parameters: + badframes_list: list, contains the bad frame number, like [100, 155, 10000] + g12_shape: the shape of one-q two correlation function, shape as ( imgs_length, imgs_length ) + Return: + g12_mask: a 2-D array, shape as ( imgs_length, imgs_length ) + + + One example: + g12_mask = make_g12_mask(bad_frames, g12b[:,:,0].shape) + + """ + + m, n = g12_shape + # g12_mask = np.ma.empty( ( m,n ) ) + g12_mask = np.ma.ones((m, n)) + g12_mask.mask = False + for bdl in badframes_list: + g12_mask.mask[:, bdl] = True + g12_mask.mask[bdl, :] = True + return g12_mask + + +def masked_g12(g12, badframes_list): + """ + Dec 16, 2015, Y.G.@CHX + make masked g12 with mask defined by badframes_list + + + Parameters: + g12: a 3-D array, two correlation function, shape as ( imgs_length, imgs_length, q) + badframes_list: list, contains the bad frame number, like 
[100, 155, 10000] + + Return: + g12m: a masked 3-D array, shape as same as g12, ( imgs_length, imgs_length, q ) + + + One example: + g12m = masked_g12( g12b, bad_frames) + + """ + + m, n, qs = g12.shape + g12m = np.ma.empty_like(g12) + g12_mask = make_g12_mask(badframes_list, g12[:, :, 0].shape) + + for i in range(qs): + g12m[:, :, i] = g12[:, :, i] * g12_mask + return g12m + + +def show_one_C12( + C12, fig_ax=None, return_fig=False, interpolation="none", cmap="viridis", show_colorbar=True, *argv, **kwargs +): + """ + plot one-q of two-time correlation function + C12: two-time correlation function, with shape as [ time, time, qs] + q_ind: if integer, for a SAXS q, the nth of q to be plotted + if a list: for a GiSAXS [qz_ind, qr_ind] + kwargs: support + timeperframe: the time interval + N1: the start frame(time) + N2: the end frame(time) + vmin/vmax: for plot + title: if True, show the tile + + e.g., + show_C12(g12b, q_ind=1, N1=0, N2=500, vmin=1.05, vmax=1.07, ) + + """ + + # strs = [ 'timeperframe', 'N1', 'N2', 'vmin', 'vmax', 'title'] + + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + + shape = C12.shape + + if "timeperframe" in kwargs.keys(): + timeperframe = kwargs["timeperframe"] + else: + timeperframe = 1 + + if "vmin" in kwargs.keys(): + vmin = kwargs["vmin"] + else: + vmin = 1 + if "vmax" in kwargs.keys(): + vmax = kwargs["vmax"] + else: + vmax = 1.05 + + if "N1" in kwargs.keys(): + N1 = kwargs["N1"] + else: + N1 = 0 + + if "N2" in kwargs.keys(): + N2 = kwargs["N2"] + else: + N2 = shape[0] + if "title" in kwargs.keys(): + title = kwargs["title"] + else: + title = True + + data = C12[N1:N2, N1:N2] + if fig_ax is None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax = fig_ax + im = imshow( + ax, + data, + origin="lower", + cmap=cmap, + norm=LogNorm(vmin, vmax), + extent=[0, data.shape[0] * timeperframe, 0, data.shape[0] * timeperframe], + interpolation=interpolation, + ) + if title: + tit = "%s-[%s-%s] frames" % (uid, N1, N2) + + ax.set_title(tit) + else: + tit = "" + # ax.set_title('%s-%s frames--Qth= %s'%(N1,N2,g12_num)) + ax.set_xlabel(r"$t_1$ $(s)$", fontsize=18) + ax.set_ylabel(r"$t_2$ $(s)$", fontsize=18) + if show_colorbar: + fig.colorbar(im) + + save = False + if "save" in kwargs: + save = kwargs["save"] + if save: + path = kwargs["path"] + # fp = path + 'Two-time--uid=%s'%(uid) + tit + CurTime + '.png' + fp = path + "%s_Two_time" % (uid) + ".png" + plt.savefig(fp, dpi=fig.dpi) + + if return_fig: + return fig, ax, im + + +def show_C12( + C12, + fig_ax=None, + q_ind=1, + return_fig=False, + interpolation="none", + cmap="viridis", + logs=True, + qlabel=None, + show_colorbar=True, + *argv, + **kwargs +): + """ + plot one-q of two-time correlation function + C12: two-time correlation function, with shape as [ time, time, qs] + q_ind: if integer, for a SAXS q, the nth of q to be plotted, starting from 1, + if a list: for a GiSAXS [qz_ind, qr_ind] + kwargs: support + timeperframe: the time interval + N1: the start frame(time) + N2: the end frame(time) + vmin/vmax: for plot + title: if True, show the tile + + e.g., + show_C12(g12b, q_ind=1, N1=0, N2=500, vmin=1.05, vmax=1.07, ) + + """ + + # strs = [ 'timeperframe', 'N1', 'N2', 'vmin', 'vmax', 'title'] + + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + shape = C12.shape + if (q_ind < 1) or (q_ind > shape[2]): + raise Exceptions( + "Error: qind starts from 1 (corresponding to python array index 0, but in the plot it will show as 1) 
to the max Q-length of two time funcs %s." + % shape[2] + ) + + if isinstance(q_ind, int): + C12_num = q_ind - 1 + else: + qz_ind, qr_ind = q_ind - 1 + C12_num = qz_ind * num_qr + qr_ind + + if "timeperframe" in kwargs.keys(): + timeperframe = kwargs["timeperframe"] + else: + timeperframe = 1 + + if "timeoffset" in kwargs.keys(): ### added timeoffset here + timeoffset = kwargs["timeoffset"] + else: + timeoffset = 0 + + if "vmin" in kwargs.keys(): + vmin = kwargs["vmin"] + else: + vmin = 1 + if "vmax" in kwargs.keys(): + vmax = kwargs["vmax"] + else: + vmax = 1.05 + + if "N1" in kwargs.keys(): + N1 = kwargs["N1"] + if N1 < 0: + N1 = 0 + else: + N1 = 0 + + if "N2" in kwargs.keys(): + N2 = kwargs["N2"] + if N2 > shape[0]: + N2 = shape[0] + else: + N2 = shape[0] + if "title" in kwargs.keys(): + title = kwargs["title"] + else: + title = True + + data = C12[N1:N2, N1:N2, C12_num] + if fig_ax is None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax = fig_ax + + # extent=[0, data.shape[0]*timeperframe, 0, data.shape[0]*timeperframe ] + extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset ### added timeoffset to extend + + if logs: + im = imshow( + ax, + data, + origin="lower", + cmap=cmap, + norm=LogNorm(vmin, vmax), + interpolation=interpolation, + extent=extent, + ) + else: + im = imshow( + ax, data, origin="lower", cmap=cmap, vmin=vmin, vmax=vmax, interpolation=interpolation, extent=extent + ) + if qlabel is not None: + if isinstance(q_ind, int): + qstr = "Qth= %s-qval=%s" % (C12_num + 1, qlabel[C12_num]) + else: + qstr = "Qth= %s" % (C12_num + 1) + if title: + if isinstance(q_ind, int): + tit = "%s-[%s-%s] frames--" % (uid, N1, N2) + qstr + else: + tit = "%s-[%s-%s] frames--Qzth= %s--Qrth= %s" % (uid, N1, N2, qz_ind, qr_ind) + ax.set_title(tit) + else: + tit = "" + # ax.set_title('%s-%s frames--Qth= %s'%(N1,N2,g12_num)) + ax.set_xlabel(r"$t_1$ $(s)$", fontsize=18) + ax.set_ylabel(r"$t_2$ $(s)$", fontsize=18) + if show_colorbar: + fig.colorbar(im) + + save = False + if "save" in kwargs: + save = kwargs["save"] + if save: + path = kwargs["path"] + # fp = path + 'Two-time--uid=%s'%(uid) + tit + CurTime + '.png' + fp = path + "%s_Two_time" % (uid) + ".png" + plt.savefig(fp, dpi=fig.dpi) + + if return_fig: + return fig, ax, im + + +class Exceptions(Exception): + pass diff --git a/pyCHX/backups/pyCHX-backup/XPCS_GiSAXS.py b/pyCHX/backups/pyCHX-backup/XPCS_GiSAXS.py new file mode 100644 index 0000000..a2feda5 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/XPCS_GiSAXS.py @@ -0,0 +1,2596 @@ +""" +Dec 10, 2015 Developed by Y.G.@CHX +yuzhang@bnl.gov +This module is for the GiSAXS XPCS analysis +""" + + + +from skbeam.core.accumulators.binned_statistic import BinnedStatistic1D, BinnedStatistic2D + +from pyCHX.chx_compress import ( + Multifile, + compress_eigerdata, + get_avg_imgc, + init_compress_eigerdata, + read_compressed_eigerdata, +) +from pyCHX.chx_correlationc import cal_g2c +from pyCHX.chx_generic_functions import * +from pyCHX.chx_libs import colors, colors_, markers, markers_ + + +def get_gisaxs_roi2(qr_edge, qz_edge, qr_map, qz_map, mask=None, qval_dict=None): + """Y.G. 2019 Feb 12 + Get xpcs roi of gisaxs by giving Qr centers/edges, Qz centers/edges + Parameters: + qr_edge: list, e.g., [ [0.01,0.02], [0.03,0.04] ]. 
+ each elment has two values for the start and end of one qr edge + qz_edge: list, e.g., [ [0.01,0.02], [0.03,0.04] ] + each elment has two values for the start and end of one qz edge + qr_map: two-d array, the same shape as gisaxs frame, a qr map + qz_map: two-d array, the same shape as gisaxs frame, a qz map + mask: array, the scattering mask + qval_dict: a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + if not None, the new returned qval_dict will include the old one + + Return: + roi_mask: array, the same shape as gisaxs frame, the label array of roi + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + """ + + # qr_edge, qr_center = get_qedge( *Qr ) + # qz_edge, qz_center = get_qedge( *Qz ) + qr_edge, qz_edge = np.array(qr_edge), np.array(qz_edge) + qr_center = 0.5 * (qr_edge[:, 0] + qr_edge[:, 1]) + qz_center = 0.5 * (qz_edge[:, 0] + qz_edge[:, 1]) + label_array_qz = get_qmap_label(qz_map, qz_edge) + label_array_qr = get_qmap_label(qr_map, qr_edge) + label_array_qzr, qzc, qrc = get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center) + labels_qzr, indices_qzr = roi.extract_label_indices(label_array_qzr) + labels_qz, indices_qz = roi.extract_label_indices(label_array_qz) + labels_qr, indices_qr = roi.extract_label_indices(label_array_qr) + if mask is None: + mask = 1 + roi_mask = label_array_qzr * mask + qval_dict = get_qval_dict(np.round(qr_center, 5), np.round(qz_center, 5), qval_dict=qval_dict) + return roi_mask, qval_dict + + +def get_gisaxs_roi(Qr, Qz, qr_map, qz_map, mask=None, qval_dict=None): + """Y.G. 2016 Dec 31 + Get xpcs roi of gisaxs + Parameters: + Qr: list, = [qr_start , qr_end, qr_width, qr_num], corresponding to qr start, qr end, qr width, qr number + Qz: list, = [qz_start , qz_end, qz_width, qz_num], corresponding to qz start, qz end, qz width, qz number + qr_map: two-d array, the same shape as gisaxs frame, a qr map + qz_map: two-d array, the same shape as gisaxs frame, a qz map + mask: array, the scattering mask + qval_dict: a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + if not None, the new returned qval_dict will include the old one + + Return: + roi_mask: array, the same shape as gisaxs frame, the label array of roi + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + """ + + qr_edge, qr_center = get_qedge(*Qr) + qz_edge, qz_center = get_qedge(*Qz) + label_array_qz = get_qmap_label(qz_map, qz_edge) + label_array_qr = get_qmap_label(qr_map, qr_edge) + label_array_qzr, qzc, qrc = get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center) + labels_qzr, indices_qzr = roi.extract_label_indices(label_array_qzr) + labels_qz, indices_qz = roi.extract_label_indices(label_array_qz) + labels_qr, indices_qr = roi.extract_label_indices(label_array_qr) + if mask is None: + mask = 1 + roi_mask = label_array_qzr * mask + qval_dict = get_qval_dict(np.round(qr_center, 5), np.round(qz_center, 5), qval_dict=qval_dict) + return roi_mask, qval_dict + + +############ +##developed at Octo 11, 2016 +def get_qr(data, Qr, Qz, qr, qz, mask=None): + """Octo 12, 2016, Y.G.@CHX + plot one-d of I(q) as a function of qr for different qz + + data: a image/Eiger frame + Qr: info for qr, = qr_start , qr_end, qr_width, qr_num + Qz: info for qz, = qz_start, qz_end, qz_width , qz_num + qr: qr-map + qz: qz-map + mask: a mask for qr-1d integration, default is None + Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qr2, qz2,.... 
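+ Note: as coded below, the returned qr_1d is a plain numpy array (the DataFrame
+ conversion is commented out), whose columns interleave qr and intensity for each qz
+ slice, i.e. [qr_1, I_qz1, qr_2, I_qz2, ...]. A minimal access sketch using array
+ indices rather than column names: plot1D( x = qr_1d[:, 0], y = qr_1d[:, 1], logxy=True )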
+ + Examples: + #to make two-qz, from 0.018 to 0.046, width as 0.008, + qz_width = 0.008 + qz_start = 0.018 + qz_width/2 + qz_end = 0.046 - qz_width/2 + qz_num= 2 + #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + qr_width = 0.1-0.02 + qr_start = 0.02 + qr_width /2 + qr_end = 0.01 - qr_width /2 + qr_num = 1 + Qr = [qr_start , qr_end, qr_width, qr_num] + Qz= [qz_start, qz_end, qz_width , qz_num ] + new_mask[ :, 1020:1045] =0 + ticks = show_qzr_map( qr,qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 ) + qx, qy, qr, qz = convert_gisaxs_pixel_to_q( inc_x0, inc_y0,refl_x0,refl_y0, lamda=lamda, Lsd=Lsd ) + qr_1d = get_qr( avg_imgr, Qr, Qz, qr, qz, new_mask) + + """ + + qr_start, qr_end, qr_width, qr_num = Qr + qz_start, qz_end, qz_width, qz_num = Qz + qr_edge, qr_center = get_qedge(qr_start, qr_end, qr_width, qr_num) + qz_edge, qz_center = get_qedge(qz_start, qz_end, qz_width, qz_num) + label_array_qr = get_qmap_label(qr, qr_edge) + # qr_1d ={} + # columns=[] + + for i, qzc_ in enumerate(qz_center): + # print (i,qzc_) + label_array_qz = get_qmap_label(qz, qz_edge[i * 2 : 2 * i + 2]) + # print (qzc_, qz_edge[i*2:2*i+2]) + label_array_qzr, qzc, qrc = get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center) + # print (np.unique(label_array_qzr )) + if mask is not None: + label_array_qzr *= mask + roi_pixel_num = np.sum(label_array_qzr, axis=0) + qr_ = qr * label_array_qzr + data_ = data * label_array_qzr + qr_ave = np.sum(qr_, axis=0) / roi_pixel_num + data_ave = np.sum(data_, axis=0) / roi_pixel_num + qr_ave, data_ave = zip(*sorted(zip(*[qr_ave[~np.isnan(qr_ave)], data_ave[~np.isnan(data_ave)]]))) + if i == 0: + N_interp = len(qr_ave) + + qr_ave_intp = np.linspace(np.min(qr_ave), np.max(qr_ave), N_interp) + data_ave = np.interp(qr_ave_intp, qr_ave, data_ave) + # columns.append( ['qr%s'%i, str(round(qzc_,4))] ) + if i == 0: + df = np.hstack([(qr_ave_intp).reshape(N_interp, 1), data_ave.reshape(N_interp, 1)]) + else: + df = np.hstack([df, (qr_ave_intp).reshape(N_interp, 1), data_ave.reshape(N_interp, 1)]) + # df = DataFrame( df ) + # df.columns = np.concatenate( columns ) + + return df + + +######################## +# get one-d of I(q) as a function of qr for different qz +##################### + + +def cal_1d_qr( + data, + Qr, + Qz, + qr, + qz, + inc_x0=None, + mask=None, + path=None, + uid=None, + setup_pargs=None, + save=True, + print_save_message=True, +): + """Revised at July 18, 2017 by YG, to correct a divide by zero bug + Dec 16, 2016, Y.G.@CHX + calculate one-d of I(q) as a function of qr for different qz + data: a dataframe + Qr: info for qr, = qr_start , qr_end, qr_width, qr_num, the purpose of Qr is only for the defination of qr range (qr number does not matter) + Qz: info for qz, = qz_start, qz_end, qz_width , qz_num + qr: qr-map + qz: qz-map + inc_x0: x-center of incident beam + mask: a mask for qr-1d integration + setup_pargs: gives path, filename... + + Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qz2,.... 
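+ (the actual column labels follow the pattern 'qr', 'qz0=<qz value>', 'qz1=<qz value>', ...;
+ a minimal access sketch, with a hypothetical qz value:
+ plot1D( x = qr_1d['qr'], y = qr_1d['qz0=0.0367'], logxy=True ) )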
+ Plot 1D cureve as a function of Qr for each Qz + + Examples: + #to make two-qz, from 0.018 to 0.046, width as 0.008, + qz_width = 0.008 + qz_start = 0.018 + qz_width/2 + qz_end = 0.046 - qz_width/2 + qz_num= 2 + + + #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + qr_width = 0.1-0.02 + qr_start = 0.02 + qr_width /2 + qr_end = 0.01 - qr_width /2 + qr_num = 1 + + Qr = [qr_start , qr_end, qr_width, qr_num] + Qz= [qz_start, qz_end, qz_width , qz_num ] + new_mask[ :, 1020:1045] =0 + qx, qy, qr, qz = convert_gisaxs_pixel_to_q( inc_x0, inc_y0,refl_x0,refl_y0, lamda=lamda, Lsd=Lsd ) + + qr_1d = get_1d_qr( avg_imgr, Qr, Qz, qr, qz, inc_x0, new_mask) + + A plot example: + plot1D( x= qr_1d['qr1'], y = qr_1d['0.0367'], logxy=True ) + """ + qr_start, qr_end, qr_width, qr_num = Qr + qz_start, qz_end, qz_width, qz_num = Qz + qr_edge, qr_center = get_qedge(qr_start, qr_end, qr_width, qr_num, verbose=False) + qz_edge, qz_center = get_qedge(qz_start, qz_end, qz_width, qz_num, verbose=False) + + # print ('The qr_edge is: %s\nThe qr_center is: %s'%(qr_edge, qr_center)) + # print ('The qz_edge is: %s\nThe qz_center is: %s'%(qz_edge, qz_center)) + + label_array_qr = get_qmap_label(qr, qr_edge) + # qr_1d ={} + columns = [] + for i, qzc_ in enumerate(qz_center): + # print (i,qzc_) + label_array_qz = get_qmap_label(qz, qz_edge[i * 2 : 2 * i + 2]) + # print (qzc_, qz_edge[i*2:2*i+2]) + label_array_qzr, qzc, qrc = get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center) + # print (np.unique(label_array_qzr )) + if mask is not None: + label_array_qzr *= mask + roi_pixel_num = np.sum(label_array_qzr, axis=0) + # print( label_array_qzr ) + qr_ = qr * label_array_qzr + data_ = data * label_array_qzr + + w = np.where(roi_pixel_num) + qr_ave = np.zeros_like(roi_pixel_num, dtype=float)[w] + data_ave = np.zeros_like(roi_pixel_num, dtype=float)[w] + + qr_ave = (np.sum(qr_, axis=0))[w] / roi_pixel_num[w] + data_ave = (np.sum(data_, axis=0))[w] / roi_pixel_num[w] + qr_ave, data_ave = zip(*sorted(zip(*[qr_ave[~np.isnan(qr_ave)], data_ave[~np.isnan(data_ave)]]))) + if i == 0: + N_interp = len(qr_ave) + columns.append(["qr"]) + # qr_1d[i]= qr_ave_intp + qr_ave_intp = np.linspace(np.min(qr_ave), np.max(qr_ave), N_interp) + data_ave = np.interp(qr_ave_intp, qr_ave, data_ave) + # qr_1d[i]= [qr_ave_intp, data_ave] + columns.append(["qz%s=%s" % (i, str(round(qzc_, 4)))]) + if i == 0: + df = np.hstack([(qr_ave_intp).reshape(N_interp, 1), data_ave.reshape(N_interp, 1)]) + else: + df = np.hstack([df, data_ave.reshape(N_interp, 1)]) + df = DataFrame(df) + df.columns = np.concatenate(columns) + + if save: + if path is None: + path = setup_pargs["path"] + if uid is None: + uid = setup_pargs["uid"] + filename = os.path.join(path, "%s_qr_1d.csv" % (uid)) + df.to_csv(filename) + if print_save_message: + print("The qr_1d is saved in %s with filename as %s_qr_1d.csv" % (path, uid)) + return df + + +def get_t_qrc(FD, frame_edge, Qr, Qz, qr, qz, mask=None, path=None, uid=None, save=True, *argv, **kwargs): + """Get t-dependent qr + + Parameters + ---------- + FD: a compressed imgs series handler + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + mask: a image mask + + Returns + --------- + qrt_pds: dataframe, with columns as [qr, qz0_fra_from_beg1_to_end1, qz0_fra_from_beg2_to_end2, ... + qz1_fra_from_beg1_to_end1, qz1_fra_from_beg2_to_end2, ... + ... 
+ ] + + """ + + Nt = len(frame_edge) + iqs = list(np.zeros(Nt)) + qz_start, qz_end, qz_width, qz_num = Qz + qz_edge, qz_center = get_qedge(qz_start, qz_end, qz_width, qz_num, verbose=False) + # print('here') + # qr_1d = np.zeros( ) + + if uid is None: + uid = "uid" + for i in range(Nt): + # str(round(qz_center[j], 4 ) + t1, t2 = frame_edge[i] + avg_imgx = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False) + + qrti = cal_1d_qr(avg_imgx, Qr, Qz, qr, qz, mask=mask, save=False) + if i == 0: + qrt_pds = np.zeros([len(qrti), 1 + Nt * qz_num]) + columns = np.zeros(1 + Nt * qz_num, dtype=object) + columns[0] = "qr" + qrt_pds[:, 0] = qrti["qr"] + for j in range(qz_num): + coli = qrti.columns[1 + j] + qrt_pds[:, 1 + i + Nt * j] = qrti[coli] + columns[1 + i + Nt * j] = coli + "_fra_%s_to_%s" % (t1, t2) + + qrt_pds = DataFrame(qrt_pds) + qrt_pds.columns = columns + if save: + if path is None: + path = setup_pargs["path"] + if uid is None: + uid = setup_pargs["uid"] + filename = os.path.join(path, "%s_qrt_pds.csv" % (uid)) + qrt_pds.to_csv(filename) + print("The qr~time is saved in %s with filename as %s_qrt_pds.csv" % (path, uid)) + return qrt_pds + + +def plot_qrt_pds(qrt_pds, frame_edge, qz_index=0, uid="uid", path="", fontsize=8, *argv, **kwargs): + """Y.G. Jan 04, 2017 + plot t-dependent qr + + Parameters + ---------- + qrt_pds: dataframe, with columns as [qr, qz0_fra_from_beg1_to_end1, qz0_fra_from_beg2_to_end2, ... + qz1_fra_from_beg1_to_end1, qz1_fra_from_beg2_to_end2, ... + ... + ] + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + + qz_index, if = integer, e.g. =0, only plot the qr~t for qz0 + if None, plot all qzs + + Returns + + """ + + fig, ax = plt.subplots(figsize=(8, 6)) + cols = np.array(qrt_pds.columns) + Nt = len(frame_edge) + # num_qz = int( (len( cols ) -1 ) /Nt ) + qr = qrt_pds["qr"] + if qz_index is None: + r = range(1, len(cols)) + else: + r = range(1 + qz_index * Nt, 1 + (1 + qz_index) * Nt) + for i in r: + y = qrt_pds[cols[i]] + ax.semilogy(qr, y, label=cols[i], marker=markers[i], color=colors[i], ls="-") + # ax.set_xlabel("q in pixel") + ax.set_xlabel(r"$Q_r$" + r"($\AA^{-1}$)") + ax.set_ylabel("I(q)") + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + + ax.legend(loc="best", fontsize=fontsize) + + title = ax.set_title("%s_Iq_t" % uid) + title.set_y(1.01) + + fp = path + "%s_Iq_t" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def plot_t_qrc(qr_1d, frame_edge, save=False, pargs=None, fontsize=8, *argv, **kwargs): + """plot t-dependent qr + + Parameters + ---------- + qr_1d: array, with shape as time length, frame_edge + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + save: save the plot + if save, all the following paramters are given in argv + { + 'path': + 'uid': } + + Returns + + """ + + fig, ax = plt.subplots(figsize=(8, 6)) + Nt = qr_1d.shape[1] + q = qr_1d[:, 0] + for i in range(Nt - 1): + t1, t2 = frame_edge[i] + ax.semilogy(q, qr_1d[:, i + 1], "o-", label="frame: %s--%s" % (t1, t2)) + # ax.set_xlabel("q in pixel") + ax.set_xlabel(r"$Q_r$" + r"($\AA^{-1}$)") + ax.set_ylabel("I(q)") + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + + ax.legend(loc="best", fontsize=fontsize) + uid = pargs["uid"] + title = ax.set_title("uid= %s--t~I(q)" % uid) + title.set_y(1.01) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, 
dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + # fp = path + 'uid= %s--Iq~t-'%uid + CurTime + '.png' + fp = path + "uid=%s--Iq-t-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + save_arrays( + np.vstack([q, np.array(iqs)]).T, + label=["q_A-1"] + ["Fram-%s-%s" % (t[0], t[1]) for t in frame_edge], + filename="uid=%s-q-Iqt" % uid, + path=path, + ) + + +########################################## +###Functions for GiSAXS +########################################## + + +def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): + """Dec 16, 2015, Y.G.@CHX""" + y, x = np.indices([dim_z, dim_r]) + Nr = int(dim_r / qp_w) + Nz = int(dim_z / qz_w) + noqs = Nr * Nz + + ind = 1 + for i in range(0, Nr): + for j in range(0, Nz): + y[qr_w * i : qr_w * (i + 1), qz_w * j : qz_w * (j + 1)] = ind + ind += 1 + return y + + +########################################### +# for Q-map, convert pixel to Q +########################################### + + +def convert_Qmap(img, qx_map, qy_map=None, bins=None, rangeq=None, mask=None, statistic="sum"): + """Y.G. Nov 3@CHX + Convert a scattering image to a qmap by giving qx_map and qy_map + Return converted qmap, x-coordinates and y-coordinates + """ + if qy_map is not None: + if rangeq is None: + qx_min, qx_max = qx_map.min(), qx_map.max() + qy_min, qy_max = qy_map.min(), qy_map.max() + rangeq = [[qx_min, qx_max], [qy_min, qy_max]] + if bins is None: + bins = qx_map.shape + if mask is not None: + m = mask.ravel() + else: + m = None + b2d = BinnedStatistic2D( + qx_map.ravel(), qy_map.ravel(), statistic=statistic, bins=bins, mask=m, range=rangeq + ) + remesh_data, xbins, ybins = b2d(img.ravel()), b2d.bin_centers[0], b2d.bin_centers[1] + else: + if rangeq is None: + qx_min, qx_max = qx_map.min(), qx_map.max() + rangeq = [qx_min, qx_max] + if bins is None: + bins = [qx_map.size] + if mask is not None: + m = mask.ravel() + else: + m = None + b1d = BinnedStatistic1D(qx_map.ravel(), bins=bins, mask=m) + remesh_data = b1d(img.ravel()) + xbins = b1d.bin_centers + ybins = None + return remesh_data, xbins, ybins + + +def get_refl_xy(inc_ang, inc_phi, inc_x0, inc_y0, pixelsize=[0.075, 0.075], Lsd=5000): + """ + Input: + inc_angle: deg, + inc_phi: deg, by default, 0 ( if inc_x = ref_x ) + pixelsize: 0.075 mm for Eiger4M detector + sample to detector distance: Lsd, in mm + + Output: + reflected beam center x, y + + """ + px, py = pixelsize + refl_y0 = np.tan(2 * np.radians(inc_ang)) * Lsd / (py) + inc_y0 + refl_x0 = inc_x0 - np.tan(np.radians(inc_phi)) * (refl_y0 - inc_y0) * py / px + print("The reflection beam center is: [%.2f, %.2f] (pix)" % (refl_x0, refl_y0)) + return refl_x0, refl_y0 + + +def get_alphaf_thetaf( + inc_x0, inc_y0, inc_ang, inc_phi=0, pixelsize=[0.075, 0.075], Lsd=5000, dimx=2070.0, dimy=2167.0 +): + """Nov 19, 2018@SMI to get alphaf and thetaf for gi scattering + Input: + inc_angle: deg, + inc_phi: deg, by default, 0 ( if inc_x = ref_x ) + pixelsize: 0.075 mm for Eiger4M detector + sample to detector distance: Lsd, in mm + detector image size: dimx = 2070,dimy=2167 for Eiger4M detector + Output: + reflected angle alphaf (outplane) + reflected angle thetaf (inplane ) + + """ + px, py = pixelsize + y, x = np.indices([int(dimy), int(dimx)]) + alphai, thetai = np.radians(inc_ang), np.radians(inc_phi) + alphaf = np.arctan2((y - inc_y0) * py, Lsd) - alphai + thetaf = np.arctan2((x - inc_x0) * px, Lsd) / 2 - thetai + # print( px, py, Lsd, dimy, dimx, alphai, thetai) + return alphaf, thetaf + + +def convert_gisaxs_pixel_to_q2( + inc_ang, + 
alphaf, + thetaf, + phi=0, + lamda=1.0, + thetai=0.0, +): + """ + Dec 16, 2015, Y.G.@CHX + giving: + incident_angle, (inc_ang), in deg + alphaf, + thetaf, + the title angle (phi) + wavelength: angstron + + get: q_parallel (qp), q_direction_z (qz) + + """ + pref = 2 * np.pi / lamda + alphai = np.radians(inc_ang) + thetai = np.radians(thetai) + phi = np.radians(phi) + + qx = np.cos(alphaf) * np.cos(2 * thetaf) - np.cos(alphai) * np.cos(2 * thetai) + qy_ = np.cos(alphaf) * np.sin(2 * thetaf) - np.cos(alphai) * np.sin(2 * thetai) + qz_ = np.sin(alphaf) + np.sin(alphai) + qy = qz_ * np.sin(phi) + qy_ * np.cos(phi) + qz = qz_ * np.cos(phi) - qy_ * np.sin(phi) + qr = np.sqrt(qx**2 + qy**2) + return qx * pref, qy * pref, qr * pref, qz * pref + + +def get_incident_angles(inc_x0, inc_y0, refl_x0, refl_y0, pixelsize=[75, 75], Lsd=5.0): + """ + Dec 16, 2015, Y.G.@CHX + giving: incident beam center: bcenx,bceny + reflected beam on detector: rcenx, rceny + sample to detector distance: Lsd, in meters + pixelsize: 75 um for Eiger4M detector + get incident_angle (alphai), the title angle (phi) + """ + if Lsd >= 1000: + Lsd = Lsd / 1000.0 + + px, py = pixelsize + phi = np.arctan2((-refl_x0 + inc_x0) * px * 10 ** (-6), (refl_y0 - inc_y0) * py * 10 ** (-6)) + alphai = np.arctan2((refl_y0 - inc_y0) * py * 10 ** (-6), Lsd) / 2.0 + # thetai = np.arctan2( (rcenx - bcenx)*px *10**(-6), Lsd ) /2. #?? + + return alphai, phi + + +def get_reflected_angles( + inc_x0, inc_y0, refl_x0, refl_y0, thetai=0.0, pixelsize=[75, 75], Lsd=5.0, dimx=2070.0, dimy=2167.0 +): + """Dec 16, 2015, Y.G.@CHX + giving: incident beam center: bcenx,bceny + reflected beam on detector: rcenx, rceny + sample to detector distance: Lsd, in mm + pixelsize: 75 um for Eiger4M detector + detector image size: dimx = 2070,dimy=2167 for Eiger4M detector + get reflected angle alphaf (outplane) + reflected angle thetaf (inplane ) + """ + # if Lsd>=1000:#it should be something wrong and the unit should be meter + # convert Lsd from mm to m + if Lsd >= 1000: + Lsd = Lsd / 1000.0 + alphai, phi = get_incident_angles(inc_x0, inc_y0, refl_x0, refl_y0, pixelsize, Lsd) + print("The incident_angle (alphai) is: %s" % (alphai * 180 / np.pi)) + px, py = pixelsize + y, x = np.indices([int(dimy), int(dimx)]) + # alphaf = np.arctan2( (y-inc_y0)*py*10**(-6), Lsd )/2 - alphai + alphaf = np.arctan2((y - inc_y0) * py * 10 ** (-6), Lsd) - alphai + thetaf = np.arctan2((x - inc_x0) * px * 10 ** (-6), Lsd) / 2 - thetai + return alphaf, thetaf, alphai, phi + + +def convert_gisaxs_pixel_to_q( + inc_x0, inc_y0, refl_x0, refl_y0, pixelsize=[75, 75], Lsd=5.0, dimx=2070.0, dimy=2167.0, thetai=0.0, lamda=1.0 +): + """ + Dec 16, 2015, Y.G.@CHX + giving: incident beam center: bcenx,bceny + reflected beam on detector: rcenx, rceny + sample to detector distance: Lsd, in meters + pixelsize: 75 um for Eiger4M detector + detector image size: dimx = 2070,dimy=2167 for Eiger4M detector + wavelength: angstron + + get: q_parallel (qp), q_direction_z (qz) + + """ + alphaf, thetaf, alphai, phi = get_reflected_angles( + inc_x0, inc_y0, refl_x0, refl_y0, thetai, pixelsize, Lsd, dimx, dimy + ) + pref = 2 * np.pi / lamda + qx = np.cos(alphaf) * np.cos(2 * thetaf) - np.cos(alphai) * np.cos(2 * thetai) + qy_ = np.cos(alphaf) * np.sin(2 * thetaf) - np.cos(alphai) * np.sin(2 * thetai) + qz_ = np.sin(alphaf) + np.sin(alphai) + qy = qz_ * np.sin(phi) + qy_ * np.cos(phi) + qz = qz_ * np.cos(phi) - qy_ * np.sin(phi) + qr = np.sqrt(qx**2 + qy**2) + return qx * pref, qy * pref, qr * pref, qz * pref + + +def 
get_qedge(qstart, qend, qwidth, noqs, verbose=True): + """July 18, 2017 Revised by Y.G.@CHX, + Add print info for noqs=1 + Dec 16, 2015, Y.G.@CHX + DOCUMENT get_qedge( ) + give qstart,qend,qwidth,noqs + return a qedge by giving the noqs, qstart,qend,qwidth. + a qcenter, which is center of each qedge + KEYWORD: None""" + import numpy as np + + if noqs != 1: + spacing = (qend - qstart - noqs * qwidth) / (noqs - 1) # spacing between rings + qedges = (roi.ring_edges(qstart, qwidth, spacing, noqs)).ravel() + qcenter = (qedges[::2] + qedges[1::2]) / 2 + else: + spacing = 0 + qedges = (roi.ring_edges(qstart, qwidth, spacing, noqs)).ravel() + # qedges = np.array( [qstart, qend] ) + qcenter = [(qedges[1] + qedges[0]) / 2] + if verbose: + print("Since noqs=1, the qend is actually defined by qstart + qwidth.") + return qedges, qcenter + + +def get_qedge2( + qstart, + qend, + qwidth, + noqs, +): + """DOCUMENT make_qlist( ) + give qstart,qend,qwidth,noqs + return a qedge by giving the noqs, qstart,qend,qwidth. + a qcenter, which is center of each qedge + KEYWORD: None""" + + import numpy as np + + qcenter = np.linspace(qstart, qend, noqs) + # print ('the qcenter is: %s'%qcenter ) + qedge = np.zeros(2 * noqs) + qedge[::2] = qcenter - (qwidth / 2) # +1 #render even value + qedge[1::2] = qcenter + qwidth / 2 # render odd value + return qedge, qcenter + + +########################################### +# for plot Q-map +########################################### + + +def get_qmap_label(qmap, qedge): + import numpy as np + + """ + April 20, 2016, Y.G.@CHX + give a qmap and qedge to bin the qmap into a label array + """ + edges = np.atleast_2d(np.asarray(qedge)).ravel() + label_array = np.digitize(qmap.ravel(), edges, right=False) + label_array = np.int_(label_array) + label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2 + label_array = label_array.reshape(qmap.shape) + return label_array + + +def get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center): + """April 20, 2016, Y.G.@CHX, get qzrmap""" + qzmax = label_array_qz.max() + label_array_qr_ = np.zeros(label_array_qr.shape) + ind = np.where(label_array_qr != 0) + label_array_qr_[ind] = label_array_qr[ind] + 1e4 # add some large number to qr + label_array_qzr = label_array_qz * label_array_qr_ + + # convert label_array_qzr to [1,2,3,...] + uqzr = np.unique(label_array_qzr)[1:] + + uqz = np.unique(label_array_qz)[1:] + uqr = np.unique(label_array_qr)[1:] + # print (uqzr) + label_array_qzr_ = np.zeros_like(label_array_qzr) + newl = np.arange(1, len(uqzr) + 1) + + qzc = list(qz_center) * len(uqr) + qrc = [[qr_center[i]] * len(uqz) for i in range(len(uqr))] + + for i, label in enumerate(uqzr): + # print (i, label) + label_array_qzr_.ravel()[np.where(label_array_qzr.ravel() == label)[0]] = newl[i] + + return np.int_(label_array_qzr_), np.array(qzc), np.concatenate(np.array(qrc)) + + +def show_label_array_on_image( + ax, image, label_array, cmap=None, norm=None, log_img=True, alpha=0.3, imshow_cmap="gray", **kwargs +): # norm=LogNorm(), + """ + This will plot the required ROI's(labeled array) on the image + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + image : array + The image array + label_array : array + Expected to be an unsigned integer array. 
0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use for plotting the label_array, defaults to 'None' + imshow_cmap : str or colormap, optional + Color map to use for plotting the image, defaults to 'gray' + norm : str, optional + Normalize scale data, defaults to 'Lognorm()' + Returns + ------- + im : AxesImage + The artist added to the axes + im_label : AxesImage + The artist added to the axes + """ + ax.set_aspect("equal") + if log_img: + norm=LogNorm(vmin=vmin, vmax=vmax) + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", norm=norm) # norm=norm, + else: + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", norm=norm, **kwargs) # norm=norm, + + im_label = mpl_plot.show_label_array( + ax, label_array, cmap=cmap, norm=norm, alpha=alpha, **kwargs + ) # norm=norm, + + return im, im_label + + +def show_qz(qz): + """Dec 16, 2015, Y.G.@CHX + plot qz mape + + """ + + fig, ax = plt.subplots() + im = ax.imshow(qz, origin="lower", cmap="viridis", vmin=qz.min(), vmax=qz.max()) + fig.colorbar(im) + ax.set_title("Q-z") + # plt.show() + + +def show_qr(qr): + """Dec 16, 2015, Y.G.@CHX + plot qr mape + + """ + fig, ax = plt.subplots() + im = ax.imshow(qr, origin="lower", cmap="viridis", vmin=qr.min(), vmax=qr.max()) + fig.colorbar(im) + ax.set_title("Q-r") + # plt.show() + + +def show_alphaf( + alphaf, +): + """Dec 16, 2015, Y.G.@CHX + plot alphaf mape + + """ + + fig, ax = plt.subplots() + im = ax.imshow(alphaf * 180 / np.pi, origin="lower", cmap="viridis", vmin=-1, vmax=1.5) + # im=ax.imshow(alphaf, origin='lower' ,cmap='viridis',norm= LogNorm(vmin=0.0001,vmax=2.00)) + fig.colorbar(im) + ax.set_title("alphaf") + # plt.show() + + +def get_1d_qr( + data, + Qr, + Qz, + qr, + qz, + inc_x0, + mask=None, + show_roi=True, + ticks=None, + alpha=0.3, + loglog=False, + save=True, + setup_pargs=None, +): + """Dec 16, 2015, Y.G.@CHX + plot one-d of I(q) as a function of qr for different qz + data: a dataframe + Qr: info for qr, = qr_start , qr_end, qr_width, qr_num + Qz: info for qz, = qz_start, qz_end, qz_width , qz_num + qr: qr-map + qz: qz-map + inc_x0: x-center of incident beam + mask: a mask for qr-1d integration + show_roi: boolean, if ture, show the interest ROI + ticks: ticks for the plot, = zticks, zticks_label, rticks, rticks_label + alpha: transparency of ROI + loglog: if True, plot in log-log scale + setup_pargs: gives path, filename... + + Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qr2, qz2,.... 
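+ (here the columns are named 'qr0', '<qz value 0>', 'qr1', '<qz value 1>', ..., i.e. one
+ qr/intensity pair per qz slice; the value 0.0367 used in the plot example below is a
+ hypothetical qz center)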
+ Plot 1D cureve as a function of Qr for each Qz + + + + + Examples: + #to make two-qz, from 0.018 to 0.046, width as 0.008, + qz_width = 0.008 + qz_start = 0.018 + qz_width/2 + qz_end = 0.046 - qz_width/2 + qz_num= 2 + + + #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + qr_width = 0.1-0.02 + qr_start = 0.02 + qr_width /2 + qr_end = 0.01 - qr_width /2 + qr_num = 1 + + Qr = [qr_start , qr_end, qr_width, qr_num] + Qz= [qz_start, qz_end, qz_width , qz_num ] + new_mask[ :, 1020:1045] =0 + ticks = show_qzr_map( qr,qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 ) + qx, qy, qr, qz = convert_gisaxs_pixel_to_q( inc_x0, inc_y0,refl_x0,refl_y0, lamda=lamda, Lsd=Lsd ) + + qr_1d = get_1d_qr( avg_imgr, Qr, Qz, qr, qz, inc_x0, new_mask, True, ticks, .8) + + A plot example: + plot1D( x= qr_1d['qr1'], y = qr_1d['0.0367'], logxy=True ) + + + """ + + qr_start, qr_end, qr_width, qr_num = Qr + qz_start, qz_end, qz_width, qz_num = Qz + qr_edge, qr_center = get_qedge(qr_start, qr_end, qr_width, qr_num) + qz_edge, qz_center = get_qedge(qz_start, qz_end, qz_width, qz_num) + + print("The qr_edge is: %s\nThe qr_center is: %s" % (qr_edge, qr_center)) + print("The qz_edge is: %s\nThe qz_center is: %s" % (qz_edge, qz_center)) + label_array_qr = get_qmap_label(qr, qr_edge) + + if show_roi: + label_array_qz0 = get_qmap_label(qz, qz_edge) + label_array_qzr0, qzc0, qrc0 = get_qzrmap(label_array_qz0, label_array_qr, qz_center, qr_center) + + if mask is not None: + label_array_qzr0 *= mask + # data_ = data*label_array_qzr0 + show_qzr_roi(data, label_array_qzr0, inc_x0, ticks, alpha) + + fig, ax = plt.subplots() + qr_1d = {} + columns = [] + for i, qzc_ in enumerate(qz_center): + # print (i,qzc_) + label_array_qz = get_qmap_label(qz, qz_edge[i * 2 : 2 * i + 2]) + # print (qzc_, qz_edge[i*2:2*i+2]) + label_array_qzr, qzc, qrc = get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center) + # print (np.unique(label_array_qzr )) + if mask is not None: + label_array_qzr *= mask + roi_pixel_num = np.sum(label_array_qzr, axis=0) + qr_ = qr * label_array_qzr + data_ = data * label_array_qzr + qr_ave = np.sum(qr_, axis=0) / roi_pixel_num + data_ave = np.sum(data_, axis=0) / roi_pixel_num + + qr_ave, data_ave = zip(*sorted(zip(*[qr_ave[~np.isnan(qr_ave)], data_ave[~np.isnan(data_ave)]]))) + + if i == 0: + N_interp = len(qr_ave) + + qr_ave_intp = np.linspace(np.min(qr_ave), np.max(qr_ave), N_interp) + data_ave = np.interp(qr_ave_intp, qr_ave, data_ave) + + qr_1d[i] = [qr_ave_intp, data_ave] + columns.append(["qr%s" % i, str(round(qzc_, 4))]) + if loglog: + ax.loglog(qr_ave_intp, data_ave, "--o", label="qz= %f" % qzc_, markersize=1) + else: + ax.plot(qr_ave_intp, data_ave, "--o", label="qz= %f" % qzc_) + if i == 0: + df = np.hstack([(qr_ave_intp).reshape(N_interp, 1), data_ave.reshape(N_interp, 1)]) + else: + df = np.hstack([df, (qr_ave_intp).reshape(N_interp, 1), data_ave.reshape(N_interp, 1)]) + + # ax.set_xlabel( r'$q_r$', fontsize=15) + ax.set_xlabel(r"$q_r$" r"($\AA^{-1}$)", fontsize=18) + ax.set_ylabel("$Intensity (a.u.)$", fontsize=18) + ax.set_yscale("log") + # ax.set_xscale('log') + ax.set_xlim(qr.max(), qr.min()) + ax.legend(loc="best") + + df = DataFrame(df) + df.columns = np.concatenate(columns) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = setup_pargs["path"] + uid = setup_pargs["uid"] + # filename = os.path.join(path, 'qr_1d-%s-%s.csv' % (uid,CurTime)) + filename = os.path.join(path, "uid=%s--qr_1d.csv" % (uid)) + 
df.to_csv(filename) + print("The qr_1d is saved in %s with filename as uid=%s--qr_1d.csv" % (path, uid)) + + # fp = path + 'Uid= %s--Circular Average'%uid + CurTime + '.png' + fp = path + "uid=%s--qr_1d-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + return df + + +def plot_qr_1d_with_ROI(qr_1d, qr_center, loglog=False, save=True, uid="uid", path=""): + """Dec 16, 2015, Y.G.@CHX + plot one-d of I(q) as a function of qr with ROI + qr_1d: a dataframe for qr_1d + qr_center: the center of qr + loglog: if True, plot in log-log scale + Return: + Plot 1D cureve with ROI + A plot example: + plot_1d_qr_with_ROI( df, qr_center, loglog=False, save=True ) + + """ + + fig, ax = plt.subplots() + Ncol = len(qr_1d.columns) + Nqr = Ncol % 2 + qz_center = qr_1d.columns[1::1] # qr_1d.columns[1::2] + Nqz = len(qz_center) + for i, qzc_ in enumerate(qz_center): + x = qr_1d[qr_1d.columns[0]] + y = qr_1d[qzc_] + if loglog: + ax.loglog(x, y, "--o", label="qz= %s" % qzc_, markersize=1) + else: + ax.plot(x, y, "--o", label="qz= %s" % qzc_) + for qrc in qr_center: + ax.axvline(qrc) # , linewidth = 5 ) + + # ax.set_xlabel( r'$q_r$', fontsize=15) + ax.set_xlabel(r"$q_r$" r"($\AA^{-1}$)", fontsize=18) + ax.set_ylabel("$Intensity (a.u.)$", fontsize=18) + ax.set_yscale("log") + # ax.set_xscale('log') + ax.set_xlim(x.max(), x.min()) + ax.legend(loc="best") + ax.set_title("%s_Qr_ROI" % uid) + if save: + fp = path + "%s_Qr_ROI" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + +def interp_zeros(data): + from scipy.interpolate import interp1d + + gf = data.ravel() + (indice,) = gf.nonzero() + start, stop = indice[0], indice[-1] + 1 + dx, dy = data.shape + x = np.arange(dx * dy) + f = interp1d(x[indice], gf[indice]) + gf[start:stop] = f(x[start:stop]) + return gf.reshape([dx, dy]) + + +def get_qr_tick_label(qr, label_array_qr, inc_x0, interp=True): + """ + Dec 16, 2015, Y.G.@CHX + get zticks,zticks_label + + Parameters: + + qr: 2-D array, qr of a gisaxs image (data) + label_array_qr: a labelled array of qr map, get by: + label_array_qr = get_qmap_label( qr, qz_edge) + Options: + interp: if True, make qz label round by np.round(data, 2) + inc_x0: x-center of incident beam + Return: + rticks: list, r-tick positions in unit of pixel + rticks_label: list, r-tick positions in unit of real space + + Examples: + rticks,rticks_label = get_qr_tick_label( qr, label_array_qr) + + """ + + rticks = [] + rticks_label = [] + num = len(np.unique(label_array_qr)) + for i in range(1, num): + ind = np.sort(np.where(label_array_qr == i)[1]) + # tick = round( qr[label_array_qr==i].mean(),2) + tick = qr[label_array_qr == i].mean() + if ind[0] < inc_x0 and ind[-1] > inc_x0: # + # mean1 = int( (ind[np.where(ind < inc_x0)[0]]).mean() ) + # mean2 = int( (ind[np.where(ind > inc_x0)[0]]).mean() ) + + mean1 = int((ind[np.where(ind < inc_x0)[0]])[0]) + mean2 = int((ind[np.where(ind > inc_x0)[0]])[0]) + rticks.append(mean1) + rticks.append(mean2) + rticks_label.append(tick) + rticks_label.append(tick) + else: + # print('here') + # mean = int( ind.mean() ) + mean = int(ind[0]) + # mean = int( (ind[0] +ind[-1])/2 ) + rticks.append(mean) + rticks_label.append(tick) + # print (rticks) + # print (mean, tick) + n = len(rticks) + for i, rt in enumerate(rticks): + if rt == 0: + rticks[i] = n - i + + if interp: + rticks = np.array(rticks) + rticks_label = np.array(rticks_label) + try: + w = np.where(rticks <= inc_x0)[0] + rticks1 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) + rticks_label1 = 
np.round(rticks_label[w], 3) + except: + rticks_label1 = [] + try: + w = np.where(rticks > inc_x0)[0] + rticks2 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) + rticks = np.append(rticks1, rticks2) + rticks_label2 = np.round(rticks_label[w], 3) + except: + rticks_label2 = [] + + rticks_label = np.append(rticks_label1, rticks_label2) + + return rticks, rticks_label + + +def get_qz_tick_label(qz, label_array_qz, interp=True): + """ + Dec 16, 2015, Y.G.@CHX + get zticks,zticks_label + + Parameters: + + qz: 2-D array, qz of a gisaxs image (data) + label_array_qz: a labelled array of qz map, get by: + label_array_qz = get_qmap_label( qz, qz_edge) + interp: if True, make qz label round by np.round(data, 2) + + Return: + zticks: list, z-tick positions in unit of pixel + zticks_label: list, z-tick positions in unit of real space + + Examples: + zticks,zticks_label = get_qz_tick_label( qz, label_array_qz) + + """ + + num = len(np.unique(label_array_qz)) + # zticks = np.array( [ int( np.where( label_array_qz==i )[0].mean() ) for i in range( 1,num ) ]) + zticks = np.array([int(np.where(label_array_qz == i)[0][0]) for i in range(1, num)]) + + # zticks_label = np.array( [ round( qz[label_array_qz==i].mean(),4) for i in range( 1, num ) ]) + # zticks_label = np.array( [ qz[label_array_qz==i].mean() for i in range( 1, num ) ]) + zticks_label = np.array([qz[label_array_qz == i][0] for i in range(1, num)]) + + if interp: + zticks = np.int_(np.interp(np.round(zticks_label, 3), zticks_label, zticks)) + zticks_label = np.round(zticks_label, 3) + return zticks, zticks_label + + +def get_qzr_map(qr, qz, inc_x0, Nzline=10, Nrline=10, interp=True, return_qrz_label=True, *argv, **kwargs): + """ + Dec 31, 2016, Y.G.@CHX + Calculate a qzr map of a gisaxs image (data) without plot + + Parameters: + qr: 2-D array, qr of a gisaxs image (data) + qz: 2-D array, qz of a gisaxs image (data) + inc_x0: the incident beam center x + Options: + Nzline: int, z-line number + Nrline: int, r-line number + + Return: + if return_qrz_label + zticks: list, z-tick positions in unit of pixel + zticks_label: list, z-tick positions in unit of real space + rticks: list, r-tick positions in unit of pixel + rticks_label: list, r-tick positions in unit of real space + else: return the additional two below + label_array_qr: qr label array with the same shpae as gisaxs image + label_array_qz: qz label array with the same shpae as gisaxs image + + Examples: + ticks = get_qzr_map( qr, qz, inc_x0 ) + """ + qr_start, qr_end, qr_num = qr.min(), qr.max(), Nrline + qz_start, qz_end, qz_num = qz.min(), qz.max(), Nzline + qr_edge, qr_center = get_qedge(qr_start, qr_end, (qr_end - qr_start) / (qr_num + 100), qr_num) + qz_edge, qz_center = get_qedge(qz_start, qz_end, (qz_end - qz_start) / (qz_num + 100), qz_num) + + label_array_qz = get_qmap_label(qz, qz_edge) + label_array_qr = get_qmap_label(qr, qr_edge) + + labels_qz, indices_qz = roi.extract_label_indices(label_array_qz) + labels_qr, indices_qr = roi.extract_label_indices(label_array_qr) + num_qz = len(np.unique(labels_qz)) + num_qr = len(np.unique(labels_qr)) + zticks, zticks_label = get_qz_tick_label(qz, label_array_qz) + # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) + try: + rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) + except: + rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) + # stride = int(len(zticks)/10) + ticks = [zticks, 
zticks_label, rticks, rticks_label] + if return_qrz_label: + return zticks, zticks_label, rticks, rticks_label, label_array_qr, label_array_qz + else: + return zticks, zticks_label, rticks, rticks_label + + +def plot_qzr_map(qr, qz, inc_x0, ticks=None, data=None, uid="uid", path="", vmin=0.001, vmax=1e1, *argv, **kwargs): + """ + Dec 31, 2016, Y.G.@CHX + plot a qzr map of a gisaxs image (data) + Parameters: + qr: 2-D array, qr of a gisaxs image (data) + qz: 2-D array, qz of a gisaxs image (data) + inc_x0: the incident beam center x + + ticks = [ zticks,zticks_label,rticks,rticks_label ], use ticks = get_qzr_map( qr, qz, inc_x0 ) to get + + zticks: list, z-tick positions in unit of pixel + zticks_label: list, z-tick positions in unit of real space + rticks: list, r-tick positions in unit of pixel + rticks_label: list, r-tick positions in unit of real space + label_array_qr: qr label array with the same shpae as gisaxs image + label_array_qz: qz label array with the same shpae as gisaxs image + + inc_x0: the incident beam center x + Options: + data: 2-D array, a gisaxs image, if None, =qr+qz + Nzline: int, z-line number + Nrline: int, r-line number + + Return: + None + + Examples: + + ticks = plot_qzr_map( ticks, inc_x0, data = None, Nzline=10, Nrline= 10 ) + ticks = plot_qzr_map( ticks, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 ) + """ + + import copy + + import matplotlib.cm as mcm + import matplotlib.pyplot as plt + + if ticks is None: + zticks, zticks_label, rticks, rticks_label, label_array_qr, label_array_qz = get_qzr_map( + qr, qz, inc_x0, return_qrz_label=True + ) + else: + zticks, zticks_label, rticks, rticks_label, label_array_qr, label_array_qz = ticks + + cmap = "viridis" + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under("w", 0) + fig, ax = plt.subplots() + if data is None: + data = qr + qz + im = ax.imshow(data, cmap="viridis", origin="lower") + else: + im = ax.imshow(data, cmap="viridis", origin="lower", norm=LogNorm(vmin=vmin, vmax=vmax)) + + imr = ax.imshow( + label_array_qr, origin="lower", cmap="viridis", vmin=0.5, vmax=None + ) # ,interpolation='nearest',) + imz = ax.imshow( + label_array_qz, origin="lower", cmap="viridis", vmin=0.5, vmax=None + ) # ,interpolation='nearest',) + + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + plt.colorbar(im, cax=cax) + ax.set_xlabel(r"$q_r$", fontsize=18) + ax.set_ylabel(r"$q_z$", fontsize=18) + + stride = 1 + ax.set_yticks(zticks[::stride]) + yticks = zticks_label[::stride] + ax.set_yticklabels(yticks, fontsize=7) + # stride = int(len(rticks)/10) + stride = 1 + ax.set_xticks(rticks[::stride]) + xticks = rticks_label[::stride] + ax.set_xticklabels(xticks, fontsize=7) + ax.set_title("%s_Qr_Qz_Map" % uid, y=1.03, fontsize=18) + fp = path + "%s_Qr_Qz_Map" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def show_qzr_map(qr, qz, inc_x0, data=None, Nzline=10, Nrline=10, interp=True, *argv, **kwargs): + """ + Dec 16, 2015, Y.G.@CHX + plot a qzr map of a gisaxs image (data) + + Parameters: + qr: 2-D array, qr of a gisaxs image (data) + qz: 2-D array, qz of a gisaxs image (data) + inc_x0: the incident beam center x + + Options: + data: 2-D array, a gisaxs image, if None, =qr+qz + Nzline: int, z-line number + Nrline: int, r-line number + + + Return: + zticks: list, z-tick positions in unit of pixel + zticks_label: list, z-tick positions in unit of real space + rticks: list, r-tick positions in unit of pixel + rticks_label: list, r-tick positions in unit of real space + + + Examples: 
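+ A typical round trip (hypothetical variable names; show_qzr_roi is defined later in
+ this module): ticks = show_qzr_map( qr, qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 )
+ followed by show_qzr_roi( avg_imgr, box_maskr, inc_x0, ticks )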
+ + ticks = show_qzr_map( qr, qz, inc_x0, data = None, Nzline=10, Nrline= 10 ) + ticks = show_qzr_map( qr,qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 ) + """ + + import copy + + import matplotlib.cm as mcm + import matplotlib.pyplot as plt + + cmap = "viridis" + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under("w", 0) + + qr_start, qr_end, qr_num = qr.min(), qr.max(), Nrline + qz_start, qz_end, qz_num = qz.min(), qz.max(), Nzline + qr_edge, qr_center = get_qedge(qr_start, qr_end, (qr_end - qr_start) / (qr_num + 100), qr_num) + qz_edge, qz_center = get_qedge(qz_start, qz_end, (qz_end - qz_start) / (qz_num + 100), qz_num) + + label_array_qz = get_qmap_label(qz, qz_edge) + label_array_qr = get_qmap_label(qr, qr_edge) + + labels_qz, indices_qz = roi.extract_label_indices(label_array_qz) + labels_qr, indices_qr = roi.extract_label_indices(label_array_qr) + num_qz = len(np.unique(labels_qz)) + num_qr = len(np.unique(labels_qr)) + + fig, ax = plt.subplots(figsize=(8, 14)) + + if data is None: + data = qr + qz + im = ax.imshow(data, cmap="viridis", origin="lower") + else: + im = ax.imshow(data, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e1)) + + imr = ax.imshow( + label_array_qr, origin="lower", cmap="viridis", vmin=0.5, vmax=None + ) # ,interpolation='nearest',) + imz = ax.imshow( + label_array_qz, origin="lower", cmap="viridis", vmin=0.5, vmax=None + ) # ,interpolation='nearest',) + + # caxr = fig.add_axes([0.88, 0.2, 0.03, .7]) #x,y, width, heigth + # cba = fig.colorbar(im, cax=caxr ) + # cba = fig.colorbar(im, fraction=0.046, pad=0.04) + + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + plt.colorbar(im, cax=cax) + + # fig.colorbar(im, shrink =.82) + # cba = fig.colorbar(im) + + ax.set_xlabel(r"$q_r$", fontsize=18) + ax.set_ylabel(r"$q_z$", fontsize=18) + + zticks, zticks_label = get_qz_tick_label(qz, label_array_qz) + # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) + try: + rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) + except: + rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) + # stride = int(len(zticks)/10) + + stride = 1 + ax.set_yticks(zticks[::stride]) + yticks = zticks_label[::stride] + ax.set_yticklabels(yticks, fontsize=7) + + # stride = int(len(rticks)/10) + stride = 1 + ax.set_xticks(rticks[::stride]) + xticks = rticks_label[::stride] + ax.set_xticklabels(xticks, fontsize=7) + + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + + ax.set_title("%s_Qr_Qz_Map" % uid, y=1.03, fontsize=18) + + save = False + if "save" in kwargs: + save = kwargs["save"] + + if save: + path = kwargs["path"] + fp = path + "%s_Qr_Qz_Map" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + return zticks, zticks_label, rticks, rticks_label + + +def show_qzr_roi( + data, + rois, + inc_x0, + ticks, + alpha=0.3, + vmin=0.01, + vmax=30.0, + uid="uid", + path="", + save=False, + return_fig=False, + *argv, + **kwargs +): + """ + Dec 16, 2015, Y.G.@CHX + plot a qzr map of a gisaxs image with rois( a label array) + + Parameters: + data: 2-D array, a gisaxs image + rois: 2-D array, a label array + inc_x0: the incident beam center x + ticks: zticks, zticks_label, rticks, rticks_label = ticks + zticks: list, z-tick positions in unit of pixel + zticks_label: list, z-tick positions in unit of real space + rticks: list, r-tick positions in unit of pixel + rticks_label: list, r-tick 
positions in unit of real space + + Options: + alpha: transparency of the label array on top of data + + Return: + a plot of a qzr map of a gisaxs image with rois( a label array) + + + Examples: + show_qzr_roi( avg_imgr, box_maskr, inc_x0, ticks) + + """ + zticks, zticks_label, rticks, rticks_label = ticks + avg_imgr, box_maskr = data, rois + num_qzr = len(np.unique(box_maskr)) - 1 + + # fig, ax = plt.subplots(figsize=(8,12)) + fig, ax = plt.subplots(figsize=(8, 8)) + + ax.set_title("%s_ROI--Labeled Array on Data" % uid) + im, im_label = show_label_array_on_image( + ax, + avg_imgr, + box_maskr, + imshow_cmap="viridis", + cmap="Paired", + alpha=alpha, + vmin=vmin, + vmax=vmax, + origin="lower", + ) + + for i in range(1, num_qzr + 1): + ind = np.where(box_maskr == i)[1] + indz = np.where(box_maskr == i)[0] + c = "%i" % i + y_val = int(indz.mean()) + + # print (ind[0], ind[-1], inc_x0 ) + M, m = max(ind), min(ind) + + # if ind[0] < inc_x0 and ind[-1]>inc_x0: + if m < inc_x0 and M > inc_x0: + x_val1 = int((ind[np.where(ind < inc_x0)[0]]).mean()) + x_val2 = int((ind[np.where(ind > inc_x0)[0]]).mean()) + ax.text(x_val1, y_val, c, va="center", ha="center") + ax.text(x_val2, y_val, c, va="center", ha="center") + + else: + x_val = int(ind.mean()) + # print (xval, y) + ax.text(x_val, y_val, c, va="center", ha="center") + + # print (x_val1,x_val2) + + # stride = int(len(zticks)/3) + stride = 1 + ax.set_yticks(zticks[::stride]) + yticks = zticks_label[::stride] + ax.set_yticklabels(yticks, fontsize=9) + + # stride = int(len(rticks)/3) + stride = 1 + ax.set_xticks(rticks[::stride]) + xticks = rticks_label[::stride] + ax.set_xticklabels(xticks, fontsize=9) + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + plt.colorbar(im, cax=cax) + + ax.set_xlabel(r"$q_r$", fontsize=22) + ax.set_ylabel(r"$q_z$", fontsize=22) + + fp = path + "%s_ROI_on_Image" % (uid) + ".png" + if save: + fig.savefig(fp, dpi=fig.dpi) + if return_fig: + return fig, ax + + +# plot g2 results + + +def plot_gisaxs_g2(g2, taus, res_pargs=None, one_plot=False, *argv, **kwargs): + """Dec 16, 2015, Y.G.@CHX + plot g2 results, + g2: one-time correlation function + taus: the time delays + res_pargs, a dict, can contains + uid/path/qr_center/qz_center/ + one_plot: if True, show all qz in one plot + kwargs: can contains + vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)] + ylim/xlim: the limit of y and x + + e.g. 
+ plot_gisaxs_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, vlim=[.99, 1.01] ) + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + qz_center = res_pargs["qz_center"] + num_qz = len(qz_center) + qr_center = res_pargs["qr_center"] + num_qr = len(qr_center) + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + if "qz_center" in kwargs.keys(): + qz_center = kwargs["qz_center"] + num_qz = len(qz_center) + else: + print("Please give qz_center") + if "qr_center" in kwargs.keys(): + qr_center = kwargs["qr_center"] + num_qr = len(qr_center) + else: + print("Please give qr_center") + + if not one_plot: + for qz_ind in range(num_qz): + fig = plt.figure(figsize=(10, 12)) + # fig = plt.figure() + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + plt.title("uid= %s:--->" % uid + title_qz, fontsize=20, y=1.1) + # print (qz_ind,title_qz) + if num_qz != 1: + if num_qr != 1: + plt.axis("off") + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + title_qr = " Qr= " + "%.5f " % (qr_center[sn]) + r"$\AA^{-1}$" + if num_qz == 1: + title = "uid= %s:--->" % uid + title_qz + "__" + title_qr + else: + title = title_qr + ax.set_title(title) + + y = g2[:, sn + qz_ind * num_qr] + ax.semilogx(taus, y, "-o", markersize=6) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + fp = path + "uid=%s--g2-qz=%s" % (uid, qz_center[qz_ind]) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + else: + if num_qz == 1: + if num_qr == 1: + fig = plt.figure(figsize=(8, 8)) + else: + fig = plt.figure(figsize=(10, 12)) + else: + fig = plt.figure(figsize=(10, 12)) + + plt.title("uid= %s" % uid, fontsize=20, y=1.05) + if num_qz != 1: + if num_qr != 1: + plt.axis("off") + if num_qz == 1: + if num_qr != 1: + plt.axis("off") + + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + # title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$' + title_qr = " Qr= " + "%.5s " % (qr_center[sn]) + r"$\AA^{-1}$" + title = title_qr + ax.set_title(title) + + for qz_ind in range(num_qz): + y = g2[:, sn + qz_ind * num_qr] + if sn == 0: + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + ax.semilogx(taus, y, "-o", markersize=6, label=title_qz) + else: + ax.semilogx(taus, y, "-o", markersize=6, label="") + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if sn == 0: + ax.legend(loc="best", fontsize=6) + fp = path + "uid=%s--g2" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + +# plot g2 results + + +def plot_gisaxs_two_g2(g2, taus, g2b, tausb, res_pargs=None, one_plot=False, *argv, **kwargs): + """Dec 16, 2015, Y.G.@CHX + plot g2 
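+ # --------------------------------------------------------------------------
+ # Note on the g2 column layout assumed by the GiSAXS plotting/fitting helpers
+ # in this module (a reading aid, not new behaviour): g2 has shape
+ # (n_tau, num_qz * num_qr) and the trace for the (qz_ind, sn)-th ROI sits in
+ # column sn + qz_ind * num_qr (qz-major ordering), as used in the loops above.
+ #
+ #     y = g2[:, sn + qz_ind * num_qr]   # g2(tau) at qz_center[qz_ind], qr_center[sn]
+ # --------------------------------------------------------------------------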
results, + g2: one-time correlation function from a multi-tau method + g2b: another g2 from a two-time method + taus: the time delays + kwargs: can contains + vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)] + ylim/xlim: the limit of y and x + + e.g. + plot_saxs_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, vlim=[.99, 1.01] ) + + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + qz_center = res_pargs["qz_center"] + num_qz = len(qz_center) + qr_center = res_pargs["qr_center"] + num_qr = len(qr_center) + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + if "qz_center" in kwargs.keys(): + qz_center = kwargs["qz_center"] + num_qz = len(qz_center) + else: + print("Please give qz_center") + if "qr_center" in kwargs.keys(): + qr_center = kwargs["qr_center"] + num_qr = len(qr_center) + else: + print("Please give qr_center") + + if not one_plot: + for qz_ind in range(num_qz): + fig = plt.figure(figsize=(12, 10)) + # fig = plt.figure() + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + plt.title("uid= %s:--->" % uid + title_qz, fontsize=20, y=1.1) + # print (qz_ind,title_qz) + if num_qz != 1: + plt.axis("off") + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + title_qr = " Qr= " + "%.5f " % (qr_center[sn]) + r"$\AA^{-1}$" + if num_qz == 1: + title = "uid= %s:--->" % uid + title_qz + "__" + title_qr + else: + title = title_qr + ax.set_title(title) + + y = g2b[:, sn + qz_ind * num_qr] + ax.semilogx(tausb, y, "--r", markersize=6, label="by-two-time") + + # y2=g2[:, sn] + y2 = g2[:, sn + qz_ind * num_qr] + ax.semilogx(taus, y2, "o", markersize=6, label="by-multi-tau") + + if sn + qz_ind * num_qr == 0: + ax.legend(loc="best") + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + fp = path + "uid=%s--two-g2-qz=%s" % (uid, qz_center[qz_ind]) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + else: + fig = plt.figure(figsize=(12, 10)) + plt.title("uid= %s" % uid, fontsize=20, y=1.05) + if num_qz != 1: + if num_qr != 1: + plt.axis("off") + if num_qz == 1: + if num_qr != 1: + plt.axis("off") + + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + title_qr = " Qr= " + "%.5s " % (qr_center[sn]) + r"$\AA^{-1}$" + # title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$' + title = title_qr + ax.set_title(title) + + for qz_ind in range(num_qz): + y = g2b[:, sn + qz_ind * num_qr] + y2 = g2[:, sn + qz_ind * num_qr] + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + label1 = "" + label2 = "" + + if sn == 0: + label2 = title_qz + + elif sn == 1: + if qz_ind == 0: + label1 = "by-two-time" + label2 = "by-multi-tau" + + ax.semilogx(tausb, y, "-r", markersize=6, linewidth=4, label=label1) + ax.semilogx(taus, y2, "o", markersize=6, label=label2) + + if "ylim" 
in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if (sn == 0) or (sn == 1): + ax.legend(loc="best", fontsize=6) + + fp = path + "uid=%s--g2--two-g2-" % uid + ".png" + + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + +def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *argv, **kwargs): + """ + Aug 8, 2016, Y.G.@CHX + save g2 results, + res_pargs should contain + g2: one-time correlation function + res_pargs: contions taus, q_ring_center values + path: + uid: + """ + + if taus is None: + taus = res_pargs["taus"] + + try: + qz_center = res_pargs["qz_center"] + qr_center = res_pargs["qr_center"] + except: + roi_label = res_pargs["roi_label"] + + path = res_pargs["path"] + uid = res_pargs["uid"] + + df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) + columns = [] + columns.append("tau") + + try: + for qz in qz_center: + for qr in qr_center: + columns.append([str(qz), str(qr)]) + except: + columns.append([v for (k, v) in roi_label.items()]) + + df.columns = columns + + if filename is None: + if time_label: + dt = datetime.now() + CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + filename = os.path.join(path, "g2-%s-%s.csv" % (uid, CurTime)) + else: + filename = os.path.join(path, "uid=%s--g2.csv" % (uid)) + else: + filename = os.path.join(path, filename) + df.to_csv(filename) + print("The correlation function of uid= %s is saved with filename as %s" % (uid, filename)) + + +def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): + return beta * (np.exp(-2 * relaxation_rate * x)) ** alpha + baseline + + +def simple_exponential(x, beta, relaxation_rate, baseline=1): + return beta * np.exp(-2 * relaxation_rate * x) + baseline + + +def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, *argv, **kwargs): + """ + July 20,2016, Y.G.@CHX + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + res_pargs: a dict, contains keys + taus: the time delay, with the same length as g2 + q_ring_center: the center of q rings, for the title of each sub-plot + uid: unique id, for the title of plot + kwargs: + variables: if exist, should be a dict, like + { 'lags': True, #always True + 'beta', Ture, # usually True + 'relaxation_rate': False, #always False + 'alpha':False, #False for simple exponential, True for stretched/compressed + 'baseline': True #sometimes be False, keep as 1 + } + + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + + Returns + ------- + fit resutls: + a dict, with keys as + 'baseline': + 'beta': + 'relaxation_rate': + an example: + result = fit_g2( g2, res_pargs, function = 'simple') + result = fit_g2( g2, res_pargs, function = 'stretched') + + TO DO: + add variables to options + """ + + taus = res_pargs["taus"] + qz_center = res_pargs["qz_center"] + num_qz = len(qz_center) + qr_center = res_pargs["qr_center"] + num_qr = len(qr_center) + uid = res_pargs["uid"] + path = res_pargs["path"] + # 
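+ # --------------------------------------------------------------------------
+ # The two model functions defined above encode
+ #     g2(tau) = beta * [exp(-2 * relaxation_rate * tau)]**alpha + baseline
+ # with simple_exponential() being the alpha = 1 special case.  A quick sanity
+ # check with illustrative values only:
+ #
+ #     stretched_auto_corr_scat_factor(x=0.0, beta=0.2, relaxation_rate=5.0)  # -> 1.2 (beta + baseline)
+ #     simple_exponential(x=1e6, beta=0.2, relaxation_rate=5.0)               # -> ~1.0 (decays to baseline)
+ # --------------------------------------------------------------------------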
uid=res_pargs['uid'] + + num_rings = g2.shape[1] + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline + + if function == "simple_exponential" or function == "simple": + _vars = np.unique(_vars + ["alpha"]) + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) + + elif function == "stretched_exponential" or function == "stretched": + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) + + else: + print( + "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" + % function + ) + + # mod.set_param_hint( 'beta', value = 0.05 ) + # mod.set_param_hint( 'alpha', value = 1.0 ) + # mod.set_param_hint( 'relaxation_rate', value = 0.005 ) + # mod.set_param_hint( 'baseline', value = 1.0, min=0.5, max= 1.5 ) + mod.set_param_hint("baseline", min=0.5, max=2.5) + mod.set_param_hint("beta", min=0.0) + mod.set_param_hint("alpha", min=0.0) + mod.set_param_hint("relaxation_rate", min=0.0) + + if "fit_variables" in kwargs: + additional_var = kwargs["fit_variables"] + # print ( additional_var ) + _vars = [k for k in list(additional_var.keys()) if additional_var[k] is False] + else: + _vars = [] + + if "guess_values" in kwargs: + if "beta" in list(kwargs["guess_values"].keys()): + beta_ = kwargs["guess_values"]["beta"] + else: + beta_ = 0.05 + + if "alpha" in list(kwargs["guess_values"].keys()): + alpha_ = kwargs["guess_values"]["alpha"] + else: + alpha_ = 1.0 + if "relaxation_rate" in list(kwargs["guess_values"].keys()): + relaxation_rate_ = kwargs["guess_values"]["relaxation_rate"] + else: + relaxation_rate_ = 0.005 + if "baseline" in list(kwargs["guess_values"].keys()): + baseline_ = kwargs["guess_values"]["baseline"] + else: + baseline_ = 1.0 + pars = mod.make_params(beta=beta_, alpha=alpha_, relaxation_rate=relaxation_rate_, baseline=baseline_) + else: + pars = mod.make_params(beta=0.05, alpha=1.0, relaxation_rate=0.005, baseline=1.0) + + for v in _vars: + pars["%s" % v].vary = False + # print ( pars['%s'%v], pars['%s'%v].vary ) + result = {} + + if not one_plot: + for qz_ind in range(num_qz): + # fig = plt.figure(figsize=(10, 12)) + fig = plt.figure(figsize=(12, 10)) + # fig = plt.figure() + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + plt.title("uid= %s:--->" % uid + title_qz, fontsize=20, y=1.1) + # print (qz_ind,title_qz) + if num_qz != 1: + plt.axis("off") + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + title_qr = " Qr= " + "%.5f " % (qr_center[sn]) + r"$\AA^{-1}$" + if num_qz == 1: + title = "uid= %s:--->" % uid + title_qz + "__" + title_qr + else: + title = title_qr + ax.set_title(title) + + i = sn + qz_ind * num_qr + y = g2[1:, i] + + result1 = mod.fit(y, pars, x=taus[1:]) + + # print ( result1.best_values) + rate[i] = result1.best_values["relaxation_rate"] + # rate[i] = 1e-16 + beta[i] = result1.best_values["beta"] + + # baseline[i] = 1.0 + baseline[i] = result1.best_values["baseline"] + + if function == "simple_exponential" or function == "simple": + alpha[i] = 1.0 + elif function == "stretched_exponential" or function == "stretched": + alpha[i] = result1.best_values["alpha"] + + ax.semilogx(taus[1:], y, "bo") + ax.semilogx(taus[1:], result1.best_fit, 
"-r") + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + txts = r"$\tau$" + r"$ = %.3f$" % (1 / rate[i]) + r"$ s$" + ax.text(x=0.02, y=0.55 + 0.3, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"$\alpha$" + r"$ = %.3f$" % (alpha[i]) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.45 + 0.3, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"$baseline$" + r"$ = %.3f$" % (baseline[i]) + ax.text(x=0.02, y=0.35 + 0.3, s=txts, fontsize=14, transform=ax.transAxes) + + result = dict(beta=beta, rate=rate, alpha=alpha, baseline=baseline) + fp = path + "uid=%s--g2-qz=%s--fit" % (uid, qz_center[qz_ind]) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + else: + # fig = plt.figure(figsize=(10, 12)) + # fig = plt.figure(figsize=(12, 10)) + if num_qz == 1: + if num_qr == 1: + fig = plt.figure(figsize=(8, 8)) + else: + fig = plt.figure(figsize=(10, 12)) + else: + fig = plt.figure(figsize=(10, 12)) + + plt.title("uid= %s" % uid, fontsize=20, y=1.05) + if num_qz != 1: + if num_qr != 1: + plt.axis("off") + + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + # title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$' + title_qr = " Qr= " + "%.5s " % (qr_center[sn]) + r"$\AA^{-1}$" + title = title_qr + ax.set_title(title) + + for qz_ind in range(num_qz): + i = sn + qz_ind * num_qr + y = g2[1:, i] + result1 = mod.fit(y, pars, x=taus[1:]) + # print ( result1.best_values) + rate[i] = result1.best_values["relaxation_rate"] + # rate[i] = 1e-16 + beta[i] = result1.best_values["beta"] + # baseline[i] = 1.0 + baseline[i] = result1.best_values["baseline"] + + if function == "simple_exponential" or function == "simple": + alpha[i] = 1.0 + elif function == "stretched_exponential" or function == "stretched": + alpha[i] = result1.best_values["alpha"] + + if sn == 0: + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + ax.semilogx(taus[1:], y, "o", markersize=6, label=title_qz) + else: + ax.semilogx(taus[1:], y, "o", markersize=6, label="") + + ax.semilogx(taus[1:], result1.best_fit, "-r") + + # print( result1.best_values['relaxation_rate'], result1.best_values['beta'] ) + + txts = r"$q_z$" + r"$_%s$" % qz_ind + r"$\tau$" + r"$ = %.3f$" % (1 / rate[i]) + r"$ s$" + ax.text(x=0.02, y=0.55 + 0.3 - 0.1 * qz_ind, s=txts, fontsize=14, transform=ax.transAxes) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if sn == 0: + ax.legend(loc="best", fontsize=6) + + result = dict(beta=beta, rate=rate, alpha=alpha, baseline=baseline) + fp = path + "uid=%s--g2--fit-" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + # fp = path + 'g2--uid=%s-qz=%s-fit'%(uid,qz_center[qz_ind]) + CurTime + '.png' + # fig.savefig( fp, dpi=fig.dpi) + + # result = dict( beta=beta, rate=rate, alpha=alpha, baseline=baseline ) + # fp = path + 'uid=%s--g2--fit-'%(uid) + 
'.png' + # fig.savefig( fp, dpi=fig.dpi) + # fig.tight_layout() + # plt.show() + + return result + + +# GiSAXS End +############################### + + +def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, plot_=True, *argv, **kwargs): + """Dec 16, 2015, Y.G.@CHX + get each box (ROI) mean intensity as a function of time + + + """ + + mean_int_sets, index_list = roi.mean_intensity(np.array(data_series[::sampling]), box_mask) + try: + N = len(data_series) + except: + N = data_series.length + times = np.arange(N) * timeperframe # get the time for each frame + num_rings = len(np.unique(box_mask)[1:]) + if plot_: + fig, ax = plt.subplots(figsize=(8, 8)) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + ax.set_title("uid= %s--Mean intensity of each box" % uid) + for i in range(num_rings): + ax.plot(times[::sampling], mean_int_sets[:, i], label="Box " + str(i + 1), marker="o", ls="-") + ax.set_xlabel("Time") + ax.set_ylabel("Mean Intensity") + ax.legend() + + # fp = path + 'uid=%s--Mean intensity of each box-'%(uid) + '.png' + if "path" not in kwargs.keys(): + path = "" + else: + path = kwargs["path"] + fp = path + "uid=%s--Mean-intensity-of-each-ROI-" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + return times, mean_int_sets + + +def power_func(x, D0, power=2): + return D0 * x**power + + +def fit_qr_qz_rate(qr, qz, rate, plot_=True, *argv, **kwargs): + """ + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. + """ + power_variable = False + x = qr + if "fit_range" in kwargs.keys(): + fit_range = kwargs["fit_range"] + else: + fit_range = None + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + + if fit_range is not None: + y = rate[fit_range[0] : fit_range[1]] + x = q[fit_range[0] : fit_range[1]] + + mod = Model(power_func) + # mod.set_param_hint( 'power', min=0.5, max= 10 ) + # mod.set_param_hint( 'D0', min=0 ) + pars = mod.make_params(power=2, D0=1 * 10 ^ (-5)) + if power_variable: + pars["power"].vary = True + else: + pars["power"].vary = False + + Nqr = len(qr) + Nqz = len(qz) + D0 = np.zeros(Nqz) + power = 2 # np.zeros( Nqz ) + + res = [] + for i, qz_ in enumerate(qz): + try: + y = np.array(rate["rate"][i * Nqr : (i + 1) * Nqr]) + except: + y = np.array(rate[i * Nqr : (i + 1) * Nqr]) + + # print( len(x), len(y) ) + _result = mod.fit(y, pars, x=x) + res.append(_result) + D0[i] = _result.best_values["D0"] + # power[i] = _result.best_values['power'] + print("The fitted diffusion coefficient D0 is: %.3e A^2S-1" % D0[i]) + + if plot_: + fig, ax = plt.subplots() + plt.title("Q%s-Rate--uid= %s_Fit" % (power, uid), fontsize=20, y=1.06) + for i, qz_ in enumerate(qz): + ax.plot(x**power, y, marker="o", label=r"$q_z=%.5f$" % qz_) + ax.plot(x**power, res[i].best_fit, "-r") + txts = r"$D0: %.3e$" % D0[i] + r" $A^2$" + r"$s^{-1}$" + dy = 0.1 + ax.text(x=0.15, y=0.65 - dy * i, s=txts, fontsize=14, transform=ax.transAxes) + legend = ax.legend(loc="best") + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") + ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) + + dt = datetime.now() + CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + # fp = path + 'Q%s-Rate--uid=%s'%(power,uid) + CurTime + '--Fit.png' + fp = path + "uid=%s--Q-Rate" % (uid) + "--fit-.png" + fig.savefig(fp, dpi=fig.dpi) + + fig.tight_layout() + # plt.show() + + return D0 + + +# plot g4 results + 
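+ # --------------------------------------------------------------------------
+ # A minimal sketch of how the fitting helpers above chain together
+ # (illustrative only; it mirrors the call made in
+ # multi_uids_gisaxs_xpcs_analysis() further below, with res_pargs, qr_center,
+ # qz_center, uid and path as defined there): fit_gisaxs_g2() returns per-ROI
+ # beta/rate/alpha/baseline arrays, and fit_qr_qz_rate() then fits
+ # rate ~ D0 * qr**2 for each qz line (power_func with power = 2) and reports
+ # the diffusion coefficient D0.
+ #
+ #     fit_result = fit_gisaxs_g2(
+ #         g2, res_pargs, function="stretched",
+ #         fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True},
+ #         guess_values={"baseline": 1.0, "beta": 0.05, "alpha": 1.0, "relaxation_rate": 0.01},
+ #         one_plot=True)
+ #     D0 = fit_qr_qz_rate(qr_center, qz_center, fit_result, uid=uid, path=path)
+ # --------------------------------------------------------------------------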
+ +def plot_gisaxs_g4(g4, taus, res_pargs=None, one_plot=False, *argv, **kwargs): + """Dec 16, 2015, Y.G.@CHX + plot g4 results, + g4: four-time correlation function + taus: the time delays + res_pargs, a dict, can contains + uid/path/qr_center/qz_center/ + kwargs: can contains + vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)] + ylim/xlim: the limit of y and x + + e.g. + plot_gisaxs_g4( g4, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, vlim=[.99, 1.01] ) + + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + qz_center = res_pargs["qz_center"] + num_qz = len(qz_center) + qr_center = res_pargs["qr_center"] + num_qr = len(qr_center) + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + if "qz_center" in kwargs.keys(): + qz_center = kwargs["qz_center"] + num_qz = len(qz_center) + else: + print("Please give qz_center") + if "qr_center" in kwargs.keys(): + qr_center = kwargs["qr_center"] + num_qr = len(qr_center) + else: + print("Please give qr_center") + + if not one_plot: + for qz_ind in range(num_qz): + fig = plt.figure(figsize=(12, 10)) + # fig = plt.figure() + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + plt.title("uid= %s:--->" % uid + title_qz, fontsize=20, y=1.1) + # print (qz_ind,title_qz) + if num_qz != 1: + if num_qr != 1: + plt.axis("off") + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g4") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + title_qr = " Qr= " + "%.5f " % (qr_center[sn]) + r"$\AA^{-1}$" + if num_qz == 1: + title = "uid= %s:--->" % uid + title_qz + "__" + title_qr + else: + title = title_qr + ax.set_title(title) + + y = g4[:, sn + qz_ind * num_qr] + ax.semilogx(taus, y, "-o", markersize=6) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + fp = path + "uid=%s--g4-qz=%s" % (uid, qz_center[qz_ind]) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + else: + fig = plt.figure(figsize=(12, 10)) + plt.title("uid= %s" % uid, fontsize=20, y=1.05) + if num_qz != 1: + if num_qr != 1: + plt.axis("off") + + sx = int(round(np.sqrt(num_qr))) + if num_qr % sx == 0: + sy = int(num_qr / sx) + else: + sy = int(num_qr / sx + 1) + + for sn in range(num_qr): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g4") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + title_qr = " Qr= " + "%.5f " % (qr_center[sn]) + r"$\AA^{-1}$" + title = title_qr + ax.set_title(title) + + for qz_ind in range(num_qz): + y = g4[:, sn + qz_ind * num_qr] + if sn == 0: + title_qz = " Qz= %.5f " % (qz_center[qz_ind]) + r"$\AA^{-1}$" + ax.semilogx(taus, y, "-o", markersize=6, label=title_qz) + else: + ax.semilogx(taus, y, "-o", markersize=6, label="") + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if sn == 0: + ax.legend(loc="best", fontsize=6) + fp = path + "uid=%s--g4-" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # 
plt.show() + + +def multi_uids_gisaxs_xpcs_analysis( + uids, + md, + run_num=1, + sub_num=None, + good_start=10, + good_end=None, + force_compress=False, + fit=True, + compress=True, + para_run=False, +): + """'Sep 16, 2016, YG@CHX-NSLS2 + Do SAXS-XPCS analysis for multi uid data + uids: a list of uids to be analyzed + md: metadata, should at least include + mask: array, mask data + data_dir: the path to save data, the result will be saved in data_dir/uid/... + dpix: + Ldet: + lambda: + timeperframe: + center + run_num: the run number + sub_num: the number in each sub-run + fit: if fit, do fit for g2 and show/save all fit plots + compress: apply a compress algorithm + + Save g2/metadata/g2-fit plot/g2 q-rate plot/ of each uid in data_dir/uid/... + return: + g2s: a dictionary, {run_num: sub_num: g2_of_each_uid} + taus, + use_uids: return the valid uids + """ + + g2s = {} # g2s[run_number][sub_seq] = g2 of each uid + lag_steps = [0] + useful_uids = {} + if sub_num is None: + sub_num = len(uids) // run_num + + mask = md["mask"] + maskr = mask[::-1, :] + data_dir = md["data_dir"] + box_maskr = md["ring_mask"] + qz_center = md["qz_center"] + qr_center = md["qr_center"] + + for run_seq in range(run_num): + g2s[run_seq + 1] = {} + useful_uids[run_seq + 1] = {} + i = 0 + for sub_seq in range(0, sub_num): + uid = uids[sub_seq + run_seq * sub_num] + print("The %i--th uid to be analyzed is : %s" % (i, uid)) + try: + detector = get_detector(db[uid]) + imgs = load_data(uid, detector) + except: + print("The %i--th uid: %s can not load data" % (i, uid)) + imgs = 0 + + data_dir_ = os.path.join(data_dir, "%s/" % uid) + os.makedirs(data_dir_, exist_ok=True) + i += 1 + if imgs != 0: + Nimg = len(imgs) + md_ = imgs.md + useful_uids[run_seq + 1][i] = uid + + imgsr = reverse_updown(imgs) + imgsra = apply_mask(imgsr, maskr) + + if compress: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % uid + maskr, avg_imgr, imgsum, bad_frame_list = compress_eigerdata( + imgsr, + maskr, + md_, + filename, + force_compress=force_compress, + bad_pixel_threshold=5e9, + nobytes=4, + para_compress=True, + num_sub=100, + ) + + try: + md["Measurement"] = db[uid]["start"]["Measurement"] + # md['sample']=db[uid]['start']['sample'] + # print( md['Measurement'] ) + except: + md["Measurement"] = "Measurement" + md["sample"] = "sample" + + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + Ldet = md["detector_distance"] + # detector to sample distance (mm), currently, *1000 for saxs, *1 for gisaxs + exposuretime = md["count_time"] + acquisition_period = md["frame_time"] + timeperframe = acquisition_period # for g2 + # timeperframe = exposuretime#for visiblitly + # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... 
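+                 # The metadata lines above reduce to the geometry/timing numbers used
+                 # below; illustrative values only, e.g. for an Eiger detector with
+                 # x_pixel_size = 7.5e-05 (m):
+                 #     dpix = 7.5e-05 * 1000.0 = 0.075    # mm per pixel
+                 #     timeperframe = acquisition_period  # frame_time in s, the g2 lag unit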
+ setup_pargs = dict( + uid=uid, dpix=dpix, Ldet=Ldet, lambda_=lambda_, timeperframe=timeperframe, path=data_dir + ) + md["avg_img"] = avg_imgr + + min_inten = 0 + # good_start = np.where( np.array(imgsum) > min_inten )[0][0] + # good_start = 0 + # good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] ) + + good_start = good_start + if good_end is None: + good_end_ = len(imgs) + else: + good_end_ = good_end + FD = Multifile(filename, good_start, good_end_) + good_start = max(good_start, np.where(np.array(imgsum) > min_inten)[0][0]) + print("With compression, the good_start frame number is: %s " % good_start) + print("The good_end frame number is: %s " % good_end_) + + if not para_run: + g2, lag_steps_ = cal_g2c( + FD, box_maskr, bad_frame_list, good_start, num_buf=8, imgsum=None, norm=None + ) + else: + g2, lag_steps_ = cal_g2p( + FD, box_maskr, bad_frame_list, good_start, num_buf=8, imgsum=None, norm=None + ) + + if len(lag_steps) < len(lag_steps_): + lag_steps = lag_steps_ + + else: + sampling = 1000 # sampling should be one + + # good_start = check_shutter_open( imgsra, min_inten=5, time_edge = [0,10], plot_ = False ) + good_start = 0 + good_series = apply_mask(imgsar[good_start:], maskr) + imgsum, bad_frame_list = get_each_frame_intensity( + good_series, sampling=sampling, bad_pixel_threshold=1.2e8, plot_=False, uid=uid + ) + bad_image_process = False + + if len(bad_frame_list): + bad_image_process = True + print(bad_image_process) + + g2, lag_steps_ = cal_g2( + good_series, box_maskr, bad_image_process, bad_frame_list, good_start, num_buf=8 + ) + if len(lag_steps) < len(lag_steps_): + lag_steps = lag_step_ + + taus_ = lag_steps_ * timeperframe + taus = lag_steps * timeperframe + res_pargs = dict(taus=taus_, qz_center=qz_center, qr_center=qr_center, path=data_dir_, uid=uid) + save_gisaxs_g2(g2, res_pargs) + # plot_gisaxs_g2( g2, taus, vlim=[0.95, 1.1], res_pargs=res_pargs, one_plot=True) + + if fit: + fit_result = fit_gisaxs_g2( + g2, + res_pargs, + function="stretched", + vlim=[0.95, 1.1], + fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, + guess_values={"baseline": 1.229, "beta": 0.05, "alpha": 1.0, "relaxation_rate": 0.01}, + one_plot=True, + ) + + fit_qr_qz_rate(qr_center, qz_center, fit_result, power_variable=False, uid=uid, path=data_dir_) + + psave_obj(md, data_dir_ + "uid=%s-md" % uid) # save the setup parameters + + g2s[run_seq + 1][i] = g2 + + print("*" * 40) + print() + + return g2s, taus, useful_uids diff --git a/pyCHX/backups/pyCHX-backup/XPCS_SAXS.py b/pyCHX/backups/pyCHX-backup/XPCS_SAXS.py new file mode 100644 index 0000000..e910c8c --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/XPCS_SAXS.py @@ -0,0 +1,2773 @@ +""" +Dec 10, 2015 Developed by Y.G.@CHX +yuzhang@bnl.gov +This module is for the SAXS XPCS analysis +""" + +import os + +from pandas import DataFrame +from scipy.special import erf + +from pyCHX.chx_compress_analysis import ( + Multifile, + compress_eigerdata, + get_avg_imgc, + get_each_ring_mean_intensityc, + init_compress_eigerdata, + mean_intensityc, + read_compressed_eigerdata, +) +from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from pyCHX.chx_correlationp import cal_g2p +from pyCHX.chx_generic_functions import * +from pyCHX.chx_libs import RUN_GUI, Figure, colors, colors_, colors_copy, markers, markers_, markers_copy + + +def get_iq_invariant(qt, iqst): + """Get integer( q**2 * iqst ) + iqst: shape should be time, q-length + qt: shape as 
q-length + return q**2 * iqst, shape will be time length + """ + return np.sum(iqst * qt**2, axis=1) + + +def plot_time_iq_invariant( + time_stamp, + invariant, + pargs, + save=True, +): + fig, ax = plt.subplots() + plot1D( + x=time_stamp, + y=invariant, + xlabel="time (s)", + ylabel="I(q)*Q^2", + title="I(q)*Q^2 ~ time", + m="o", + c="b", + ax=ax, + ) + if save: + path = pargs["path"] + uid = pargs["uid"] + + save_arrays( + np.vstack([time_stamp, np.array(invariant)]).T, + label=["time", "Invariant"], + filename="%s_iq_invariant.csv" % uid, + path=path, + ) + # fp = path + 'uid= %s--Iq~t-'%uid + CurTime + '.png' + fp = path + "%s_iq_invariant" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def plot_q2_iq( + qt, + iqst, + time_stamp, + pargs, + ylim=[-0.001, 0.01], + xlim=[0.007, 0.2], + legend_size=4, + save=True, +): + fig, ax = plt.subplots() + N = iqst.shape[0] + for i in range(N): + yi = iqst[i] * qt**2 + # time_labeli = 'time_%s s'%( round( time_edge[i][0] * timeperframe, 3) ) + time_labeli = "time_%s s" % (round(time_stamp[i], 4)) + plot1D( + x=qt, + y=yi, + legend=time_labeli, + xlabel="Q (A-1)", + ylabel="I(q)*Q^2", + title="I(q)*Q^2 ~ time", + m=markers[i], + c=colors[i], + ax=ax, + ylim=ylim, + xlim=xlim, + legend_size=legend_size, + ) + if save: + path = pargs["path"] + uid = pargs["uid"] + fp = path + "%s_q2_iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def recover_img_from_iq(qp, iq, center, mask): + """YG. develop at CHX, 2017 July 18, + Recover image a circular average + """ + norm = get_pixelist_interp_iq(qp, iq, np.ones_like(mask), center) + img_ = norm.reshape(mask.shape) * mask + return img_ + + +def get_cirucular_average_std(img, mask, setup_pargs, img_name="xx"): + """YG. develop at CHX, 2017 July 18, + Get the standard devation of tge circular average of img + image-->I(q)-->image_mean--> (image- image_mean)**2 --> I(q) --> std = sqrt(I(q)) + """ + qp, iq, q = get_circular_average(img, mask, pargs=setup_pargs, save=False) + center = setup_pargs["center"] + img_ = (img - recover_img_from_iq(qp, iq, center, mask)) ** 2 + qp_, iq_, q_ = get_circular_average(img_, mask, pargs=setup_pargs, save=False) + std = np.sqrt(iq_) + return qp, iq, q, std + + +def get_delta_img(img, mask, setup_pargs, img_name="xx", plot=False): + """YG. 
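+ # --------------------------------------------------------------------------
+ # A minimal sketch for the invariant helpers above (illustrative only).
+ # Assumes a time-resolved set of circular averages iqst with shape
+ # (n_times, n_q) on a common q grid qt (e.g. from get_t_iqc() defined later
+ # in this module), and time_stamp holding one time (s) per row:
+ #
+ #     invariant = get_iq_invariant(qt, iqst)                       # sum_q q**2 * I(q), shape (n_times,)
+ #     plot_time_iq_invariant(time_stamp, invariant, setup_pargs)   # setup_pargs needs 'path' and 'uid'
+ #     plot_q2_iq(qt, iqst, time_stamp, setup_pargs)                # q**2 * I(q) vs q, one curve per time
+ # --------------------------------------------------------------------------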
develop at CHX, 2017 July 18, + Get the difference between img and image recovered from the circular average of img""" + qp, iq, q = get_circular_average(img, mask, pargs=setup_pargs, save=False) + center = setup_pargs["center"] + img_ = recover_img_from_iq(qp, iq, center, mask) + delta = img - img_ * img.mean() / img_.mean() + if plot: + show_img(delta, logs=True, aspect=1, cmap=cmap_albula, vmin=1e-5, vmax=10**1, image_name=img_name) + return delta + + +def combine_ring_anglar_mask(ring_mask, ang_mask): + """combine ring and anglar mask""" + + ring_max = ring_mask.max() + ang_mask_ = np.zeros(ang_mask.shape) + ind = np.where(ang_mask != 0) + ang_mask_[ind] = ang_mask[ind] + 1e9 # add some large number to qr + dumy_ring_mask = np.zeros(ring_mask.shape) + dumy_ring_mask[ring_mask == 1] = 1 + dumy_ring_ang = dumy_ring_mask * ang_mask + real_ang_lab = np.int_(np.unique(dumy_ring_ang)[1:]) - 1 + ring_ang = ring_mask * ang_mask_ + # print( real_ang_lab ) + + ura = np.unique(ring_ang)[1:] + ur = np.unique(ring_mask)[1:] + ua = np.unique(ang_mask)[real_ang_lab] + # print( np.unique( ring_mask )[1:], np.unique( ang_mask )[1:], np.unique( ring_ang )[1:] ) + + ring_ang_ = np.zeros_like(ring_ang) + newl = np.arange(1, len(ura) + 1) + # newl = np.int_( real_ang_lab ) + # print( ura, ur, ua ) + # print( len(ura) ) + for i, label in enumerate(ura): + # print (i, label) + ring_ang_.ravel()[np.where(ring_ang.ravel() == label)[0]] = newl[i] + # print( np.unique( ring_ang_ ), len( np.unique( ring_ang_ ) ) ) + return np.int_(ring_ang_) + + +def get_seg_from_ring_mask(inner_angle, outer_angle, num_angles, width_angle, center, ring_mask, qr_center): + """YG. Jan 6, 2017 + A simple wrap function to get angle cut mask from ring_mask + Parameter: + inner_angle, outer_angle, num_angles, width_angle: to define the angle + center: beam center + ring_mask: two-d array + Return: + seg_mask: two-d array + + """ + widtha = (outer_angle - inner_angle) / (num_angles + 0.01) + ang_mask, ang_center, ang_edges = get_angular_mask( + ring_mask, + inner_angle=inner_angle, + outer_angle=outer_angle, + width=widtha, + num_angles=num_angles, + center=center, + flow_geometry=True, + ) + # print( np.unique( ang_mask)[1:] ) + seg_mask = combine_ring_anglar_mask(ring_mask, ang_mask) + qval_dict = get_qval_dict(qr_center=qr_center, qz_center=ang_center) + return seg_mask, qval_dict + + +def get_seg_dict_from_ring_mask(inner_angle, outer_angle, num_angles, width_angle, center, ring_mask, qr_center): + """YG. 
Jan 6, 2017 + A simple wrap function to get angle cut mask from ring_mask + Parameter: + inner_angle, outer_angle, num_angles, width_angle: to define the angle + center: beam center + ring_mask: two-d array + Return: + seg_mask: two-d array + + """ + widtha = (outer_angle - inner_angle) / (num_angles + 0.01) + ang_mask, ang_center, ang_edges = get_angular_mask( + np.ones_like(ring_mask), + inner_angle=inner_angle, + outer_angle=outer_angle, + width=widtha, + num_angles=num_angles, + center=center, + flow_geometry=True, + ) + # print( np.unique( ang_mask)[1:] ) + seg_mask, good_ind = combine_two_roi_mask(ring_mask, ang_mask) + qval_dict = get_qval_dict(qr_center=qr_center, qz_center=ang_center) + # print( np.unique( seg_mask)[1:], good_ind ) + # print( list( qval_dict.keys()), good_ind , len(good_ind) ) + qval_dict_ = {i: qval_dict[k] for (i, k) in enumerate(good_ind)} + return seg_mask, qval_dict_ + + +def combine_two_roi_mask(ring_mask, ang_mask, pixel_num_thres=10): + """combine two roi_mask into a new roi_mask + pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, + i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres, + that roi will be considered as bad one and be removed. + e.g., ring_mask is a ring shaped mask, with unique index as (1,2) + ang_mask is a angular shaped mask, with unique index as (1,2,3,4) + the new mask will be ( 1,2,3,4 [for first ring]; + 5,6,7,8 [for second ring]; + ...) + + """ + rf = np.ravel(ring_mask) + af = np.ravel(ang_mask) + ruiq = np.unique(ring_mask) + auiq = np.unique(ang_mask) + maxa = np.max(auiq) + ring_mask_ = np.zeros_like(ring_mask) + new_mask_ = np.zeros_like(ring_mask) + new_mask_ = np.zeros_like(ring_mask) + for i, ind in enumerate(ruiq[1:]): + ring_mask_.ravel()[np.where(rf == ind)[0]] = maxa * i + + new_mask = (ring_mask_ + ang_mask) * np.array(ring_mask, dtype=bool) * np.array(ang_mask, dtype=bool) + + qind, pixelist = roi.extract_label_indices(new_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + # good_ind = np.unique( new_mask )[1:] + good_ind = np.where(nopr >= pixel_num_thres)[0] + 1 + # print( good_ind ) + l = len(good_ind) + + new_ind = np.arange(1, l + 1) + for i, gi in enumerate(good_ind): + new_mask_.ravel()[np.where(new_mask.ravel() == gi)[0]] = new_ind[i] + return new_mask_, good_ind - 1 + + +def refine_qval_dict(qval_dict, roi_mask, new_mask, pixel_num_thres=10): + """YG Dev@CHX 2019 May 29, refine qval_dict by applying a new mask + qval_dict corresponding to the roi_mask, now with the new mask, there are some roi might have zero (less than + pixel_num_thres number) pixel, so both the roi_mask and the qval_dict will be updated + """ + new_roi_mask = np.zeros_like(roi_mask) + roi_mask2 = roi_mask * new_mask + qind, pixelist = roi.extract_label_indices(roi_mask2) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + good_ind = np.where(nopr >= pixel_num_thres)[0] + 1 + l = len(good_ind) + new_ind = np.arange(1, l + 1) + for i, gi in enumerate(good_ind): + new_roi_mask.ravel()[np.where(roi_mask2.ravel() == gi)[0]] = new_ind[i] + qval_dict_ = {i: qval_dict[k - 1] for (i, k) in enumerate(good_ind)} + return new_roi_mask, qval_dict_ + + +def bin_1D(x, y, nx=None, min_x=None, max_x=None): + """ + Bin the values in y based on their x-coordinates + + Parameters + ---------- + x : array + position + y : array + intensity + nx : integer, optional + number of bins to use defaults to default bin value + 
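+ # --------------------------------------------------------------------------
+ # A small reading aid for the ROI-combining helpers above (no new behaviour):
+ # combine_two_roi_mask() relabels the overlap of a ring mask and an angular
+ # mask angle-fastest (e.g. 2 rings x 4 sectors -> labels 1..8) and drops ROIs
+ # with fewer than pixel_num_thres pixels.
+ #
+ #     seg_mask, good_ind = combine_two_roi_mask(ring_mask, ang_mask, pixel_num_thres=10)
+ #     # good_ind are 0-based indices into the original qval_dict ordering,
+ #     # which is how get_seg_dict_from_ring_mask() above rebuilds qval_dict_.
+ # --------------------------------------------------------------------------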
min_x : float, optional + Left edge of first bin defaults to minimum value of x + max_x : float, optional + Right edge of last bin defaults to maximum value of x + + Returns + ------- + edges : array + edges of bins, length nx + 1 + + val : array + sum of values in each bin, length nx + + count : array + The number of counts in each bin, length nx + """ + + # handle default values + if min_x is None: + min_x = np.min(x) + if max_x is None: + max_x = np.max(x) + if nx is None: + nx = int(max_x - min_x) + + # print ( min_x, max_x, nx) + + # use a weighted histogram to get the bin sum + bins = np.linspace(start=min_x, stop=max_x, num=nx + 1, endpoint=True) + # print (x) + # print (bins) + val, _ = np.histogram(a=x, bins=bins, weights=y) + # use an un-weighted histogram to get the counts + count, _ = np.histogram(a=x, bins=bins) + # return the three arrays + return bins, val, count + + +def circular_average( + image, calibrated_center, threshold=0, nx=None, pixel_size=(1, 1), min_x=None, max_x=None, mask=None +): + """Circular average of the the image data + The circular average is also known as the radial integration + Parameters + ---------- + image : array + Image to compute the average as a function of radius + calibrated_center : tuple + The center of the image in pixel units + argument order should be (row, col) + threshold : int, optional + Ignore counts above `threshold` + default is zero + nx : int, optional + number of bins in x + defaults is 100 bins + pixel_size : tuple, optional + The size of a pixel (in a real unit, like mm). + argument order should be (pixel_height, pixel_width) + default is (1, 1) + min_x : float, optional number of pixels + Left edge of first bin defaults to minimum value of x + max_x : float, optional number of pixels + Right edge of last bin defaults to maximum value of x + Returns + ------- + bin_centers : array + The center of each bin in R. shape is (nx, ) + ring_averages : array + Radial average of the image. shape is (nx, ). 
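+ # --------------------------------------------------------------------------
+ # A tiny worked check for bin_1D() above (illustrative values): the weighted
+ # histogram gives the per-bin sum of y, the unweighted one the counts, so the
+ # per-bin mean intensity is val / count.
+ #
+ #     bins, val, count = bin_1D(x=np.array([0.05, 0.15, 0.25, 0.35]),
+ #                               y=np.array([1., 2., 3., 4.]),
+ #                               nx=2, min_x=0.0, max_x=0.4)
+ #     # bins -> [0., 0.2, 0.4];  val -> [3., 7.];  count -> [2, 2]
+ #     # mean per bin = val / count = [1.5, 3.5]
+ # --------------------------------------------------------------------------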
+ """ + radial_val = utils.radial_grid(calibrated_center, image.shape, pixel_size) + + if mask is not None: + # maks = np.ones_like( image ) + mask = np.array(mask, dtype=bool) + binr = radial_val[mask] + image_mask = np.array(image)[mask] + + else: + binr = np.ravel(radial_val) + image_mask = np.ravel(image) + + # if nx is None: #make a one-pixel width q + # nx = int( max_r - min_r) + # if min_x is None: + # min_x= int( np.min( binr)) + # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) + # if max_x is None: + # max_x = int( np.max(binr )) + # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) + # if nx is None: + # nx = max_x_ - min_x_ + + # binr_ = np.int_( binr /(np.sqrt(pixel_size[1]*pixel_size[0] )) ) + binr_ = binr / (np.sqrt(pixel_size[1] * pixel_size[0])) + # print ( min_x, max_x, min_x_, max_x_, nx) + bin_edges, sums, counts = bin_1D(binr_, image_mask, nx=nx, min_x=min_x, max_x=max_x) + + # print (len( bin_edges), len( counts) ) + th_mask = counts > threshold + + # print (len(th_mask) ) + ring_averages = sums[th_mask] / counts[th_mask] + + bin_centers = utils.bin_edges_to_centers(bin_edges)[th_mask] + + # print (len( bin_centers ) ) + + return bin_centers, ring_averages + + +def get_circular_average( + avg_img, + mask, + pargs, + show_pixel=True, + min_x=None, + max_x=None, + nx=None, + plot_=False, + save=False, + *argv, + **kwargs +): + """get a circular average of an image + Parameters + ---------- + + avg_img: 2D-array, the image + mask: 2D-array + pargs: a dict, should contains + center: the beam center in pixel + Ldet: sample to detector distance + lambda_: the wavelength + dpix, the pixel size in mm. For Eiger1m/4m, the size is 75 um (0.075 mm) + + nx : int, optional + number of bins in x + defaults is 1500 bins + + plot_: a boolen type, if True, plot the one-D curve + plot_qinpixel:a boolen type, if True, the x-axis of the one-D curve is q in pixel; else in real Q + + Returns + ------- + qp: q in pixel + iq: intensity of circular average + q: q in real unit (A-1) + + + """ + + center, Ldet, lambda_, dpix = pargs["center"], pargs["Ldet"], pargs["lambda_"], pargs["dpix"] + uid = pargs["uid"] + qp, iq = circular_average( + avg_img, center, threshold=0, nx=nx, pixel_size=(dpix, dpix), mask=mask, min_x=min_x, max_x=max_x + ) + qp_ = qp * dpix + # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) + two_theta = utils.radius_to_twotheta(Ldet, qp_) + q = utils.twotheta_to_q(two_theta, lambda_) + if plot_: + if show_pixel: + fig = plt.figure(figsize=(8, 6)) + ax1 = fig.add_subplot(111) + # ax2 = ax1.twiny() + ax1.semilogy(qp, iq, "-o") + # ax1.semilogy(q, iq , '-o') + + ax1.set_xlabel("q (pixel)") + # ax1.set_xlabel('q ('r'$\AA^{-1}$)') + # ax2.cla() + ax1.set_ylabel("I(q)") + title = ax1.set_title("uid= %s--Circular Average" % uid) + + else: + fig = plt.figure(figsize=(8, 6)) + ax1 = fig.add_subplot(111) + ax1.semilogy(q, iq, "-o") + ax1.set_xlabel("q (" r"$\AA^{-1}$)") + ax1.set_ylabel("I(q)") + title = ax1.set_title("uid= %s--Circular Average" % uid) + ax2 = None + if "xlim" in kwargs.keys(): + ax1.set_xlim(kwargs["xlim"]) + x1, x2 = kwargs["xlim"] + w = np.where((q >= x1) & (q <= x2))[0] + if "ylim" in kwargs.keys(): + ax1.set_ylim(kwargs["ylim"]) + + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + path = pargs["path"] + fp = path + "%s_q_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + if save: + path = pargs["path"] + save_lists([q, iq], label=["q_A-1", "Iq"], filename="%s_q_Iq.csv" % uid, 
path=path) + return qp, iq, q + + +def plot_circular_average( + qp, iq, q, pargs, show_pixel=False, loglog=False, save=True, return_fig=False, *argv, **kwargs +): + if RUN_GUI: + fig = Figure() + ax1 = fig.add_subplot(111) + else: + fig, ax1 = plt.subplots() + + uid = pargs["uid"] + + if show_pixel: + if loglog: + ax1.loglog(qp, iq, "-o") + else: + ax1.semilogy(qp, iq, "-o") + ax1.set_xlabel("q (pixel)") + ax1.set_ylabel("I(q)") + title = ax1.set_title("%s_Circular Average" % uid) + else: + if loglog: + ax1.loglog(qp, iq, "-o") + else: + ax1.semilogy(q, iq, "-o") + ax1.set_xlabel("q (" r"$\AA^{-1}$)") + ax1.set_ylabel("I(q)") + title = ax1.set_title("%s_Circular Average" % uid) + ax2 = None + if "xlim" in kwargs.keys(): + xlim = kwargs["xlim"] + else: + xlim = [q.min(), q.max()] + if "ylim" in kwargs.keys(): + ylim = kwargs["ylim"] + else: + ylim = [iq.min(), iq.max()] + + ax1.set_xlim(xlim) + ax1.set_ylim(ylim) + + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + if save: + path = pargs["path"] + fp = path + "%s_q_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + if return_fig: + return fig + + +def get_angular_average(avg_img, mask, pargs, min_r, max_r, nx=3600, plot_=False, save=False, *argv, **kwargs): + """get a angular average of an image + Parameters + ---------- + + avg_img: 2D-array, the image + mask: 2D-array + pargs: a dict, should contains + center: the beam center in pixel + Ldet: sample to detector distance + lambda_: the wavelength + dpix, the pixel size in mm. For Eiger1m/4m, the size is 75 um (0.075 mm) + + nx : int, optional + number of bins in x + defaults is 1500 bins + + plot_: a boolen type, if True, plot the one-D curve + plot_qinpixel:a boolen type, if True, the x-axis of the one-D curve is q in pixel; else in real Q + + Returns + ------- + ang: ang in degree + iq: intensity of circular average + + + + """ + + center, Ldet, lambda_, dpix = pargs["center"], pargs["Ldet"], pargs["lambda_"], pargs["dpix"] + uid = pargs["uid"] + + angq, ang = angular_average( + avg_img, calibrated_center=center, pixel_size=(dpix, dpix), nx=nx, min_r=min_r, max_r=max_r, mask=mask + ) + + if plot_: + fig = plt.figure(figsize=(8, 6)) + ax = fig.add_subplot(111) + ax.plot(angq, ang, "-o") + ax.set_xlabel("angle (deg)") + ax.set_ylabel("I(ang)") + # ax.legend(loc = 'best') + uid = pargs["uid"] + title = ax.set_title("Uid= %s--t-I(Ang)" % uid) + title.set_y(1.01) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + + # fp = path + 'Uid= %s--Ang-Iq~t-'%uid + CurTime + '.png' + fp = path + "uid=%s--Ang-Iq-t-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + + return angq, ang + + +def angular_average( + image, + calibrated_center, + threshold=0, + nx=1500, + pixel_size=(1, 1), + min_r=None, + max_r=None, + min_x=None, + max_x=None, + mask=None, +): + """Angular_average of the the image data + + Parameters + ---------- + image : array + Image to compute the average as a function of radius + calibrated_center : tuple + The center of the image in pixel units + argument order should be (row, col) + threshold : int, optional + Ignore counts above `threshold` + default is zero + nx : int, optional + number of bins in x + defaults is 100 bins + pixel_size : tuple, optional + The size of a pixel (in a real unit, like mm). 
+ argument order should be (pixel_height, pixel_width) + default is (1, 1) + + min_r: float, optional number of pixels + The min r, e.g., the starting radius for angule average + max_r:float, optional number of pixels + The max r, e.g., the ending radius for angule average + max_r - min_r gives the width of the angule average + + min_x : float, optional number of pixels + Left edge of first bin defaults to minimum value of x + max_x : float, optional number of pixels + Right edge of last bin defaults to maximum value of x + Returns + ------- + bin_centers : array + The center of each bin in degree shape is (nx, ) + ring_averages : array + Radial average of the image. shape is (nx, ). + """ + + angle_val = utils.angle_grid(calibrated_center, image.shape, pixel_size) + + if min_r is None: + min_r = 0 + if max_r is None: + max_r = np.sqrt( + (image.shape[0] - calibrated_center[0]) ** 2 + (image.shape[1] - calibrated_center[1]) ** 2 + ) + r_mask = make_ring_mask(calibrated_center, image.shape, min_r, max_r) + + if mask is not None: + # maks = np.ones_like( image ) + mask = np.array(mask * r_mask, dtype=bool) + + bina = angle_val[mask] + image_mask = np.array(image)[mask] + + else: + bina = np.ravel(angle_val) + image_mask = np.ravel(image * r_mask) + + bin_edges, sums, counts = utils.bin_1D(bina, image_mask, nx, min_x=min_x, max_x=max_x) + + # print (counts) + th_mask = counts > threshold + ang_averages = sums[th_mask] / counts[th_mask] + + bin_centers = utils.bin_edges_to_centers(bin_edges)[th_mask] + + return bin_centers * 180 / np.pi, ang_averages + + +def get_t_iqc(FD, frame_edge, mask, pargs, nx=1500, plot_=False, save=False, show_progress=True, *argv, **kwargs): + """Get t-dependent Iq + + Parameters + ---------- + data_series: a image series + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + mask: a image mask + + nx : int, optional + number of bins in x + defaults is 1500 bins + plot_: a boolen type, if True, plot the time~one-D curve with qp as x-axis + Returns + --------- + qp: q in pixel + iq: intensity of circular average + q: q in real unit (A-1) + + """ + + Nt = len(frame_edge) + iqs = list(np.zeros(Nt)) + for i in range(Nt): + t1, t2 = frame_edge[i] + # print (t1,t2) + avg_img = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=show_progress) + qp, iqs[i], q = get_circular_average(avg_img, mask, pargs, nx=nx, plot_=False) + + if plot_: + fig, ax = plt.subplots(figsize=(8, 6)) + for i in range(Nt): + t1, t2 = frame_edge[i] + ax.semilogy(q, iqs[i], label="frame: %s--%s" % (t1, t2)) + # ax.set_xlabel("q in pixel") + ax.set_xlabel("Q " r"($\AA^{-1}$)") + ax.set_ylabel("I(q)") + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + + ax.legend( + loc="best", + ) + + uid = pargs["uid"] + title = ax.set_title("uid= %s--t~I(q)" % uid) + title.set_y(1.01) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + # fp = path + 'uid= %s--Iq~t-'%uid + CurTime + '.png' + fp = path + "uid=%s--Iq-t-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + save_arrays( + np.vstack([q, np.array(iqs)]).T, + label=["q_A-1"] + ["Fram-%s-%s" % (t[0], t[1]) for t in frame_edge], + filename="uid=%s-q-Iqt.csv" % uid, + path=path, + ) + + # plt.show() + + return qp, np.array(iqs), q + + +def get_t_iqc_imstack( + imgs, frame_edge, mask, pargs, nx=1500, plot_=False, save=False, 
show_progress=True, *argv, **kwargs +): + """ + Get t-dependent Iq + + variant of get_t_iqc that takes an image stack like a dask array to calculate average images and then does the radial integration + variant by LW 05/162024 + + Parameters + ---------- + imgs: image stack like dask array + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + mask: a image mask + + nx : int, optional + number of bins in x + defaults is 1500 bins + plot_: a boolen type, if True, plot the time~one-D curve with qp as x-axis + Returns + --------- + qp: q in pixel + iq: intensity of circular average + q: q in real unit (A-1) + + """ + + Nt = len(frame_edge) + iqs = list(np.zeros(Nt)) + for i in range(Nt): + t1, t2 = frame_edge[i] + # print (t1,t2) + avg_img = np.average(imgs[t1:t2, :, :], axis=0) + qp, iqs[i], q = get_circular_average(avg_img, mask, pargs, nx=nx, plot_=False) + + if plot_: + fig, ax = plt.subplots(figsize=(8, 6)) + for i in range(Nt): + t1, t2 = frame_edge[i] + ax.semilogy(q, iqs[i], label="frame: %s--%s" % (t1, t2)) + # ax.set_xlabel("q in pixel") + ax.set_xlabel("Q " r"($\AA^{-1}$)") + ax.set_ylabel("I(q)") + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + + ax.legend( + loc="best", + ) + + uid = pargs["uid"] + title = ax.set_title("uid= %s--t~I(q)" % uid) + title.set_y(1.01) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + # fp = path + 'uid= %s--Iq~t-'%uid + CurTime + '.png' + fp = path + "uid=%s--Iq-t-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + save_arrays( + np.vstack([q, np.array(iqs)]).T, + label=["q_A-1"] + ["Fram-%s-%s" % (t[0], t[1]) for t in frame_edge], + filename="uid=%s-q-Iqt.csv" % uid, + path=path, + ) + + # plt.show() + + return qp, np.array(iqs), q + + +def plot_t_iqc(q, iqs, frame_edge, pargs, save=True, return_fig=False, legend_size=None, *argv, **kwargs): + """Plot t-dependent Iq + + Parameters + ---------- + q: q in real unit (A-1), one-D array + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + iqs: intensity of circular average, shape is [len(frame_edge), len(q)] + pargs: a dict include data path, uid et.al info + + Returns + --------- + None + """ + Nt = iqs.shape[0] + if frame_edge is None: + frame_edge = np.zeros(Nt, dtype=object) + for i in range(Nt): + frame_edge[i] = ["Edge_%i" % i, "Edge_%i" % (i + 1)] + # Nt = len( frame_edge ) + fig, ax = plt.subplots(figsize=(8, 6)) + for i in range(Nt): + t1, t2 = frame_edge[i] + if np.any(iqs[i]): + ax.semilogy(q, iqs[i], label="frame: %s--%s" % (t1, t2)) + + # ax.set_xlabel("q in pixel") + ax.set_xlabel("Q " r"($\AA^{-1}$)") + ax.set_ylabel("I(q)") + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + ax.legend(loc="best", fontsize=legend_size) + uid = pargs["uid"] + title = ax.set_title("%s--t~I(q)" % uid) + title.set_y(1.01) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + # fp = path + 'uid= %s--Iq~t-'%uid + CurTime + '.png' + fp = path + "%s_q_Iqt" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + save_arrays( + np.vstack([q, np.array(iqs)]).T, + label=["q_A-1"] + ["Fram-%s-%s" % (t[0], t[1]) for t in frame_edge], + filename="%s_q_Iqt" % uid, + path=path, + ) + if return_fig: + return fig, ax + # 
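+ # --------------------------------------------------------------------------
+ # A minimal sketch for the time-resolved I(q) helpers above (illustrative
+ # only; FD, mask and setup_pargs are assumed to be prepared as usual):
+ # frame_edge lists the frame ranges to average; get_t_iqc() reads a
+ # compressed Multifile (FD) while get_t_iqc_imstack() takes an image stack
+ # (e.g. a dask array).
+ #
+ #     frame_edge = [[0, 100], [200, 400]]
+ #     qp, iqst, q = get_t_iqc(FD, frame_edge, mask, setup_pargs, nx=1500, plot_=False)
+ #     plot_t_iqc(q, iqst, frame_edge, setup_pargs, save=True)
+ # --------------------------------------------------------------------------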
plt.show() + + +def get_distance(p1, p2): + """Calc the distance between two point""" + return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) + + +def calc_q(L, a, wv): + """calc_q(L,a,wv) - calculate the q value for length L, transverse + distance a and wavelength wv. + Use this to calculate the speckle size + + L - sample to detector distance (mm) + a - pixel size transverse length from beam direction (mm) + wv - wavelength + Units of L and a should match and resultant q is in inverse units of wv. + """ + theta = np.arctan2(a, L) + q = 4 * np.pi * np.sin(theta / 2.0) / wv + return q + + +def get_t_iq(data_series, frame_edge, mask, pargs, nx=1500, plot_=False, save=False, *argv, **kwargs): + """Get t-dependent Iq + + Parameters + ---------- + data_series: a image series + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + mask: a image mask + + nx : int, optional + number of bins in x + defaults is 1500 bins + plot_: a boolen type, if True, plot the time~one-D curve with qp as x-axis + + Returns + --------- + qp: q in pixel + iq: intensity of circular average + q: q in real unit (A-1) + + """ + + Nt = len(frame_edge) + iqs = list(np.zeros(Nt)) + for i in range(Nt): + t1, t2 = frame_edge[i] + # print (t1,t2) + avg_img = get_avg_img(data_series[t1:t2], sampling=1, plot_=False) + qp, iqs[i], q = get_circular_average(avg_img, mask, pargs, nx=nx, plot_=False) + + if plot_: + fig, ax = plt.subplots(figsize=(8, 6)) + for i in range(Nt): + t1, t2 = frame_edge[i] + ax.semilogy(q, iqs[i], label="frame: %s--%s" % (t1, t2)) + # ax.set_xlabel("q in pixel") + ax.set_xlabel("Q " r"($\AA^{-1}$)") + ax.set_ylabel("I(q)") + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + + ax.legend(loc="best") + + uid = pargs["uid"] + title = ax.set_title("uid=%s--t-I(q)" % uid) + title.set_y(1.01) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + # fp = path + 'Uid= %s--Iq~t-'%uid + CurTime + '.png' + fp = path + "uid=%s--Iq-t-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + + return qp, np.array(iqs), q + + +def get_t_ang( + data_series, + frame_edge, + mask, + center, + pixel_size, + min_r, + max_r, + pargs, + nx=1500, + plot_=False, + save=False, + *argv, + **kwargs +): + """Get t-dependent angule intensity + + Parameters + ---------- + data_series: a image series + frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ] + mask: a image mask + + pixel_size : tuple, optional + The size of a pixel (in a real unit, like mm). 
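+ # --------------------------------------------------------------------------
+ # A quick numeric check for calc_q() above (illustrative numbers only:
+ # 0.075 mm pixel, 5000 mm detector distance, 1.285 A wavelength), useful for
+ # estimating the q spanned by one pixel when judging speckle size:
+ #
+ #     calc_q(L=5000., a=0.075, wv=1.285)   # ~ 7.3e-5, in inverse Angstrom
+ # --------------------------------------------------------------------------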
+ argument order should be (pixel_height, pixel_width) + default is (1, 1) + center: the beam center in pixel + min_r: float, optional number of pixels + The min r, e.g., the starting radius for angule average + max_r:float, optional number of pixels + The max r, e.g., the ending radius for angule average + + max_r - min_r gives the width of the angule average + + nx : int, optional + number of bins in x + defaults is 1500 bins + plot_: a boolen type, if True, plot the time~one-D curve with qp as x-axis + + Returns + --------- + qp: q in pixel + iq: intensity of circular average + q: q in real unit (A-1) + + """ + + Nt = len(frame_edge) + iqs = list(np.zeros(Nt)) + for i in range(Nt): + t1, t2 = frame_edge[i] + # print (t1,t2) + avg_img = get_avg_img(data_series[t1:t2], sampling=1, plot_=False) + qp, iqs[i] = angular_average( + avg_img, center, pixel_size=pixel_size, nx=nx, min_r=min_r, max_r=max_r, mask=mask + ) + + if plot_: + fig, ax = plt.subplots(figsize=(8, 8)) + for i in range(Nt): + t1, t2 = frame_edge[i] + # ax.semilogy(qp* 180/np.pi, iqs[i], label="frame: %s--%s"%( t1,t2) ) + ax.plot(qp, iqs[i], label="frame: %s--%s" % (t1, t2)) + ax.set_xlabel("angle (deg)") + ax.set_ylabel("I(ang)") + ax.legend(loc="best") + uid = pargs["uid"] + title = ax.set_title("Uid= %s--t-I(Ang)" % uid) + title.set_y(1.01) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = pargs["path"] + uid = pargs["uid"] + # fp = path + 'Uid= %s--Ang-Iq~t-'%uid + CurTime + '.png' + fp = path + "uid=%s--Ang-Iq-t-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + + return qp, np.array(iqs) + + +def make_ring_mask(center, shape, min_r, max_r): + """ + Make a ring mask. + + Parameters + ---------- + center : tuple + point in image where r=0; may be a float giving subpixel precision. + Order is (rr, cc). + shape: tuple + Image shape which is used to determine the maximum extent of output + pixel coordinates. Order is (rr, cc). + + min_r: float, optional number of pixels + The min r, e.g., the starting radius of the ring + max_r:float, optional number of pixels + The max r, e.g., the ending radius of the ring + max_r - min_r gives the width of the ring + Returns + ------- + ring_mask : array + + + """ + r_val = utils.radial_grid(center, shape, [1.0, 1.0]) + r_mask = np.zeros_like(r_val, dtype=np.int32) + r_mask[np.where((r_val > min_r) & (r_val < max_r))] = 1 + + return r_mask + + +def _make_roi(coords, edges, shape): + """Helper function to create ring rois and bar rois + Parameters + ---------- + coords : array + shape is image shape + edges : list + List of tuples of inner (left or top) and outer (right or bottom) + edges of each roi. + e.g., edges=[(1, 2), (11, 12), (21, 22)] + shape : tuple + Shape of the image in which to create the ROIs + e.g., shape=(512, 512) + Returns + ------- + label_array : array + Elements not inside any ROI are zero; elements inside each + ROI are 1, 2, 3, corresponding to the order they are + specified in `edges`. + Has shape=`image shape` + """ + label_array = np.digitize(coords, edges, right=False) + # Even elements of label_array are in the space between rings. + label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2 + return label_array.reshape(shape) + + +def angulars(edges, center, shape): + """ + Draw annual (angluar-shaped) shaped regions of interest. + Each ring will be labeled with an integer. Regions outside any ring will + be filled with zeros. 
+ Parameters + ---------- + edges: list + giving the inner and outer angle in unit of radians + e.g., [(1, 2), (11, 12), (21, 22)] + center: tuple + point in image where r=0; may be a float giving subpixel precision. + Order is (rr, cc). + shape: tuple + Image shape which is used to determine the maximum extent of output + pixel coordinates. Order is (rr, cc). + Returns + ------- + label_array : array + Elements not inside any ROI are zero; elements inside each + ROI are 1, 2, 3, corresponding to the order they are specified + in edges. + """ + edges = np.atleast_2d(np.asarray(edges)).ravel() + if not 0 == len(edges) % 2: + raise ValueError( + "edges should have an even number of elements, " "giving inner, outer radii for each angular" + ) + if not np.all(np.diff(edges) > 0): + raise ValueError( + "edges are expected to be monotonically increasing, " + "giving inner and outer radii of each angular from " + "r=0 outward" + ) + + angle_val = utils.angle_grid(center, shape).ravel() + + return _make_roi(angle_val, edges, shape) + + +def update_angular_mask_width_edge(edge, mask, center, roi_mask): + """YG Dev@CHX May, 2019 primary developed for flow-geometry + Update anglure mask using new edge + Input: + edge: the edge of the anglues + mask: the mask of the image + center: the beam center + roi_mask: the roi mask + Output: + roi_mask: updated roi_mask (effective index starting from 1) + """ + for i, (al, ah) in enumerate(edge): + edge_ = np.array([[al, ah]]) + ang = angulars(np.radians(edge_), center, mask.shape) * mask + w = np.ravel(ang) == 1 + np.ravel(roi_mask)[w] = i + 1 + return roi_mask + + +def fix_angle_mask_at_PN_180(edge, mask, center, roi_mask): + """YG Dev@CHX May, 2019 + to fix the problem of making angluar mask at the angle edge around +/- 180 + Input: + edge: the edge of the anglues + mask: the mask of the image + center: the beam center + roi_mask: the roi mask + Output: + roi_mask: by fixing the edge effect (effective index starting from 1) + """ + for i, (al, ah) in enumerate(edge): + flag = True + if al <= -180.0 and ah > -180: + edge_ = np.array([[al + 360, 180]]) + elif al <= 180.0 and ah > 180: + edge_ = np.array([[-180, ah - 360]]) + elif al <= -180.0 and ah < -180: + edge_ = np.array([[al + 360, ah + 360]]) + elif al >= 180.0 and ah > 180: + edge_ = np.array([[al - 360, ah - 360]]) + + else: + flag = False + if flag: + # print(i+1, al,ah, edge_) + ang = angulars(np.radians(edge_), center, mask.shape) * mask + w = np.ravel(ang) == 1 + # print(w) + np.ravel(roi_mask)[w] = i + 1 + return roi_mask + + +def get_angular_mask( + mask, + inner_angle=0, + outer_angle=360, + width=None, + edges=None, + num_angles=12, + center=None, + dpix=[1, 1], + flow_geometry=False, + flow_angle=None, + fix_180_angle=False, + verbose=False, +): + """ + mask: 2D-array + inner_angle # the starting angle in unit of degree + outer_angle # the ending angle in unit of degree + width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI + edges: default, None. otherwise, give a customized angle edges + num_angles # number of angles + + center: the beam center in pixel + dpix, the pixel size in mm. For Eiger1m/4m, the size is 75 um (0.075 mm) + flow_geometry: if True, please give the flow angle. 
the map will be a mirror symmetry along the flow direction + + Returns + ------- + ang_mask: a ring mask, np.array + ang_center: ang in unit of degree + ang_val: ang edges in degree + + """ + + if flow_geometry: + if verbose: + print( + """ +For the flow geometry, please only define a quarter of the expected ROI. +The quarter ROI should start from around flow_angle - 90 to around the flow_angle +Otherwise, there will be somne errors. +The final ROI will have a center symmetry as well as a mirror symmetry along the flow direction. +An example for flow_angle=90 will be: +edges = roi.ring_edges( -10, 20, 2.5, 5) --> + array([[-10. , 10. ], + [ 12.5, 32.5], + [ 35. , 55. ], + [ 57.5, 77.5], + [ 80. , 100. ]]) + + """ + ) + + if edges is None: + if num_angles != 1: + spacing = (outer_angle - inner_angle - num_angles * width) / (num_angles - 1) # spacing between rings + else: + spacing = 0 + edges = roi.ring_edges(inner_angle, width, spacing, num_angles) + + # print (edges) + angs = angulars(np.radians(edges), center, mask.shape) + ang_center = np.average(edges, axis=1) + ang_mask = angs * mask + ang_mask = np.array(ang_mask, dtype=int) + if flow_geometry: + edges2 = edges - 180 + for edge_ in [edges2]: + ang_mask = update_angular_mask_width_edge(edge_, mask, center, ang_mask) + ang_mask = fix_angle_mask_at_PN_180(edge_, mask, center, ang_mask) + if flow_angle is not None: + edges3 = 2 * flow_angle - edges[:, ::-1] + edges4 = 2 * flow_angle - edges[:, ::-1] - 180 + for edge_ in [edges3, edges4]: + ang_mask = update_angular_mask_width_edge(edge_, mask, center, ang_mask) + ang_mask = fix_angle_mask_at_PN_180(edge_, mask, center, ang_mask) + else: + # for i, edge_ in enumerate( edges ): + # print(edge_) + if fix_180_angle: + ang_mask = fix_angle_mask_at_PN_180(edges, mask, center, ang_mask) + labels, indices = roi.extract_label_indices(ang_mask) + nopr = np.bincount(np.array(labels, dtype=int))[1:] + if len(np.where(nopr == 0)[0] != 0): + print("Some angs contain zero pixels. Please redefine the edges.") + return ang_mask, ang_center, edges + + +def get_angular_mask_old( + mask, + inner_angle=0, + outer_angle=360, + width=None, + edges=None, + num_angles=12, + center=None, + dpix=[1, 1], + flow_geometry=False, + flow_angle=90, +): + """ + mask: 2D-array + inner_angle # the starting angle in unit of degree + outer_angle # the ending angle in unit of degree + width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI + edges: default, None. otherwise, give a customized angle edges + num_angles # number of angles + + center: the beam center in pixel + dpix, the pixel size in mm. For Eiger1m/4m, the size is 75 um (0.075 mm) + flow_geometry: if True, please give the flow angle. 
the map will be a mirror symmetry along the flow direction + + Returns + ------- + ang_mask: a ring mask, np.array + ang_center: ang in unit of degree + ang_val: ang edges in degree + + """ + + if flow_geometry: + if edges is None: + if inner_angle < 0: + print("In this flow_geometry, the inner_angle should be larger than 0") + if outer_angle > 180: + print("In this flow_geometry, the out_angle should be smaller than 180") + + if edges is None: + if num_angles != 1: + spacing = (outer_angle - inner_angle - num_angles * width) / (num_angles - 1) # spacing between rings + else: + spacing = 0 + edges = roi.ring_edges(inner_angle, width, spacing, num_angles) + + # print (edges) + angs = angulars(np.radians(edges), center, mask.shape) + ang_center = np.average(edges, axis=1) + ang_mask = angs * mask + ang_mask = np.array(ang_mask, dtype=int) + + if flow_geometry: + outer_angle -= 180 + inner_angle -= 180 + edges2 = roi.ring_edges(inner_angle, width, spacing, num_angles) + # print (edges) + angs2 = angulars(np.radians(edges2), center, mask.shape) + ang_mask2 = angs2 * mask + ang_mask2 = np.array(ang_mask2, dtype=int) + ang_mask += ang_mask2 + else: + for i, (al, ah) in enumerate(edges): + if al <= -180.0 and ah > -180: + # print(i+1, al,ah) + edge3 = np.array([[al + 360, 180]]) + ang3 = angulars(np.radians(edge3), center, mask.shape) * mask + w = np.ravel(ang3) == 1 + # print(w) + np.ravel(ang_mask)[w] = i + 1 + + labels, indices = roi.extract_label_indices(ang_mask) + nopr = np.bincount(np.array(labels, dtype=int))[1:] + + if len(np.where(nopr == 0)[0] != 0): + # print (nopr) + print("Some angs contain zero pixels. Please redefine the edges.") + return ang_mask, ang_center, edges + + +def two_theta_to_radius(dist_sample, two_theta): + """ + Converts scattering angle (2:math:`2\\theta`) to radius (from the calibrated center) + with known detector to sample distance. + + Parameters + ---------- + dist_sample : float + distance from the sample to the detector (mm) + + two_theta : array + An array of :math:`2\\theta` values + + Returns + ------- + radius : array + The L2 norm of the distance (mm) of each pixel from the calibrated center. + """ + return np.tan(two_theta) * dist_sample + + +def get_ring_mask( + mask, + inner_radius=40, + outer_radius=762, + width=6, + num_rings=12, + edges=None, + unit="pixel", + pargs=None, + return_q_in_pixel=False, +): + # def get_ring_mask( mask, inner_radius= 0.0020, outer_radius = 0.009, width = 0.0002, num_rings = 12, + # edges=None, unit='pixel',pargs=None ): + """ + mask: 2D-array + inner_radius #radius of the first ring + outer_radius # radius of the last ring + width # width of each ring + num_rings # number of rings + pargs: a dict, should contains + center: the beam center in pixel + Ldet: sample to detector distance + lambda_: the wavelength, in unit of A + dpix, the pixel size in mm. 
For Eiger1m/4m, the size is 75 um (0.075 mm) + unit: if pixel, all the radius inputs are in unit of pixel + else: should be in unit of A-1 + Returns + ------- + ring_mask: a ring mask, np.array + q_ring_center: q in real unit (A-1) + q_ring_val: q edges in A-1 + + """ + + center, Ldet, lambda_, dpix = pargs["center"], pargs["Ldet"], pargs["lambda_"], pargs["dpix"] + + # spacing = (outer_radius - inner_radius)/(num_rings-1) - 2 # spacing between rings + # qc = np.int_( np.linspace( inner_radius,outer_radius, num_rings ) ) + # edges = np.zeros( [ len(qc), 2] ) + # if width%2: + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 + # else: + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 + + # find the edges of the required rings + if edges is None: + if num_rings != 1: + spacing = (outer_radius - inner_radius - num_rings * width) / (num_rings - 1) # spacing between rings + else: + spacing = 0 + edges = roi.ring_edges(inner_radius, width, spacing, num_rings) + + if (unit == "pixel") or (unit == "p"): + if not return_q_in_pixel: + two_theta = utils.radius_to_twotheta(Ldet, edges * dpix) + q_ring_val = utils.twotheta_to_q(two_theta, lambda_) + else: + q_ring_val = edges + # print(edges) + else: # in unit of A-1 + two_theta = utils.q_to_twotheta(edges, lambda_) + q_ring_val = edges + edges = two_theta_to_radius(Ldet, two_theta) / dpix # converto pixel + + q_ring_center = np.average(q_ring_val, axis=1) + + rings = roi.rings(edges, center, mask.shape) + ring_mask = rings * mask + ring_mask = np.array(ring_mask, dtype=int) + + labels, indices = roi.extract_label_indices(ring_mask) + nopr = np.bincount(np.array(labels, dtype=int))[1:] + + if len(np.where(nopr == 0)[0] != 0): + print(nopr) + print("Some rings contain zero pixels. Please redefine the edges.") + return ring_mask, q_ring_center, q_ring_val + + +def get_ring_anglar_mask(ring_mask, ang_mask, q_ring_center, ang_center): + """get ring_anglar mask""" + + ring_max = ring_mask.max() + + ang_mask_ = np.zeros(ang_mask.shape) + ind = np.where(ang_mask != 0) + ang_mask_[ind] = ang_mask[ind] + 1e9 # add some large number to qr + + dumy_ring_mask = np.zeros(ring_mask.shape) + dumy_ring_mask[ring_mask == 1] = 1 + dumy_ring_ang = dumy_ring_mask * ang_mask + real_ang_lab = np.int_(np.unique(dumy_ring_ang)[1:]) - 1 + + ring_ang = ring_mask * ang_mask_ + + # convert label_array_qzr to [1,2,3,...] 
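+ # ring_ang holds sparse labels (each ring label multiplied by the 1e9-offset
+ # angle label), so the nonzero labels are remapped below to consecutive
+ # integers 1..N, and the matching q-ring center and angle center are
+ # collected for each new label.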
+ ura = np.unique(ring_ang)[1:] + + ur = np.unique(ring_mask)[1:] + ua = np.unique(ang_mask)[real_ang_lab] + + ring_ang_ = np.zeros_like(ring_ang) + newl = np.arange(1, len(ura) + 1) + # newl = np.int_( real_ang_lab ) + + rc = [[q_ring_center[i]] * len(ua) for i in range(len(ur))] + ac = list(ang_center[ua]) * len(ur) + + # rc =list( q_ring_center) * len( ua ) + # ac= [ [ ang_center[i]]*len( ur ) for i in range(len( ua )) ] + + for i, label in enumerate(ura): + # print (i, label) + ring_ang_.ravel()[np.where(ring_ang.ravel() == label)[0]] = newl[i] + + return np.int_(ring_ang_), np.concatenate(np.array(rc)), np.array(ac) + + +def show_ring_ang_roi(data, rois, alpha=0.3, save=False, *argv, **kwargs): + """ + May 16, 2016, Y.G.@CHX + plot a saxs image with rois( a label array) + + Parameters: + data: 2-D array, a gisaxs image + rois: 2-D array, a label array + + + Options: + alpha: transparency of the label array on top of data + + Return: + a plot of a qzr map of a gisaxs image with rois( a label array) + + + Examples: + show_qzr_roi( avg_imgr, box_maskr, inc_x0, ticks) + + """ + + # import matplotlib.pyplot as plt + # import copy + # import matplotlib.cm as mcm + + # cmap='viridis' + # _cmap = copy.copy((mcm.get_cmap(cmap))) + # _cmap.set_under('w', 0) + + avg_imgr, box_maskr = data, rois + num_qzr = len(np.unique(box_maskr)) - 1 + fig, ax = plt.subplots(figsize=(8, 12)) + ax.set_title("ROI--Labeled Array on Data") + im, im_label = show_label_array_on_image( + ax, + avg_imgr, + box_maskr, + imshow_cmap="viridis", + cmap="Paired", + alpha=alpha, + vmin=0.01, + vmax=30.0, + origin="lower", + ) + + for i in range(1, num_qzr + 1): + ind = np.where(box_maskr == i)[1] + indz = np.where(box_maskr == i)[0] + c = "%i" % i + y_val = int(indz.mean()) + + x_val = int(ind.mean()) + # print (xval, y) + ax.text(x_val, y_val, c, va="center", ha="center") + + # print (x_val1,x_val2) + + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + plt.colorbar(im, cax=cax) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--ROI-on-image-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + # ax.set_xlabel(r'$q_r$', fontsize=22) + # ax.set_ylabel(r'$q_z$',fontsize=22) + # plt.show() + + +def plot_qIq_with_ROI( + q, iq, q_ring_center, q_ring_edge=None, logs=True, save=False, return_fig=False, *argv, **kwargs +): + """Aug 6, 2016, Y.G.@CHX + Update@2019, March to make a span plot with q_ring_edge + plot q~Iq with interested q rings""" + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + if RUN_GUI: + fig = Figure(figsize=(8, 6)) + axes = fig.add_subplot(111) + else: + fig, axes = plt.subplots(figsize=(8, 6)) + if logs: + axes.semilogy(q, iq, "-o") + else: + axes.plot(q, iq, "-o") + axes.set_title("%s--Circular Average with the Q ring values" % uid) + axes.set_ylabel("I(q)") + axes.set_xlabel("Q " r"($\AA^{-1}$)") + if "xlim" in kwargs.keys(): + xlim = kwargs["xlim"] + else: + xlim = [q.min(), q.max()] + if "ylim" in kwargs.keys(): + ylim = kwargs["ylim"] + else: + ylim = [iq.min(), iq.max()] + axes.set_xlim(xlim) + axes.set_ylim(ylim) + if q_ring_edge is not None: + for qe in q_ring_edge: + p = axes.axvspan(qe[0], qe[1], facecolor="#2ca02c", alpha=0.5) + else: + num_rings = len(np.unique(q_ring_center)) + for i in 
range(num_rings): + axes.axvline(q_ring_center[i]) # , linewidth = 5 ) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "%s_ROI_on_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + if return_fig: + return fig, axes + + +def get_each_ring_mean_intensity( + data_series, ring_mask, sampling, timeperframe, plot_=True, save=False, *argv, **kwargs +): + """ + get time dependent mean intensity of each ring + """ + mean_int_sets, index_list = roi.mean_intensity(np.array(data_series[::sampling]), ring_mask) + + times = np.arange(len(data_series)) * timeperframe # get the time for each frame + num_rings = len(np.unique(ring_mask)[1:]) + if plot_: + fig, ax = plt.subplots(figsize=(8, 8)) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + ax.set_title("%s--Mean intensity of each ring" % uid) + for i in range(num_rings): + ax.plot(mean_int_sets[:, i], label="Ring " + str(i + 1), marker="o", ls="-") + ax.set_xlabel("Time") + ax.set_ylabel("Mean Intensity") + ax.legend(loc="best") + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + # fp = path + "Uid= %s--Mean intensity of each ring-"%uid + CurTime + '.png' + fp = path + "%s_Mean_intensity_of_each_ROI" % uid + ".png" + + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + return times, mean_int_sets + + +# plot g2 results +def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, return_fig=False, *argv, **kwargs): + """plot g2 results of segments with radius and angle partation , + + g2: one-time correlation function + taus: the time delays + res_pargs, a dict, can contains + uid/path/qr_center/qz_center/ + master_angle_plot: if True, plot angle first, then q + kwargs: can contains + vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)] + ylim/xlim: the limit of y and x + + e.g. 
+ plot_saxs_rad_ang_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, ang_center=ang_center, vlim=[.99, 1.01] ) + + """ + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + q_ring_center = res_pargs["q_ring_center"] + num_qr = len(q_ring_center) + ang_center = res_pargs["ang_center"] + num_qa = len(ang_center) + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + if "q_ring_center" in kwargs.keys(): + q_ring_center = kwargs["q_ring_center"] + num_qr = len(q_ring_center) + else: + print("Please give q_ring_center") + if "ang_center" in kwargs.keys(): + ang_center = kwargs["ang_center"] + num_qa = len(ang_center) + else: + print("Please give ang_center") + + if master_angle_plot: + first_var = num_qa + sec_var = num_qr + else: + first_var = num_qr + sec_var = num_qa + + for qr_ind in range(first_var): + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + fig = plt.figure(figsize=(10, 12)) + # fig = plt.figure() + if master_angle_plot: + title_qr = "Angle= %.2f" % (ang_center[qr_ind]) + r"$^\circ$" + else: + title_qr = " Qr= %.5f " % (q_ring_center[qr_ind]) + r"$\AA^{-1}$" + + plt.title("uid= %s:--->" % uid + title_qr, fontsize=20, y=1.1) + # print (qz_ind,title_qz) + # if num_qr!=1:plt.axis('off') + plt.axis("off") + sx = int(round(np.sqrt(sec_var))) + if sec_var % sx == 0: + sy = int(sec_var / sx) + else: + sy = int(sec_var / sx + 1) + + for sn in range(sec_var): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel("g2") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_angle_plot: + i = sn + qr_ind * num_qr + title_qa = "%.5f " % (q_ring_center[sn]) + r"$\AA^{-1}$" + else: + i = sn + qr_ind * num_qa + title_qa = "%.2f" % (ang_center[sn]) + r"$^\circ$" + "( %d )" % (i) + # title_qa = " Angle= " + '%.2f'%( ang_center[sn]) + r'$^\circ$' + '( %d )'%i + + # title_qa = '%.2f'%( ang_center[sn]) + r'$^\circ$' + '( %d )'%(i) + # if num_qr==1: + # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa + # else: + # title = title_qa + title = title_qa + ax.set_title(title, y=1.1, fontsize=12) + y = g2[:, i] + ax.semilogx(taus, y, "-o", markersize=6) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + + # fp = path + 'g2--uid=%s-qr=%s'%(uid,q_ring_center[qr_ind]) + CurTime + '.png' + fp = path + "uid=%s--g2-qr=%s" % (uid, q_ring_center[qr_ind]) + "-.png" + plt.savefig(fp, dpi=fig.dpi) + fig.set_tight_layout(True) + if return_fig: + return fig + + +############################################ +##a good func to fit g2 for all types of geogmetries +############################################ + + +def fit_saxs_rad_ang_g2( + g2, res_pargs=None, function="simple_exponential", fit_range=None, master_angle_plot=False, *argv, **kwargs +): + """ + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + res_pargs: a dict, contains keys + taus: the time delay, with the same length as g2 + q_ring_center: the center of q rings, for the title of each sub-plot + uid: unique id, for the 
title of plot + + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + + #fit_vibration: + # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation + + Returns + ------- + fit resutls: + a dict, with keys as + 'baseline': + 'beta': + 'relaxation_rate': + an example: + result = fit_g2( g2, res_pargs, function = 'simple') + result = fit_g2( g2, res_pargs, function = 'stretched') + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + q_ring_center = res_pargs["q_ring_center"] + num_qr = len(q_ring_center) + ang_center = res_pargs["ang_center"] + num_qa = len(ang_center) + taus = res_pargs["taus"] + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + if "q_ring_center" in kwargs.keys(): + q_ring_center = kwargs["q_ring_center"] + num_qr = len(q_ring_center) + else: + print("Please give q_ring_center") + if "ang_center" in kwargs.keys(): + ang_center = kwargs["ang_center"] + num_qa = len(ang_center) + else: + print("Please give ang_center") + + num_rings = g2.shape[1] + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline + freq = np.zeros(num_rings) + + if function == "flow_para_function" or function == "flow_para": + flow = np.zeros(num_rings) # baseline + if "fit_variables" in kwargs: + additional_var = kwargs["fit_variables"] + _vars = [k for k in list(additional_var.keys()) if additional_var[k] is False] + else: + _vars = [] + + # print (_vars) + + _guess_val = dict(beta=0.1, alpha=1.0, relaxation_rate=0.005, baseline=1.0) + + if "guess_values" in kwargs: + guess_values = kwargs["guess_values"] + _guess_val.update(guess_values) + + if function == "simple_exponential" or function == "simple": + _vars = np.unique(_vars + ["alpha"]) + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) + + elif function == "stretched_exponential" or function == "stretched": + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) + + elif function == "stretched_vibration": + mod = Model(stretched_auto_corr_scat_factor_with_vibration) # , independent_vars= _vars) + + elif function == "flow_para_function" or function == "flow_para": + mod = Model(flow_para_function) # , independent_vars= _vars) + + else: + print( + "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" + % function + ) + + mod.set_param_hint("baseline", min=0.5, max=1.5) + mod.set_param_hint("beta", min=0.0) + mod.set_param_hint("alpha", min=0.0) + mod.set_param_hint("relaxation_rate", min=0.0) + if function == "flow_para_function" or function == "flow_para": + mod.set_param_hint("flow_velocity", min=0) + if function == "stretched_vibration": + mod.set_param_hint("freq", min=0) + mod.set_param_hint("amp", min=0) + + _beta = _guess_val["beta"] + _alpha = _guess_val["alpha"] + _relaxation_rate = _guess_val["relaxation_rate"] + _baseline = _guess_val["baseline"] + pars = mod.make_params(beta=_beta, alpha=_alpha, relaxation_rate=_relaxation_rate, baseline=_baseline) + + if function == "flow_para_function" or function == "flow_para": + 
_flow_velocity = _guess_val["flow_velocity"] + pars = mod.make_params( + beta=_beta, + alpha=_alpha, + flow_velocity=_flow_velocity, + relaxation_rate=_relaxation_rate, + baseline=_baseline, + ) + + if function == "stretched_vibration": + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, alpha=_alpha, freq=_freq, amp=_amp, relaxation_rate=_relaxation_rate, baseline=_baseline + ) + + for v in _vars: + pars["%s" % v].vary = False + if master_angle_plot: + first_var = num_qa + sec_var = num_qr + else: + first_var = num_qr + sec_var = num_qa + + for qr_ind in range(first_var): + # fig = plt.figure(figsize=(10, 12)) + fig = plt.figure(figsize=(14, 8)) + # fig = plt.figure() + if master_angle_plot: + title_qr = "Angle= %.2f" % (ang_center[qr_ind]) + r"$^\circ$" + else: + title_qr = " Qr= %.5f " % (q_ring_center[qr_ind]) + r"$\AA^{-1}$" + + # plt.title('uid= %s:--->'%uid + title_qr,fontsize=20, y =1.1) + plt.axis("off") + + # sx = int(round(np.sqrt( sec_var )) ) + sy = 4 + # if sec_var%sx == 0: + if sec_var % sy == 0: + # sy = int(sec_var/sx) + sx = int(sec_var / sy) + else: + # sy=int(sec_var/sx+1) + sx = int(sec_var / sy + 1) + + for sn in range(sec_var): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel(r"$g^($" + r"$^2$" + r"$^)$" + r"$(Q,$" + r"$\tau$" + r"$)$") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_angle_plot: + i = sn + qr_ind * num_qr + title_qa = "%.5f " % (q_ring_center[sn]) + r"$\AA^{-1}$" + else: + i = sn + qr_ind * num_qa + title_qa = "%.2f" % (ang_center[sn]) + r"$^\circ$" + "( %d )" % (i) + + title = title_qa + ax.set_title(title, y=1.1) + + if fit_range is not None: + y = g2[1:, i][fit_range[0] : fit_range[1]] + lags = taus[1:][fit_range[0] : fit_range[1]] + else: + y = g2[1:, i] + lags = taus[1:] + + result1 = mod.fit(y, pars, x=lags) + + # print ( result1.best_values) + rate[i] = result1.best_values["relaxation_rate"] + # rate[i] = 1e-16 + beta[i] = result1.best_values["beta"] + + # baseline[i] = 1.0 + baseline[i] = result1.best_values["baseline"] + + # print( result1.best_values['freq'] ) + + if function == "simple_exponential" or function == "simple": + alpha[i] = 1.0 + elif function == "stretched_exponential" or function == "stretched": + alpha[i] = result1.best_values["alpha"] + elif function == "stretched_vibration": + alpha[i] = result1.best_values["alpha"] + freq[i] = result1.best_values["freq"] + + if function == "flow_para_function" or function == "flow_para": + flow[i] = result1.best_values["flow_velocity"] + + ax.semilogx(taus[1:], g2[1:, i], "ro") + ax.semilogx(lags, result1.best_fit, "-b") + + txts = r"$\gamma$" + r"$ = %.3f$" % (1 / rate[i]) + r"$ s$" + x = 0.25 + y0 = 0.75 + fontsize = 12 + ax.text(x=x, y=y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + txts = r"$\alpha$" + r"$ = %.3f$" % (alpha[i]) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=x, y=y0 - 0.1, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$baseline$" + r"$ = %.3f$" % (baseline[i]) + ax.text(x=x, y=y0 - 0.2, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if function == "flow_para_function" or function == "flow_para": + txts = r"$flow_v$" + r"$ = %.3f$" % (flow[i]) + ax.text(x=x, y=y0 - 0.3, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + 
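+ # the per-panel fits for this qr_ind are done; the figure is saved once
+ # below, with q_ring_center[qr_ind] encoded in the filename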
fp = path + "uid=%s--g2--qr-%s--fit-" % (uid, q_ring_center[qr_ind]) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + result = dict(beta=beta, rate=rate, alpha=alpha, baseline=baseline) + if function == "flow_para_function" or function == "flow_para": + result = dict(beta=beta, rate=rate, alpha=alpha, baseline=baseline, flow_velocity=flow) + if function == "stretched_vibration": + result = dict(beta=beta, rate=rate, alpha=alpha, baseline=baseline, freq=freq) + + return result + + +def save_seg_saxs_g2(g2, res_pargs, time_label=True, *argv, **kwargs): + """ + Aug 8, 2016, Y.G.@CHX + save g2 results, + res_pargs should contain + g2: one-time correlation function + res_pargs: contions taus, q_ring_center values + path: + uid: + + """ + taus = res_pargs["taus"] + qz_center = res_pargs["q_ring_center"] + qr_center = res_pargs["ang_center"] + path = res_pargs["path"] + uid = res_pargs["uid"] + + df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) + columns = [] + columns.append("tau") + + for qz in qz_center: + for qr in qr_center: + columns.append([str(qz), str(qr)]) + + df.columns = columns + + if time_label: + dt = datetime.now() + CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + filename = os.path.join(path, "g2-%s-%s.csv" % (uid, CurTime)) + else: + filename = os.path.join(path, "uid=%s--g2.csv" % (uid)) + df.to_csv(filename) + print("The g2 of uid= %s is saved with filename as %s" % (uid, filename)) + + +def linear_fit(x, y): + D0 = np.polyfit(x, y, 1) + gmfit = np.poly1d(D0) + return D0, gmfit + + +def plot_gamma(): + """not work""" + fig, ax = plt.subplots() + ax.set_title("Uid= %s--Beta" % uid) + ax.set_title("Uid= %s--Gamma" % uid) + # ax.plot( q_ring_center**2 , 1/rate, 'ro', ls='--') + + ax.loglog(q_ring_center, 1 / result["rate"], "ro", ls="--") + # ax.set_ylabel('Log( Beta0 'r'$\beta$'"($s^{-1}$)") + ax.set_ylabel("Log( Gamma )") + ax.set_xlabel("$Log(q)$" r"($\AA^{-1}$)") + # plt.show() + + +def multi_uids_saxs_flow_xpcs_analysis( + uids, + md, + run_num=1, + sub_num=None, + good_start=10, + good_end=None, + force_compress=False, + fit_vibration=True, + fit=True, + compress=True, + para_run=False, +): + """'Aug 16, 2016, YG@CHX-NSLS2 + Do SAXS-XPCS analysis for multi uid data + uids: a list of uids to be analyzed + md: metadata, should at least include + mask: array, mask data + data_dir: the path to save data, the result will be saved in data_dir/uid/... + dpix: + Ldet: + lambda: + timeperframe: + center + run_num: the run number + sub_num: the number in each sub-run + fit: if fit, do fit for g2 and show/save all fit plots + compress: apply a compress algorithm + + Save g2/metadata/g2-fit plot/g2 q-rate plot/ of each uid in data_dir/uid/... 
+ return: + g2s: a dictionary, {run_num: sub_num: g2_of_each_uid} + taus, + use_uids: return the valid uids + """ + + g2s = {} # g2s[run_number][sub_seq] = g2 of each uid + lag_steps = [0] + useful_uids = {} + if sub_num is None: + sub_num = len(uids) // run_num + + mask = md["mask"] + data_dir = md["data_dir"] + # ring_mask = md['ring_mask'] + # q_ring_center = md['q_ring_center'] + + seg_mask_v = md["seg_mask_v"] + seg_mask_p = md["seg_mask_p"] + rcen_p, acen_p = md["rcen_p"], md["acen_v"] + rcen_v, acen_v = md["rcen_p"], md["acen_v"] + + lag_steps = [0] + + for run_seq in range(run_num): + g2s[run_seq + 1] = {} + useful_uids[run_seq + 1] = {} + i = 0 + for sub_seq in range(0, sub_num): + # good_end=good_end + + uid = uids[sub_seq + run_seq * sub_num] + print("The %i--th uid to be analyzed is : %s" % (i, uid)) + try: + detector = get_detector(db[uid]) + imgs = load_data(uid, detector, reverse=True) + except: + print("The %i--th uid: %s can not load data" % (i, uid)) + imgs = 0 + + data_dir_ = os.path.join(data_dir, "%s/" % uid) + os.makedirs(data_dir_, exist_ok=True) + + i += 1 + if imgs != 0: + imgsa = apply_mask(imgs, mask) + Nimg = len(imgs) + md_ = imgs.md + useful_uids[run_seq + 1][i] = uid + g2s[run_seq + 1][i] = {} + # if compress: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % uid + # update code here to use new pass uid to compress, 2016, Dec 3 + if False: + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md_, + filename, + force_compress=force_compress, + bad_pixel_threshold=2.4e18, + nobytes=4, + para_compress=True, + num_sub=100, + ) + if True: + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + uid, + mask, + md_, + filename, + force_compress=False, + bad_pixel_threshold=2.4e18, + nobytes=4, + para_compress=True, + num_sub=100, + dtypes="uid", + reverse=True, + ) + + try: + md["Measurement"] = db[uid]["start"]["Measurement"] + # md['sample']=db[uid]['start']['sample'] + # md['sample']= 'PS205000-PMMA-207000-SMMA3' + print(md["Measurement"]) + + except: + md["Measurement"] = "Measurement" + md["sample"] = "sample" + + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + Ldet = md["detector_distance"] * 1000 # detector to sample distance (mm) + exposuretime = md["count_time"] + acquisition_period = md["frame_time"] + timeperframe = acquisition_period # for g2 + # timeperframe = exposuretime#for visiblitly + # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... 
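+ # the beam center from the metadata, together with dpix, Ldet, lambda_ and
+ # timeperframe read above, is packed into setup_pargs (the per-uid setup
+ # dictionary carrying the experiment geometry and the output path)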
+ center = md["center"] + + setup_pargs = dict( + uid=uid, + dpix=dpix, + Ldet=Ldet, + lambda_=lambda_, + timeperframe=timeperframe, + center=center, + path=data_dir_, + ) + + md["avg_img"] = avg_img + # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + min_inten = 10 + + # good_start = np.where( np.array(imgsum) > min_inten )[0][0] + good_start = good_start + + if good_end is None: + good_end_ = len(imgs) + else: + good_end_ = good_end + FD = Multifile(filename, good_start, good_end_) + + good_start = max(good_start, np.where(np.array(imgsum) > min_inten)[0][0]) + print("With compression, the good_start frame number is: %s " % good_start) + print("The good_end frame number is: %s " % good_end_) + + norm = None + ################### + + # Do correlaton here + for nconf, seg_mask in enumerate([seg_mask_v, seg_mask_p]): + if nconf == 0: + conf = "v" + else: + conf = "p" + + rcen = md["rcen_%s" % conf] + acen = md["acen_%s" % conf] + + if not para_run: + g2, lag_stepsv = cal_g2( + FD, + seg_mask, + bad_frame_list, + good_start, + num_buf=8, + ) + else: + g2, lag_stepsv = cal_g2p( + FD, seg_mask, bad_frame_list, good_start, num_buf=8, imgsum=None, norm=norm + ) + + if len(lag_steps) < len(lag_stepsv): + lag_steps = lag_stepsv + taus = lag_steps * timeperframe + res_pargs = dict( + taus=taus, + q_ring_center=np.unique(rcen), + ang_center=np.unique(acen), + path=data_dir_, + uid=uid + "_1a_mq%s" % conf, + ) + save_g2(g2, taus=taus, qr=rcen, qz=acen, uid=uid + "_1a_mq%s" % conf, path=data_dir_) + + if nconf == 0: + g2s[run_seq + 1][i]["v"] = g2 # perpendular + else: + g2s[run_seq + 1][i]["p"] = g2 # parallel + + if fit: + if False: + g2_fit_result, taus_fit, g2_fit = get_g2_fit( + g2, + res_pargs=res_pargs, + function="stretched_vibration", + vlim=[0.95, 1.05], + fit_variables={ + "baseline": True, + "beta": True, + "alpha": False, + "relaxation_rate": True, + "freq": fit_vibration, + "amp": True, + }, + fit_range=None, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + "freq": 60, + "amp": 0.1, + }, + ) + + if nconf == 0: # for vertical + function = "stretched" + g2_fit_result, taus_fit, g2_fit = get_g2_fit( + g2, + res_pargs=res_pargs, + function=function, + vlim=[0.95, 1.05], + fit_variables={ + "baseline": True, + "beta": True, + "alpha": False, + "relaxation_rate": True, + }, + fit_range=None, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + }, + ) + else: + function = "flow_para" + g2_fit_result, taus_fit, g2_fit = get_g2_fit( + g2, + res_pargs=res_pargs, + function=function, + vlim=[0.99, 1.05], + fit_range=None, + fit_variables={ + "baseline": True, + "beta": True, + "alpha": False, + "relaxation_rate": True, + "flow_velocity": True, + }, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + "flow_velocity": 1, + }, + ) + + save_g2( + g2_fit, + taus=taus_fit, + qr=rcen, + qz=acen, + uid=uid + "_1a_mq%s" % conf + "_fit", + path=data_dir_, + ) + + res_pargs_fit = dict( + taus=taus, + q_ring_center=np.unique(rcen), + ang_center=[acen[0]], + path=data_dir_, + uid=uid + "_1a_mq%s" % conf + "_fit", + ) + + plot_g2( + g2, + res_pargs=res_pargs, + tau_2=taus_fit, + g2_2=g2_fit, + fit_res=g2_fit_result, + function=function, + master_plot="qz", + vlim=[0.95, 1.05], + geometry="ang_saxs", + append_name=conf + "_fit", + ) + + dfv = 
save_g2_fit_para_tocsv( + g2_fit_result, filename=uid + "_1a_mq" + conf + "_fit_para", path=data_dir_ + ) + + fit_q_rate( + np.unique(rcen)[:], + dfv["relaxation_rate"], + power_variable=False, + uid=uid + "_" + conf + "_fit_rate", + path=data_dir_, + ) + + # psave_obj( fit_result, data_dir_ + 'uid=%s-g2-fit-para'%uid ) + psave_obj(md, data_dir_ + "uid=%s-md" % uid) # save the setup parameters + + FD = 0 + avg_img, imgsum, bad_frame_list = [0, 0, 0] + md["avg_img"] = 0 + imgs = 0 + print("*" * 40) + print() + + taus = taus + return g2s, taus, useful_uids + + +def multi_uids_saxs_xpcs_analysis( + uids, + md, + run_num=1, + sub_num=None, + good_start=10, + good_end=None, + force_compress=False, + fit=True, + compress=True, + para_run=False, +): + """'Aug 16, 2016, YG@CHX-NSLS2 + Do SAXS-XPCS analysis for multi uid data + uids: a list of uids to be analyzed + md: metadata, should at least include + mask: array, mask data + data_dir: the path to save data, the result will be saved in data_dir/uid/... + dpix: + Ldet: + lambda: + timeperframe: + center + run_num: the run number + sub_num: the number in each sub-run + fit: if fit, do fit for g2 and show/save all fit plots + compress: apply a compress algorithm + + Save g2/metadata/g2-fit plot/g2 q-rate plot/ of each uid in data_dir/uid/... + return: + g2s: a dictionary, {run_num: sub_num: g2_of_each_uid} + taus, + use_uids: return the valid uids + """ + + g2s = {} # g2s[run_number][sub_seq] = g2 of each uid + lag_steps = [0] + useful_uids = {} + if sub_num is None: + sub_num = len(uids) // run_num + + mask = md["mask"] + data_dir = md["data_dir"] + ring_mask = md["ring_mask"] + q_ring_center = md["q_ring_center"] + + for run_seq in range(run_num): + g2s[run_seq + 1] = {} + useful_uids[run_seq + 1] = {} + i = 0 + for sub_seq in range(0, sub_num): + # good_end=good_end + + uid = uids[sub_seq + run_seq * sub_num] + print("The %i--th uid to be analyzed is : %s" % (i, uid)) + try: + detector = get_detector(db[uid]) + imgs = load_data(uid, detector, reverse=True) + except: + print("The %i--th uid: %s can not load data" % (i, uid)) + imgs = 0 + + data_dir_ = os.path.join(data_dir, "%s/" % uid) + os.makedirs(data_dir_, exist_ok=True) + + i += 1 + if imgs != 0: + imgsa = apply_mask(imgs, mask) + Nimg = len(imgs) + md_ = imgs.md + useful_uids[run_seq + 1][i] = uid + if compress: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % uid + # update code here to use new pass uid to compress, 2016, Dec 3 + if False: + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md_, + filename, + force_compress=force_compress, + bad_pixel_threshold=2.4e18, + nobytes=4, + para_compress=True, + num_sub=100, + ) + if True: + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + uid, + mask, + md_, + filename, + force_compress=True, + bad_pixel_threshold=2.4e18, + nobytes=4, + para_compress=True, + num_sub=100, + dtypes="uid", + reverse=True, + ) + + try: + md["Measurement"] = db[uid]["start"]["Measurement"] + # md['sample']=db[uid]['start']['sample'] + # md['sample']= 'PS205000-PMMA-207000-SMMA3' + print(md["Measurement"]) + + except: + md["Measurement"] = "Measurement" + md["sample"] = "sample" + + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + Ldet = md["detector_distance"] * 1000 # detector to sample distance (mm) + exposuretime = md["count_time"] + acquisition_period = md["frame_time"] + timeperframe = acquisition_period # for 
g2 + # timeperframe = exposuretime#for visiblitly + # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + center = md["center"] + + setup_pargs = dict( + uid=uid, + dpix=dpix, + Ldet=Ldet, + lambda_=lambda_, + timeperframe=timeperframe, + center=center, + path=data_dir_, + ) + + md["avg_img"] = avg_img + # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + min_inten = 10 + + # good_start = np.where( np.array(imgsum) > min_inten )[0][0] + good_start = good_start + + if good_end is None: + good_end_ = len(imgs) + else: + good_end_ = good_end + FD = Multifile(filename, good_start, good_end_) + + good_start = max(good_start, np.where(np.array(imgsum) > min_inten)[0][0]) + print("With compression, the good_start frame number is: %s " % good_start) + print("The good_end frame number is: %s " % good_end_) + + hmask = create_hot_pixel_mask(avg_img, 1e8) + qp, iq, q = get_circular_average( + avg_img, + mask * hmask, + pargs=setup_pargs, + nx=None, + plot_=False, + show_pixel=True, + xlim=[0.001, 0.05], + ylim=[0.0001, 500], + ) + + norm = get_pixelist_interp_iq(qp, iq, ring_mask, center) + if not para_run: + g2, lag_steps_ = cal_g2c( + FD, ring_mask, bad_frame_list, good_start, num_buf=8, imgsum=None, norm=norm + ) + else: + g2, lag_steps_ = cal_g2p( + FD, ring_mask, bad_frame_list, good_start, num_buf=8, imgsum=None, norm=norm + ) + + if len(lag_steps) < len(lag_steps_): + lag_steps = lag_steps_ + + FD = 0 + avg_img, imgsum, bad_frame_list = [0, 0, 0] + md["avg_img"] = 0 + imgs = 0 + + else: + sampling = 1000 # sampling should be one + + # good_start = check_shutter_open( imgsra, min_inten=5, time_edge = [0,10], plot_ = False ) + good_start = good_start + + good_series = apply_mask(imgsa[good_start:], mask) + + imgsum, bad_frame_list = get_each_frame_intensity( + good_series, sampling=sampling, bad_pixel_threshold=1.2e8, plot_=False, uid=uid + ) + bad_image_process = False + + if len(bad_frame_list): + bad_image_process = True + print(bad_image_process) + + g2, lag_steps_ = cal_g2( + good_series, ring_mask, bad_image_process, bad_frame_list, good_start, num_buf=8 + ) + if len(lag_steps) < len(lag_steps_): + lag_steps = lag_step_ + + taus_ = lag_steps_ * timeperframe + taus = lag_steps * timeperframe + + res_pargs = dict(taus=taus_, q_ring_center=q_ring_center, path=data_dir_, uid=uid) + save_saxs_g2(g2, res_pargs) + # plot_saxs_g2( g2, taus, vlim=[0.95, 1.05], res_pargs=res_pargs) + if fit: + fit_result = fit_saxs_g2( + g2, + res_pargs, + function="stretched", + vlim=[0.95, 1.05], + fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, + guess_values={"baseline": 1.0, "beta": 0.05, "alpha": 1.0, "relaxation_rate": 0.01}, + ) + fit_q_rate( + q_ring_center[:], fit_result["rate"][:], power_variable=False, uid=uid, path=data_dir_ + ) + + psave_obj(fit_result, data_dir_ + "uid=%s-g2-fit-para" % uid) + psave_obj(md, data_dir_ + "uid=%s-md" % uid) # save the setup parameters + + g2s[run_seq + 1][i] = g2 + print("*" * 40) + print() + + return g2s, taus, useful_uids + + +def plot_mul_g2(g2s, md): + """ + Plot multi g2 functions generated by multi_uids_saxs_xpcs_analysis + Will create a large plot with q_number pannels + Each pannel (for each q) will show a number (run number of g2 functions + """ + + q_ring_center = md["q_ring_center"] + sids = md["sids"] + useful_uids = md["useful_uids"] + taus = 
md["taus"] + run_num = md["run_num"] + sub_num = md["sub_num"] + uid_ = md["uid_"] + + fig = plt.figure(figsize=(12, 20)) + plt.title("uid= %s:--->" % uid_, fontsize=20, y=1.06) + + Nq = len(q_ring_center) + if Nq != 1: + plt.axis("off") + sx = int(round(np.sqrt(Nq))) + + if Nq % sx == 0: + sy = int(Nq / sx) + else: + sy = int(Nq / sx + 1) + + for sn in range(Nq): + ax = fig.add_subplot(sx, sy, sn + 1) + ax.set_ylabel(r"$g_2$" + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + + for run_seq in range(run_num): + i = 0 + for sub_seq in range(0, sub_num): + # print( run_seq, sub_seq ) + uid = useful_uids[run_seq + 1][sub_seq + 1] + sid = sids[i] + if i == 0: + title = r"$Q_r= $" + "%.5f " % (q_ring_center[sn]) + r"$\AA^{-1}$" + ax.set_title(title, y=1.1, fontsize=12) + y = g2s[run_seq + 1][sub_seq + 1][:, sn] + len_tau = len(taus) + len_g2 = len(y) + len_ = min(len_tau, len_g2) + + # print ( len_tau, len(y)) + # ax.semilogx(taus[1:len_], y[1:len_], marker = '%s'%next(markers_), color='%s'%next(colors_), + # markersize=6, label = '%s'%sid) + + ax.semilogx( + taus[1:len_], y[1:len_], marker=markers[i], color=colors[i], markersize=6, label="%s" % sid + ) + + if sn == 0: + ax.legend(loc="best", fontsize=6) + + i = i + 1 + fig.set_tight_layout(True) + + +def get_QrQw_From_RoiMask(roi_mask, setup_pargs): + """YG Dev Feb 4@CHX Get Q-center and Q-width fo transmission SAXS + Input: + roi_mask: int-type array, 2D roi mask, with q-index starting from 1 + setup_pargs: dict, at least with keys as + dpix (det pixel size),lamdba_( wavelength), center( beam center) + Output: + qr_cen: the q center of each ring + qr_wid: the q width of each ring + + """ + qp_roi, iq_roi, q_roi = get_circular_average(roi_mask, np.array(roi_mask, dtype=bool), pargs=setup_pargs) + Nmax = roi_mask.max() + qr_cen = np.zeros(Nmax) + qr_wid = np.zeros(Nmax) + for i in range(1, 1 + Nmax): + indi = np.where(iq_roi == i)[0] + qind_s = q_roi[indi[0]] + qind_e = q_roi[indi[-1]] + # print(qind_s, qind_e) + qr_cen[i - 1] = 0.5 * (qind_e + qind_s) + qr_wid[i - 1] = qind_e - qind_s + return qr_cen, qr_wid diff --git a/pyCHX/backups/pyCHX-backup/XPCS_XSVS_SAXS_Multi_2017_V4.py b/pyCHX/backups/pyCHX-backup/XPCS_XSVS_SAXS_Multi_2017_V4.py new file mode 100644 index 0000000..062db0d --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/XPCS_XSVS_SAXS_Multi_2017_V4.py @@ -0,0 +1,606 @@ +# python XPCS_XSVS_SAXS_Multi_2017_V4.py + + +from pyCHX.chx_packages import * +from pyCHX.chx_xpcs_xsvs_jupyter import run_xpcs_xsvs_single + + +def XPCS_XSVS_SAXS_Multi( + start_time, + stop_time, + run_pargs, + suf_ids=None, + uid_average="Au50_7p5PEGX1_vs_slow_120116", +): + scat_geometry = run_pargs["scat_geometry"] + force_compress = run_pargs["force_compress"] + para_compress = run_pargs["para_compress"] + run_fit_form = run_pargs["run_fit_form"] + run_waterfall = run_pargs["run_waterfall"] + run_t_ROI_Inten = run_pargs["run_t_ROI_Inten"] + # run_fit_g2 = run_pargs['run_fit_g2'], + fit_g2_func = run_pargs["fit_g2_func"] + run_one_time = run_pargs["run_one_time"] + run_two_time = run_pargs["run_two_time"] + run_four_time = run_pargs["run_four_time"] + run_xsvs = run_pargs["run_xsvs"] + ############################################################### + if scat_geometry != "saxs": # to be done for other types + run_xsvs = False + ############################################################### + att_pdf_report = run_pargs["att_pdf_report"] + show_plot = run_pargs["show_plot"] + CYCLE = run_pargs["CYCLE"] + mask_path = run_pargs["mask_path"] + 
mask_name = run_pargs["mask_name"] + good_start = run_pargs["good_start"] + use_imgsum_norm = run_pargs["use_imgsum_norm"] + + mask = load_mask(mask_path, mask_name, plot_=False, image_name="%s_mask" % mask_name, reverse=True) + # mask *= pixel_mask + mask[:, 2069] = 0 # False #Concluded from the previous results + # np.save( data_dir + 'mask', mask) + show_img(mask, image_name="%s_mask" % uid_average, save=True, path=data_dir) + mask_load = mask.copy() + + username = getpass.getuser() + data_dir0 = os.path.join("/XF11ID/analysis/", run_pargs["CYCLE"], username, "Results/") + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + data_dir = os.path.join(data_dir0, uid_average + "/") + os.makedirs(data_dir, exist_ok=True) + uid_average = "uid=" + uid_average + + if suf_ids is None: + sids, uids, fuids = find_uids(start_time, stop_time) + else: + sids, uids, fuids = suf_ids + print(uids) + uid = uids[0] + + data_dir_ = data_dir + uid_ = uid_average + ### For Load results + + multi_res = {} + for uid, fuid in zip(guids, fuids): + multi_res[uid] = extract_xpcs_results_from_h5( + filename="uid=%s_Res.h5" % fuid, import_dir=data_dir0 + uid + "/" + ) + # Get and Plot Averaged Data + + mkeys = list(multi_res.keys()) + uid = uid_average + setup_pargs["uid"] = uid + avg_img = get_averaged_data_from_multi_res(multi_res, keystr="avg_img") + imgsum = get_averaged_data_from_multi_res(multi_res, keystr="imgsum") + if scat_geometry == "saxs": + q_saxs = get_averaged_data_from_multi_res(multi_res, keystr="q_saxs") + iq_saxs = get_averaged_data_from_multi_res(multi_res, keystr="iq_saxs") + qt = get_averaged_data_from_multi_res(multi_res, keystr="qt") + iqst = get_averaged_data_from_multi_res(multi_res, keystr="iqst") + elif scat_geometry == "gi_saxs": + qr_1d_pds = get_averaged_data_from_multi_res(multi_res, keystr="qr_1d_pds") + qr_1d_pds = trans_data_to_pd(qr_1d_pds, label=qr_1d_pds_label) + if run_waterfall: + wat = get_averaged_data_from_multi_res(multi_res, keystr="wat") + if run_t_ROI_Inten: + times_roi = get_averaged_data_from_multi_res(multi_res, keystr="times_roi") + mean_int_sets = get_averaged_data_from_multi_res(multi_res, keystr="mean_int_sets") + + if run_one_time: + g2 = get_averaged_data_from_multi_res(multi_res, keystr="g2") + taus = get_averaged_data_from_multi_res(multi_res, keystr="taus") + g2_pds = save_g2_general( + g2, + taus=taus, + qr=np.array(list(qval_dict.values()))[:, 0], + uid=uid + "_g2.csv", + path=data_dir, + return_res=True, + ) + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function=fit_g2_func, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + }, + ) + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename=uid + "_g2_fit_paras.csv", path=data_dir) + + if run_two_time: + g12b = get_averaged_data_from_multi_res(multi_res, keystr="g12b", different_length=True) + g2b = get_averaged_data_from_multi_res(multi_res, keystr="g2b") + tausb = get_averaged_data_from_multi_res(multi_res, keystr="tausb") + + g2b_pds = save_g2_general( + g2b, + taus=tausb, + qr=np.array(list(qval_dict.values()))[:, 0], + qz=None, + uid=uid + "_g2b.csv", + path=data_dir, + return_res=True, + ) + g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( + g2b, + tausb, + function=fit_g2_func, + vlim=[0.95, 1.05], + fit_range=None, 
+ fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + }, + ) + + g2b_fit_paras = save_g2_fit_para_tocsv(g2_fit_resultb, filename=uid + "_g2b_fit_paras.csv", path=data_dir) + + if run_four_time: + g4 = get_averaged_data_from_multi_res(multi_res, keystr="g4") + taus4 = get_averaged_data_from_multi_res(multi_res, keystr="taus4") + g4_pds = save_g2_general( + g4, + taus=taus4, + qr=np.array(list(qval_dict.values()))[:, 0], + qz=None, + uid=uid + "_g4.csv", + path=data_dir, + return_res=True, + ) + + if run_xsvs: + contrast_factorL = get_averaged_data_from_multi_res( + multi_res, keystr="contrast_factorL", different_length=False + ) + times_xsvs = get_averaged_data_from_multi_res(multi_res, keystr="times_xsvs", different_length=False) + cont_pds = save_arrays( + contrast_factorL, + label=times_xsvs, + filename="%s_contrast_factorL.csv" % uid, + path=data_dir, + return_res=True, + ) + if False: + spec_kmean = get_averaged_data_from_multi_res(multi_res, keystr="spec_kmean") + spec_pds = get_averaged_data_from_multi_res(multi_res, keystr="spec_pds", different_length=False) + times_xsvs = get_averaged_data_from_multi_res(multi_res, keystr="times_xsvs", different_length=False) + spec_his, spec_std = get_his_std_from_pds(spec_pds, his_shapes=None) + ML_val, KL_val, K_ = get_xsvs_fit( + spec_his, + spec_kmean, + spec_std, + max_bins=2, + varyK=False, + ) + contrast_factorL = get_contrast(ML_val) + spec_km_pds = save_KM( + spec_kmean, + KL_val, + ML_val, + qs=q_ring_center, + level_time=times_xsvs, + uid=uid_average, + path=data_dir_average, + ) + plot_xsvs_fit( + spec_his, + ML_val, + KL_val, + K_mean=spec_kmean, + spec_std=spec_std, + xlim=[0, 15], + vlim=[0.9, 1.1], + uid=uid_average, + qth=None, + logy=True, + times=times_xsvs, + q_ring_center=q_ring_center, + path=data_dir, + ) + + if scat_geometry == "saxs": + show_saxs_qmap( + avg_img, + setup_pargs, + width=600, + vmin=0.1, + vmax=np.max(avg_img * 0.1), + logs=True, + image_name="%s_img_avg" % uid, + save=True, + ) + plot_circular_average( + q_saxs, + iq_saxs, + q_saxs, + pargs=setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()], + ylim=[iq_saxs.min(), iq_saxs.max()], + ) + plot_qIq_with_ROI( + q_saxs, + iq_saxs, + qr, + logs=True, + uid=uid, + xlim=[q_saxs.min(), q_saxs.max()], + ylim=[iq_saxs.min(), iq_saxs.max()], + save=True, + path=data_dir, + ) + plot1D( + y=imgsum, + title="%s_img_sum_t" % uid, + xlabel="Frame", + colors="b", + ylabel="Total_Intensity", + legend="imgsum", + save=True, + path=data_dir, + ) + plot_t_iqc( + qt, + iqst, + frame_edge=None, + pargs=setup_pargs, + xlim=[qt.min(), qt.max()], + ylim=[iqst.min(), iqst.max()], + save=True, + ) + show_ROI_on_image( + avg_img, + roi_mask, + center, + label_on=False, + rwidth=700, + alpha=0.9, + save=True, + path=data_dir, + uid=uid, + vmin=np.min(avg_img), + vmax=np.max(avg_img), + ) + + elif scat_geometry == "gi_saxs": + show_img( + avg_img, + vmin=0.1, + vmax=np.max(avg_img * 0.1), + logs=True, + image_name=uidstr + "_img_avg", + save=True, + path=data_dir, + ) + plot_qr_1d_with_ROI( + qr_1d_pds, + qr_center=np.unique(np.array(list(qval_dict.values()))[:, 0]), + loglog=False, + save=True, + uid=uidstr, + path=data_dir, + ) + show_qzr_roi(avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, uid=uidstr) + + if run_waterfall: + plot_waterfallc( + wat, qth_interest, aspect=None, vmax=np.max(wat), uid=uid, save=True, path=data_dir, 
beg=good_start + ) + if run_t_ROI_Inten: + plot_each_ring_mean_intensityc(times_roi, mean_int_sets, uid=uid, save=True, path=data_dir) + + if run_one_time: + plot_g2_general( + g2_dict={1: g2, 2: g2_fit}, + taus_dict={1: taus, 2: taus_fit}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=g2_fit_result, + geometry=scat_geometry, + filename=uid + "_g2", + path=data_dir, + function=fit_g2_func, + ylabel="g2", + append_name="_fit", + ) + + D0, qrate_fit_res = get_q_rate_fit_general( + qval_dict, g2_fit_paras["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict, + g2_fit_paras["relaxation_rate"], + qrate_fit_res, + geometry=scat_geometry, + uid=uid, + path=data_dir, + ) + + if run_two_time: + show_C12( + g12b, + q_ind=qth_interest, + N1=0, + N2=min(len(imgsa), 1000), + vmin=1.01, + vmax=1.25, + timeperframe=timeperframe, + save=True, + path=data_dir, + uid=uid, + ) + plot_g2_general( + g2_dict={1: g2b, 2: g2_fitb}, + taus_dict={1: tausb, 2: taus_fitb}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=g2_fit_resultb, + geometry=scat_geometry, + filename=uid + "_g2", + path=data_dir, + function=fit_g2_func, + ylabel="g2", + append_name="_b_fit", + ) + + if run_two_time and run_one_time: + plot_g2_general( + g2_dict={1: g2, 2: g2b}, + taus_dict={1: taus, 2: tausb}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + g2_labels=["from_one_time", "from_two_time"], + geometry=scat_geometry, + filename=uid + "_g2_two_g2", + path=data_dir, + ylabel="g2", + ) + if run_four_time: + plot_g2_general( + g2_dict={1: g4}, + taus_dict={1: taus4}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=None, + geometry=scat_geometry, + filename=uid + "_g4", + path=data_dir, + ylabel="g4", + ) + + if run_xsvs: + plot_g2_contrast( + contrast_factorL, + g2, + times_xsvs, + taus, + qr, + vlim=[0.8, 2.0], + qth=qth_interest, + uid=uid, + path=data_dir, + legend_size=14, + ) + plot_g2_contrast( + contrast_factorL, + g2, + times_xsvs, + taus, + qr, + vlim=[0.8, 1.2], + qth=None, + uid=uid, + path=data_dir, + legend_size=4, + ) + + md = multi_res[mkeys[0]]["md"] + md["uid"] = uid + md["suid"] = uid + md["Measurement"] = uid + md["beg"] = None + md["end"] = None + md["bad_frame_list"] = "unknown" + md["metadata_file"] = data_dir + "md.csv-&-md.pkl" + psave_obj(md, data_dir + "%s_md" % uid) # save the setup parameters + save_dict_csv(md, data_dir + "%s_md.csv" % uid, "w") + + Exdt = {} + if scat_geometry == "gi_saxs": + for k, v in zip( + ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "qr_1d_pds"], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, qr_1d_pds], + ): + Exdt[k] = v + elif scat_geometry == "saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "iqst", + "qt", + "roi_mask", + "qval_dict", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + ], + [ + md, + q_saxs, + iq_saxs, + iqst, + qt, + roi_mask, + qval_dict, + avg_img, + mask, + pixel_mask, + imgsum, + ], + ): + Exdt[k] = v + + if run_waterfall: + Exdt["wat"] = wat + if run_t_ROI_Inten: + Exdt["times_roi"] = times_roi + Exdt["mean_int_sets"] = mean_int_sets + if run_one_time: + for k, v in zip(["taus", "g2", "g2_fit_paras"], [taus, g2, g2_fit_paras]): + Exdt[k] = v + if run_two_time: + for k, v in zip(["tausb", "g2b", "g2b_fit_paras", "g12b"], [tausb, g2b, g2b_fit_paras, g12b]): + Exdt[k] = v + if run_four_time: + for k, v in zip(["taus4", "g4"], [taus4, g4]): + Exdt[k] = v + if run_xsvs: + for k, v in zip( + ["spec_kmean", "spec_pds", "times_xsvs", 
"spec_km_pds", "contrast_factorL"], + [spec_kmean, spec_pds, times_xsvs, spec_km_pds, contrast_factorL], + ): + Exdt[k] = v + + contr_pds = save_arrays( + Exdt["contrast_factorL"], + label=Exdt["times_xsvs"], + filename="%s_contr.csv" % uid, + path=data_dir, + return_res=True, + ) + + export_xpcs_results_to_h5(uid + "_Res.h5", data_dir, export_dict=Exdt) + # extract_dict = extract_xpcs_results_from_h5( filename = uid + '_Res.h5', import_dir = data_dir ) + ## Create PDF report for each uid + pdf_out_dir = data_dir + pdf_filename = "XPCS_Analysis_Report_for_%s%s.pdf" % (uid_average, pdf_version) + if run_xsvs: + pdf_filename = "XPCS_XSVS_Analysis_Report_for_%s%s.pdf" % (uid_average, pdf_version) + + # pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf"%(uid_average,'_2') + make_pdf_report( + data_dir, + uid_average, + pdf_out_dir, + pdf_filename, + username, + run_fit_form, + run_one_time, + run_two_time, + run_four_time, + run_xsvs, + report_type=scat_geometry, + ) + ### Attach each g2 result to the corresponding olog entry + if att_pdf_report: + os.environ["HTTPS_PROXY"] = "https://proxy:8888" + os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" + pname = pdf_out_dir + pdf_filename + atch = [Attachment(open(pname, "rb"))] + try: + update_olog_uid(uid=fuids[-1], text="Add XPCS Averaged Analysis PDF Report", attachments=atch) + except: + print( + "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." + % pname + ) + + print(fuids[-1]) + + # The End! + + +if False: + start_time, stop_time = "2016-12-1 16:30:00", "2016-12-1 16:31:50" # for 10 nm, 20, for test purpose + suf_ids = find_uids(start_time, stop_time) + sp = "test" + uid_averages = [sp + "_vs_test1_120116", sp + "_vs_test2_120116", sp + "_vs_test3_120116"] + + run_pargs = dict( + scat_geometry="saxs", + # scat_geometry = 'gi_saxs', + force_compress=False, # True, #False, #True,#False, + para_compress=True, + run_fit_form=False, + run_waterfall=True, # False, + run_t_ROI_Inten=True, + # run_fit_g2 = True, + fit_g2_func="stretched", + run_one_time=True, # False, + run_two_time=True, # False, + run_four_time=False, # True, #False, + run_xsvs=True, + att_pdf_report=True, + show_plot=False, + CYCLE="2016_3", + # if scat_geometry == 'gi_saxs': + # mask_path = '/XF11ID/analysis/2016_3/masks/', + # mask_name = 'Nov16_4M-GiSAXS_mask.npy', + # elif scat_geometry == 'saxs': + mask_path="/XF11ID/analysis/2016_3/masks/", + mask_name="Nov28_4M_SAXS_mask.npy", + good_start=5, + #####################################for saxs + uniformq=True, + inner_radius=0.005, # 0.005 for 50 nmAu/SiO2, 0.006, #for 10nm/coralpor + outer_radius=0.04, # 0.04 for 50 nmAu/SiO2, 0.05, #for 10nm/coralpor + num_rings=12, + gap_ring_number=6, + number_rings=1, + ############################for gi_saxs + # inc_x0 = 1473, + # inc_y0 = 372, + # refl_x0 = 1473, + # refl_y0 = 730, + qz_start=0.025, + qz_end=0.04, + qz_num=3, + gap_qz_num=1, + # qz_width = ( qz_end - qz_start)/(qz_num +1), + qr_start=0.0025, + qr_end=0.07, + qr_num=14, + gap_qr_num=5, + definde_second_roi=True, + qz_start2=0.04, + qz_end2=0.050, + qz_num2=1, + gap_qz_num2=1, + qr_start2=0.002, + qr_end2=0.064, + qr_num2=10, + gap_qr_num2=5, + # qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + # width = 0.0002 + qth_interest=1, # the intested single qth + use_sqnorm=False, + use_imgsum_norm=True, + pdf_version="_1", # for pdf report name + ) + + step = 1 + Nt = len(uid_averages) + for i in range(Nt): + t0 = time.time() + suf_idsi = 
( + suf_ids[0][i * step : (i + 1) * step], + suf_ids[1][i * step : (i + 1) * step], + suf_ids[2][i * step : (i + 1) * step], + ) + XPCS_XSVS_SAXS_Multi(0, 0, run_pargs=run_pargs, suf_ids=suf_idsi, uid_average=uid_averages[i]) + + run_time(t0) diff --git a/pyCHX/backups/pyCHX-backup/__init__.py b/pyCHX/backups/pyCHX-backup/__init__.py new file mode 100644 index 0000000..955b5f6 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/__init__.py @@ -0,0 +1,6 @@ +__author__ = "Yugang Zhang" + +from ._version import get_versions + +__version__ = get_versions()["version"] +del get_versions diff --git a/pyCHX/backups/pyCHX-backup/_version.py b/pyCHX/backups/pyCHX-backup/_version.py new file mode 100644 index 0000000..6532713 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/_version.py @@ -0,0 +1,455 @@ +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. Generated by +# versioneer-0.15 (https://github.com/warner/python-versioneer) + +import errno +import os +import re +import subprocess +import sys + + +def get_keywords(): + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). + git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + keywords = {"refnames": git_refnames, "full": git_full} + return keywords + + +class VersioneerConfig: + pass + + +def get_config(): + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "None" + cfg.versionfile_source = "chxtools/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + pass + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + def decorate(f): + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen( + [c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None) + ) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None + stdout = p.communicate()[0].strip() + if sys.version_info[0] >= 3: + stdout = stdout.decode() + if p.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + return None + return stdout + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + # Source tarballs conventionally unpack into a directory that includes + # both the project name and a version string. 
+ dirname = os.path.basename(root) + if not dirname.startswith(parentdir_prefix): + if verbose: + print( + "guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix) + ) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + return {"version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None} + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + f.close() + except EnvironmentError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + if not keywords: + raise NotThisMethod("no keywords at all, weird") + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r"\d", r)]) + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix) :] + if verbose: + print("picking %s" % r) + return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + } + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + # this runs 'git' from the root of the source tree. This only gets called + # if the git-archive 'subst' keywords were *not* expanded, and + # _version.py hasn't already been rewritten with a short version string, + # meaning we're inside a checked out source tree. 
+ + if not os.path.exists(os.path.join(root, ".git")): + if verbose: + print("no .git in %s" % root) + raise NotThisMethod("no .git directory") + + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + # if there is a tag, this yields TAG-NUM-gHEX[-dirty] + # if there are no tags, this yields HEX[-dirty] (no NUM) + describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[: git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix) :] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + return pieces + + +def plus_or_dot(pieces): + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + # now build up version string, with post-release "local version + # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + # exceptions: + # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + # TAG[.post.devDISTANCE] . No -dirty + + # exceptions: + # 1: no tags. 0.post.devDISTANCE + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that + # .dev0 sorts backwards (a dirty tree will appear "older" than the + # corresponding clean one), but you shouldn't be releasing software with + # -dirty anyways. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty + # --always' + + # exceptions: + # 1: no tags. HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty + # --always -long'. The distance/hash is unconditional. + + # exceptions: + # 1: no tags. HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + if pieces["error"]: + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + } + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} + + +def get_versions(): + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. 
+ + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. + for i in cfg.versionfile_source.split("/"): + root = os.path.dirname(root) + except NameError: + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + } + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} diff --git a/pyCHX/backups/pyCHX-backup/backups/Create_Report_05012024.py b/pyCHX/backups/pyCHX-backup/backups/Create_Report_05012024.py new file mode 100644 index 0000000..f434328 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/Create_Report_05012024.py @@ -0,0 +1,1940 @@ +''' +Yugang Created at Aug 08, 2016, CHX-NSLS-II + +Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline + +How to use: +python Create_Report.py full_file_path uid output_dir (option) + +An exmplae to use: +python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 + +python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/ + +''' + +def check_dict_keys( dicts, key): + if key not in list(dicts.keys()): + dicts[key] = 'unknown' + + + +import h5py + +from reportlab.pdfgen import canvas +from reportlab.lib.units import inch, cm , mm +from reportlab.lib.colors import pink, green, brown, white, black, red, blue + + +from reportlab.lib.styles import getSampleStyleSheet +#from reportlab.platypus import Image, Paragraph, Table + +from reportlab.lib.pagesizes import letter, A4 +from pyCHX.chx_generic_functions import (pload_obj ) + + +from PIL import Image +from time import time +from datetime import datetime + +import sys,os +import pandas as pds +import numpy as np + + +def add_one_line_string( c, s, top, left=30, fontsize = 11 ): + if (fontsize*len(s )) >1000: + fontsize = 1000./(len(s)) + c.setFont("Helvetica", fontsize ) + c.drawString(left, top, s) + + + +def add_image_string( c, imgf, data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_ = False ): + + image = data_dir + imgf + if os.path.exists(image): + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= img_height + width = height/ratio + #if width>400: + # width = 350 + # height = width*ratio + c.drawImage( image, img_left, img_top, width= width,height=height,mask=None) + + c.setFont("Helvetica", 16) + c.setFillColor( blue ) + c.drawString(str1_left, str1_top,str1 ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString(str2_left, str2_top, 'filename: %s'%imgf ) + if return_: + return height/ratio + + else: + c.setFillColor( blue ) + c.drawString( str1_left, str1_top, str1) + c.setFillColor(red) + c.drawString( str1_left, str1_top -40, '-->Not Calculated!' 
) + + + +class create_pdf_report( object ): + + '''Aug 16, YG@CHX-NSLS-II + Create a pdf report by giving data_dir, uid, out_dir + data_dir: the input data directory, including all necessary images + the images names should be: + meta_file = 'uid=%s-md'%uid + avg_img_file = 'uid=%s--img-avg-.png'%uid + ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid + qiq_file = 'uid=%s--Circular-Average-.png'%uid + ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid + + Iq_t_file = 'uid=%s--Iq-t-.png'%uid + img_sum_t_file = 'uid=%s--img-sum-t.png'%uid + wat_file= 'uid=%s--Waterfall-.png'%uid + Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid + + g2_file = 'uid=%s--g2-.png'%uid + g2_fit_file = 'uid=%s--g2--fit-.png'%uid + q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid + + two_time_file = 'uid=%s--Two-time-.png'%uid + two_g2_file = 'uid=%s--g2--two-g2-.png'%uid + + uid: the unique id + out_dir: the output directory + report_type: + 'saxs': report saxs results + 'gisaxs': report gisaxs results + + + Output: + A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder + ''' + + def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None, + report_type='saxs',md=None, res_h5_filename=None ): + from datetime import datetime + self.data_dir = data_dir + self.uid = uid + self.md = md + #print(md) + if user is None: + user = 'chx' + self.user = user + if out_dir is None: + out_dir = data_dir + if not os.path.exists(out_dir): + os.makedirs(out_dir) + self.out_dir=out_dir + + self.styles = getSampleStyleSheet() + self.width, self.height = letter + + self.report_type = report_type + dt =datetime.now() + CurTime = '%02d/%02d/%s/-%02d/%02d/' % ( dt.month, dt.day, dt.year,dt.hour,dt.minute) + self.CurTime = CurTime + if filename is None: + filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid + filename=out_dir + filename + c = canvas.Canvas( filename, pagesize=letter) + self.filename= filename + self.res_h5_filename = res_h5_filename + #c.setTitle("XPCS Analysis Report for uid=%s"%uid) + c.setTitle(filename) + self.c = c + if load: + self.load_metadata() + + def load_metadata(self): + uid=self.uid + data_dir = self.data_dir + #load metadata + meta_file = 'uid=%s_md'%uid + self.metafile = data_dir + meta_file + if self.md is None: + md = pload_obj( data_dir + meta_file ) + self.md = md + else: + md = self.md + #print('Get md from giving md') + #print(md) + self.sub_title_num = 0 + uid_g2 = None + uid_c12 = None + if 'uid_g2' in list(md.keys()): + uid_g2 = md['uid_g2'] + if 'uid_c12' in list(md.keys()): + uid_c12 = md['uid_c12'] + + '''global definition''' + + if 'beg_OneTime' in list( md.keys()): + beg_OneTime = md['beg_OneTime'] + end_OneTime = md['end_OneTime'] + else: + beg_OneTime = None + end_OneTime = None + + if 'beg_TwoTime' in list( md.keys()): + beg_TwoTime = md['beg_TwoTime'] + end_TwoTime = md['end_TwoTime'] + else: + beg_TwoTime = None + end_TwoTime = None + + + try: + beg = md['beg'] + end= md['end'] + uid_ = uid + '_fra_%s_%s'%(beg, end) + if beg_OneTime is None: + uid_OneTime = uid + '_fra_%s_%s'%(beg, end) + else: + uid_OneTime = uid + '_fra_%s_%s'%(beg_OneTime, end_OneTime) + if beg_TwoTime is None: + uid_TwoTime = uid + '_fra_%s_%s'%(beg, end) + else: + uid_TwoTime = uid + '_fra_%s_%s'%(beg_TwoTime, end_TwoTime) + + except: + uid_ = uid + uid_OneTime = uid + if beg is None: + uid_ = uid + uid_OneTime = uid + + self.avg_img_file = 'uid=%s_img_avg.png'%uid + self.ROI_on_img_file = 'uid=%s_ROI_on_Image.png'%uid + + self.qiq_file = 
'uid=%s_q_Iq.png'%uid + self.qiq_fit_file = 'uid=%s_form_factor_fit.png'%uid + #self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid + if self.report_type =='saxs' or self.report_type =='ang_saxs': + self.ROI_on_Iq_file = 'uid=%s_ROI_on_Iq.png'%uid + + elif self.report_type =='gi_saxs': + self.ROI_on_Iq_file = 'uid=%s_Qr_ROI.png'%uid + + self.Iq_t_file = 'uid=%s_q_Iqt.png'%uid + self.img_sum_t_file = 'uid=%s_img_sum_t.png'%uid + self.wat_file= 'uid=%s_waterfall.png'%uid + self.Mean_inten_t_file= 'uid=%s_t_ROIs.png'%uid + self.oavs_file = 'uid=%s_OAVS.png'%uid + + if uid_g2 is None: + uid_g2 = uid_OneTime + self.g2_file = 'uid=%s_g2.png'%uid_g2 + self.g2_fit_file = 'uid=%s_g2_fit.png'%uid_g2 + #print( self.g2_fit_file ) + self.g2_new_page = False + self.g2_fit_new_page = False + if self.report_type =='saxs': + jfn = 'uid=%s_g2.png'%uid_g2 + if os.path.exists( data_dir + jfn): + self.g2_file = jfn + else: + jfn = 'uid=%s_g2__joint.png'%uid_g2 + if os.path.exists( data_dir + jfn): + self.g2_file = jfn + self.g2_new_page = True + #self.g2_new_page = True + jfn = 'uid=%s_g2_fit.png'%uid_g2 + if os.path.exists(data_dir + jfn ): + self.g2_fit_file = jfn + #self.g2_fit_new_page = True + else: + jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 + if os.path.exists(data_dir + jfn ): + self.g2_fit_file = jfn + self.g2_fit_new_page = True + + else: + jfn = 'uid=%s_g2__joint.png'%uid_g2 + if os.path.exists( data_dir + jfn): + self.g2_file = jfn + self.g2_new_page = True + jfn = 'uid=%s_g2_fit__joint.png'%uid_g2 + if os.path.exists(data_dir + jfn ): + self.g2_fit_file = jfn + self.g2_fit_new_page = True + + self.q_rate_file = 'uid=%s_Q_Rate_fit.png'%uid_g2 + self.q_rate_loglog_file = 'uid=%s_Q_Rate_loglog.png'%uid_g2 + self.g2_q_fitpara_file = 'uid=%s_g2_q_fitpara_plot.png'%uid_g2 + + + #print( self.q_rate_file ) + if uid_c12 is None: + uid_c12 = uid_ + self.q_rate_two_time_fit_file = 'uid=%s_two_time_Q_Rate_fit.png'%uid_c12 + #print( self.q_rate_two_time_fit_file ) + + self.two_time_file = 'uid=%s_Two_time.png'%uid_c12 + self.two_g2_file = 'uid=%s_g2_two_g2.png'%uid_c12 + + if self.report_type =='saxs': + + jfn = 'uid=%s_g2_two_g2.png'%uid_c12 + self.two_g2_new_page = False + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') + self.two_g2_file = jfn + #self.two_g2_new_page = True + else: + jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 + self.two_g2_new_page = False + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') + self.two_g2_file = jfn + self.two_g2_new_page = True + else: + jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12 + self.two_g2_new_page = False + if os.path.exists( data_dir + jfn ): + #print( 'Here we go') + self.two_g2_file = jfn + self.two_g2_new_page = True + + + self.four_time_file = 'uid=%s_g4.png'%uid_ + jfn = 'uid=%s_g4__joint.png'%uid_ + self.g4_new_page = False + if os.path.exists( data_dir + jfn ): + self.four_time_file = jfn + self.g4_new_page = True + + self.xsvs_fit_file = 'uid=%s_xsvs_fit.png'%uid_ + self.contrast_file = 'uid=%s_contrast.png'%uid_ + self.dose_file = 'uid=%s_dose_analysis.png'%uid_ + + jfn = 'uid=%s_dose_analysis__joint.png'%uid_ + self.dose_file_new_page = False + if os.path.exists( data_dir + jfn ): + self.dose_file = jfn + self.dose_file_new_page = True + + #print( self.dose_file ) + if False: + self.flow_g2v = 'uid=%s_1a_mqv_g2_v_fit.png'%uid_ + self.flow_g2p = 'uid=%s_1a_mqp_g2_p_fit.png'%uid_ + self.flow_g2v_rate_fit = 'uid=%s_v_fit_rate_Q_Rate_fit.png'%uid_ + self.flow_g2p_rate_fit = 'uid=%s_p_fit_rate_Q_Rate_fit.png'%uid_ + + if True: + self.two_time = 
'uid=%s_pv_two_time.png'%uid_ + #self.two_time_v = 'uid=%s_pv_two_time.png'%uid_ + + #self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_ + #self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_ + self.flow_g2_g2b_p = 'uid=%s_g2_two_g2_p.png'%uid_ + self.flow_g2_g2b_v = 'uid=%s_g2_two_g2_v.png'%uid_ + + self.flow_g2bv_rate_fit = 'uid=%s_vertb_Q_Rate_fit.png'%uid_ + self.flow_g2bp_rate_fit = 'uid=%s_parab_Q_Rate_fit.png'%uid_ + + self.flow_g2v = 'uid=%s_g2_v_fit.png'%uid_ + self.flow_g2p = 'uid=%s_g2_p_fit.png'%uid_ + self.flow_g2v_rate_fit = 'uid=%s_vert_Q_Rate_fit.png'%uid_ + self.flow_g2p_rate_fit = 'uid=%s_para_Q_Rate_fit.png'%uid_ + + #self.report_header(page=1, top=730, new_page=False) + #self.report_meta(new_page=False) + + self.q2Iq_file = 'uid=%s_q2_iq.png'%uid + self.iq_invariant_file = 'uid=%s_iq_invariant.png'%uid + + def report_invariant( self, top= 300, new_page=False): + '''create the invariant analysis report + two images: + ROI on average intensity image + ROI on circular average + ''' + uid=self.uid + c= self.c + #add sub-title, static images + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 230 + self.sub_title_num +=1 + c.drawString(10, top, "%s. I(q) Invariant Analysis"%self.sub_title_num ) #add title + #add q2Iq + c.setFont("Helvetica", 14) + imgf = self.q2Iq_file + #print( imgf ) + label = 'q^2*I(q)' + add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=180, + str1_left=110, str1_top = top-35,str1=label, + str2_left = 60, str2_top = top -320 ) + + #add iq_invariant + imgf = self.iq_invariant_file + img_height= 180 + img_left,img_top =320, top - ds*1.15 + str1_left, str1_top,str1= 420, top- 35, 'I(q) Invariant' + str2_left, str2_top = 350, top- 320 + + #print ( imgf ) + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + + def report_header(self, page=1, new_page=False): + '''create headers, including title/page number''' + c= self.c + CurTime = self.CurTime + uid=self.uid + user=self.user + c.setFillColor(black) + c.setFont("Helvetica", 14) + #add page number + c.drawString(250, 10, "Page--%s--"%( page ) ) + #add time stamp + + #c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) ) + s_ = "Created at %s@CHX-By-%s"%( CurTime,user ) + add_one_line_string( c, s_, 10, left=350,fontsize = 11 ) + + #add title + #c.setFont("Helvetica", 22) + title = "XPCS Analysis Report for uid=%s"%uid + c.setFont("Helvetica", 1000/( len(title) ) ) + #c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title + c.drawString(50,760, "XPCS Analysis Report for uid=%s"%uid ) #add title + #add a line under title + c.setStrokeColor( red ) + c.setLineWidth(width=1.5) + c.line( 50, 750, 550, 750 ) + if new_page: + c.showPage() + c.save() + + + def report_meta(self, top=740, new_page=False): + '''create the meta data report, + the meta data include: + uid + Sample: + Measurement + Wavelength + Detector-Sample Distance + Beam Center + Mask file + Data dir + Pipeline notebook + ''' + + c=self.c + #load metadata + md = self.md + try: + uid = md['uid'] + except: + uid=self.uid + #add sub-title, metadata + c.setFont("Helvetica", 20) + ds = 15 + self.sub_title_num += 1 + c.drawString(10, top, "%s. 
Metadata"%self.sub_title_num ) #add title + top = top - 5 + fontsize = 11 + c.setFont("Helvetica", fontsize) + + nec_keys = [ 'sample', 'start_time', 'stop_time','Measurement' ,'exposure time' ,'incident_wavelength', 'cam_acquire_t', + 'frame_time','detector_distance', 'feedback_x', 'feedback_y', 'shutter mode', + 'beam_center_x', 'beam_center_y', 'beam_refl_center_x', 'beam_refl_center_y','mask_file','bad_frame_list', 'transmission', 'roi_mask_file'] + for key in nec_keys: + check_dict_keys(md, key) + + try:#try exp time from detector + exposuretime= md['count_time'] #exposure time in sec + except: + exposuretime= md['cam_acquire_time'] #exposure time in sec + + try:#try acq time from detector + acquisition_period = md['frame_time'] + except: + try: + acquisition_period = md['acquire period'] + except: + uid = md['uid'] + acquisition_period = float( db[uid]['start']['acquire period'] ) + + + s = [] + s.append( 'UID: %s'%uid ) ###line 1, for uid + s.append('Sample: %s'%md['sample'] ) ####line 2 sample + s.append('Data Acquisition From: %s To: %s'%(md['start_time'], md['stop_time']))####line 3 Data Acquisition time + s.append( 'Measurement: %s'%md['Measurement'] ) ####line 4 'Measurement + + #print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) + #print(acquisition_period) + s.append( 'Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms'%( md['incident_wavelength'], int(md['number of images']),round(float(exposuretime)*1000,4), round(float( acquisition_period )*1000,4) ) ) ####line 5 'lamda... + + s.append( 'Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s'%( + md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) ) ####line 6 'Detector-Sample Distance.. + if self.report_type == 'saxs': + s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + elif self.report_type == 'gi_saxs': + s7= ('Incident Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + + ' || ' + + 'Reflect Center: [%s, %s] (pixel)'%(md['beam_refl_center_x'], md['beam_refl_center_y']) ) + elif self.report_type == 'ang_saxs' or self.report_type == 'gi_waxs' : + s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) + else: + s7 = '' + + s7 += ' || ' + 'BadLen: %s'%len(md['bad_frame_list']) + s7 += ' || ' + 'Transmission: %s'%md['transmission'] + s.append( s7 ) ####line 7 'Beam center... 
+ m = 'Mask file: %s'%md['mask_file'] + ' || ' + 'ROI mask file: %s'%md['roi_mask_file'] + #s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename + #s.append( ) ####line 8 mask filename + s.append(m) + + if self.res_h5_filename is not None: + self.data_dir_ = self.data_dir + self.res_h5_filename + else: + self.data_dir_ = self.data_dir + s.append( 'Analysis Results Dir: %s'%self.data_dir_ ) ####line 9 results folder + + + s.append( 'Metadata Dir: %s.csv-&.pkl'%self.metafile ) ####line 10 metadata folder + try: + s.append( 'Pipeline notebook: %s'%md['NOTEBOOK_FULL_PATH'] ) ####line 11 notebook folder + except: + pass + #print( 'here' ) + line =1 + for s_ in s: + add_one_line_string( c, s_, top -ds*line , left=30,fontsize = fontsize ) + line += 1 + + if new_page: + c.showPage() + c.save() + + def report_static( self, top=560, new_page=False, iq_fit=False): + '''create the static analysis report + two images: + average intensity image + circular average + + ''' + #add sub-title, static images + + c= self.c + c.setFont("Helvetica", 20) + uid=self.uid + + ds = 220 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Static Analysis"%self.sub_title_num ) #add title + + #add average image + c.setFont("Helvetica", 14) + + imgf = self.avg_img_file + + if self.report_type == 'saxs': + ipos = 60 + dshift=0 + elif self.report_type == 'gi_saxs': + ipos = 200 + dshift= 140 + elif self.report_type == 'ang_saxs': + ipos = 200 + dshift= 140 + else: + ipos = 200 + dshift= 140 + + + add_image_string( c, imgf, self.data_dir, img_left= ipos, img_top=top-ds, img_height=180, + str1_left=90 + dshift, str1_top = top-35,str1='Average Intensity Image', + str2_left = 80 + dshift, str2_top = top -230 ) + + #add q_Iq + if self.report_type == 'saxs': + imgf = self.qiq_file + #print(imgf) + if iq_fit: + imgf = self.qiq_fit_file + label = 'Circular Average' + lab_pos = 390 + fn_pos = 320 + add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, + str1_left=lab_pos, str1_top = top-35,str1=label, + str2_left = fn_pos, str2_top = top -230 ) + else: + if False: + imgf = self.ROI_on_Iq_file #self.qr_1d_file + label = 'Qr-1D' + lab_pos = 420 + fn_pos = 350 + + add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180, + str1_left=lab_pos, str1_top = top-35,str1=label, + str2_left = fn_pos, str2_top = top -230 ) + if new_page: + c.showPage() + c.save() + + def report_ROI( self, top= 300, new_page=False): + '''create the static analysis report + two images: + ROI on average intensity image + ROI on circular average + ''' + uid=self.uid + c= self.c + #add sub-title, static images + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 230 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Define of ROI"%self.sub_title_num ) #add title + #add ROI on image + c.setFont("Helvetica", 14) + imgf = self.ROI_on_img_file + label = 'ROI on Image' + add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=240, + str1_left=110, str1_top = top-35,str1=label, + str2_left = 60, str2_top = top -260 ) + + #add q_Iq + if self.report_type == 'saxs' or self.report_type == 'gi_saxs' or self.report_type == 'ang_saxs': + imgf = self.ROI_on_Iq_file + img_height=180 + img_left,img_top =320, top - ds + str1_left, str1_top,str1= 420, top- 35, 'ROI on Iq' + str2_left, str2_top = 350, top- 260 + + #print ( imgf ) + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + def report_time_analysis( self, top= 720,new_page=False): + '''create the time dependent analysis report + four images: + each image total intensity as a function of time + iq~t + waterfall + mean intensity of each ROI as a function of time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + top1=top + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Time Dependent Plot"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + + top = top1 - 160 + + #add img_sum_t + if self.report_type == 'saxs': + ipos = 80 + elif self.report_type == 'gi_saxs': + ipos = 200 + elif self.report_type == 'ang_saxs': + ipos = 200 + else: + ipos = 200 + + imgf = self.img_sum_t_file + img_height=140 + img_left,img_top = ipos, top + str1_left, str1_top,str1= ipos + 60, top1 - 20 , 'img sum ~ t' + str2_left, str2_top = ipos, top- 5 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + #plot iq~t + if self.report_type == 'saxs': + imgf = self.Iq_t_file + image = self.data_dir + imgf + + + img_height=140 + img_left,img_top = 350, top + str1_left, str1_top,str1= 420, top1-20 , 'iq ~ t' + str2_left, str2_top = 360, top- 5 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + elif self.report_type == 'gi_saxs': + pass + + top = top1 - 340 + #add waterfall plot + imgf = self.wat_file + + img_height=160 + img_left,img_top = 80, top + str1_left, str1_top,str1= 140, top + img_height, 'waterfall plot' + str2_left, str2_top = 80, top- 5 + + if self.report_type != 'ang_saxs': + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + else: + pass + + #add mean-intensity of each roi + imgf = self.Mean_inten_t_file + + img_height=160 + img_left,img_top = 360, top + str1_left, str1_top,str1= 330, top + img_height, 'Mean-intensity-of-each-ROI' + str2_left, str2_top = 310, top- 5 + if self.report_type != 'ang_saxs': + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + else: + pass + + if new_page: + c.showPage() + c.save() + + def report_oavs( self, top= 350, oavs_file=None, new_page=False): + '''create the oavs images report + + ''' + + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
OAVS Images"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + #add g2 plot + if oavs_file is None: + imgf = self.oavs_file + else: + imgf = oavs_file + #print(self.data_dir + imgf) + + if os.path.exists(self.data_dir + imgf): + im = Image.open( self.data_dir+imgf ) + ratio = float(im.size[1])/im.size[0] + img_width = 600 + img_height= img_width * ratio #img_height + #width = height/ratio + + if not new_page: + #img_height= 550 + top = top - 600 + str2_left, str2_top = 80, top - 400 + img_left,img_top = 1, top + + if new_page: + #img_height= 150 + top = top - img_height - 50 + str2_left, str2_top = 80, top - 50 + img_left,img_top = 10, top + + str1_left, str1_top, str1= 150, top + img_height, 'OAVS images' + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + #print( imgf,self.data_dir ) + print(img_width, img_height) + + + + def report_one_time( self, top= 350, g2_fit_file=None, q_rate_file=None, new_page=False): + '''create the one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + ''' + + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + #add g2 plot + if g2_fit_file is None: + imgf = self.g2_fit_file + else: + imgf = g2_fit_file + + if self.report_type != 'ang_saxs': + img_height= 300 + top = top - 320 + str2_left, str2_top = 80, top- 0 + + else: + img_height= 550 + top = top - 600 + str2_left, str2_top = 80, top - 400 + #add one_time caculation + img_left,img_top = 1, top + if self.g2_fit_new_page or self.g2_new_page: + + img_height= 550 + top = top - 250 + str2_left, str2_top = 80, top - 0 + img_left,img_top = 60, top + + str1_left, str1_top,str1= 150, top + img_height, 'g2 fit plot' + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + #print( imgf,self.data_dir ) + #add g2 plot fit + #print(self.q_rate_file ) + if os.path.isfile( self.data_dir + self.q_rate_file ): + #print('here') + #print(self.q_rate_file ) + top = top + 70 # + if q_rate_file is None: + imgf = self.q_rate_file + else: + imgf = q_rate_file + if self.report_type != 'ang_saxs': + #print(img_width) + if img_width > 400: + img_height = 90 + else: + img_height= 180 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + else: + img_height= 300 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'q-rate fit plot' + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + img_height= 180 + img_left,img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + else: + top = top + 320 # + if q_rate_file is None: + imgf = self.q_rate_loglog_file + else: + imgf = q_rate_file + #print(imgf) + if self.report_type != 'ang_saxs': + #print(img_width) + if img_width > 400: + img_height = 90/2 + else: + img_height= 180 /2 + img_left,img_top = img_width-10, top #350, 
top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate loglog plot' + else: + img_height= 300/2 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'q-rate loglog plot' + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + 50 + img_height= 180 / 1.5 + img_left,img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 120, 'q-rate loglog plot' + + #print('here') + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + top = top - 100 # + if q_rate_file is None: + imgf = self.g2_q_fitpara_file + else: + imgf = q_rate_file + if self.report_type != 'ang_saxs': + #print(img_width) + if img_width > 400: + img_height = 90 + else: + img_height= 180 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'g2 fit para' + else: + img_height= 300 + img_left,img_top = 350, top - 150 + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 180, 'g2 fit para' + if self.g2_fit_new_page or self.g2_new_page: + top = top - 200 + img_height= 180 * 1.5 + img_left,img_top = 350, top + str2_left, str2_top = 380, top - 5 + str1_left, str1_top,str1= 450, top + 280, 'g2 fit para' + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + + def report_mulit_one_time( self, top= 720,new_page=False): + '''create the mulit one time correlation function report + Two images: + One Time Correlation Function with fit + q-rate fit + ''' + c= self.c + uid=self.uid + #add sub-title, One Time Correlation Function + c.setFillColor(black) + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
One Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + #add g2 plot + top = top - 320 + + imgf = self.g2_fit_file + image = self.data_dir + imgf + if not os.path.exists(image): + image = self.data_dir + self.g2_file + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 1, top, width= height/ratio,height=height, mask= 'auto') + #c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None ) + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 150, top + height , 'g2 fit plot' ) + + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 80, top- 0, 'filename: %s'%imgf ) + + #add g2 plot fit + top = top + 70 # + imgf = self.q_rate_file + image = self.data_dir + imgf + if os.path.exists(image): + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 180 + c.drawImage( image, 350, top, width= height/ratio,height=height,mask= 'auto') + + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 450, top + 230, 'q-rate fit plot' ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 380, top- 5, 'filename: %s'%imgf ) + + if new_page: + c.showPage() + c.save() + + + + def report_two_time( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Two Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add q_Iq_t + imgf = self.two_time_file + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300, 'two time correlation function' + str2_left, str2_top = 180, top - 10 + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top, return_=True ) + + + + top = top - 340 + #add q_Iq_t + imgf = self.two_g2_file + + if True:#not self.two_g2_new_page: + + img_height= 300 + img_left,img_top = 100 -70, top + str1_left, str1_top,str1= 210-70, top + 310, 'compared g2' + str2_left, str2_top = 180-70, top - 10 + + if self.two_g2_new_page: + img_left,img_top = 100, top + print(imgf ) + img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top,return_=True ) + #print(imgf) + top = top + 50 + imgf = self.q_rate_two_time_fit_file + #print(imgf, img_width, top) + if img_width < 400: + img_height= 140 + img_left,img_top = 350, top + 30 + str2_left, str2_top = 380 - 80, top - 5 + str1_left, str1_top,str1= 450 -80 , top + 230, 'q-rate fit from two-time' + + else: + img_height = 90 + img_left,img_top = img_width-10, top #350, top + str2_left, str2_top = img_width + 50, top - 5 #380, top - 5 + str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot' + + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + + + if new_page: + c.showPage() + c.save() + + def report_four_time( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + + c= self.c 
+ uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Four Time Correlation Function"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add q_Iq_t + imgf = self.four_time_file + + if not self.g4_new_page: + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300, 'four time correlation function' + str2_left, str2_top = 180, top - 10 + else: + img_height= 600 + top -= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 300-250, 'four time correlation function' + str2_left, str2_top = 180, top - 10 + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + def report_dose( self, top= 720, new_page=False): + + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. Dose Analysis"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 530 + #add q_Iq_t + imgf = self.dose_file + + img_height= 500 + img_left,img_top = 80, top + str1_left, str1_top,str1= 180, top + 500, 'dose analysis' + str2_left, str2_top = 180, top - 10 + + #print( self.data_dir + self.dose_file) + if os.path.exists( self.data_dir + imgf): + #print( self.dose_file) + im = Image.open( self.data_dir + imgf ) + ratio = float(im.size[1])/im.size[0] + width = img_height/ratio + #print(width) + if width >450: + img_height = 450*ratio + + if self.dose_file_new_page: + #img_left,img_top = 180, top + img_left,img_top = 100, top + + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + if new_page: + c.showPage() + c.save() + + + + def report_flow_pv_g2( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Flow One Time Analysis"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add xsvs fit + + imgf = self.flow_g2v + image = self.data_dir + imgf + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2v_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + top = top - 340 + #add contrast fit + imgf = self.flow_g2p + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2p_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + if new_page: + c.showPage() + c.save() + + + def report_flow_pv_two_time( self, top= 720, new_page=False): + '''create the two time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Flow One &Two Time Comparison"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + + top1=top + top = top1 - 330 + #add xsvs fit + + + if False: + imgf = self.two_time + image = self.data_dir + imgf + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'Two_time' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + imgf = self.flow_g2_g2b_p + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow by two-time' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2bp_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + + + top = top - 340 + #add contrast fit + imgf = self.flow_g2_g2b_v + + img_height= 300 + img_left,img_top = 80, top + str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow by two-time' + str2_left, str2_top = 180, top - 10 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + imgf = self.flow_g2bv_rate_fit + img_height= 200 + img_left,img_top = 350, top +50 + str1_left, str1_top,str1= 210, top + 300, '' + str2_left, str2_top = 350, top - 10 + 50 + add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height, + str1_left, str1_top,str1, + str2_left, str2_top ) + + if new_page: + c.showPage() + c.save() + + def report_xsvs( self, top= 720, new_page=False): + '''create the one time correlation function report + Two images: + Two Time Correlation Function + two one-time correlatoin function from multi-one-time and from diagonal two-time + ''' + c= self.c + uid=self.uid + #add sub-title, Time-dependent plot + c.setFont("Helvetica", 20) + + ds = 20 + self.sub_title_num +=1 + c.drawString(10, top, "%s. 
Visibility Analysis"%self.sub_title_num ) #add title + c.setFont("Helvetica", 14) + top = top - 330 + #add xsvs fit + imgf = self.xsvs_fit_file + add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300, + + str1_left=210, str1_top = top +300,str1='XSVS_Fit_by_Negtive_Binomal Function', + str2_left = 180, str2_top = top -10 ) + + #add contrast fit + top = top -340 + imgf = self.contrast_file + add_image_string( c, imgf, self.data_dir, img_left=100, img_top=top, img_height= 300, + + str1_left=210, str1_top = top + 310,str1='contrast get from xsvs and xpcs', + str2_left = 180, str2_top = top -10 ) + + if False: + top1=top + top = top1 - 330 + #add xsvs fit + imgf = self.xsvs_fit_file + image = self.data_dir + imgf + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None) + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 210, top + 300 , 'XSVS_Fit_by_Negtive_Binomal Function' ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 180, top- 10, 'filename: %s'%imgf ) + top = top - 340 + #add contrast fit + imgf = self.contrast_file + image = self.data_dir + imgf + im = Image.open( image ) + ratio = float(im.size[1])/im.size[0] + height= 300 + c.drawImage( image, 100, top, width= height/ratio,height=height,mask=None) + + c.setFont("Helvetica", 16) + c.setFillColor( blue) + c.drawString( 210, top + 310, 'contrast get from xsvs and xpcs' ) + c.setFont("Helvetica", 12) + c.setFillColor(red) + c.drawString( 180, top- 10, 'filename: %s'%imgf ) + + + if new_page: + c.showPage() + c.save() + + + + + def new_page(self): + c=self.c + c.showPage() + + def save_page(self): + c=self.c + c.save() + + def done(self): + out_dir = self.out_dir + uid=self.uid + + print() + print('*'*40) + print ('The pdf report is created with filename as: %s'%(self.filename )) + print('*'*40) + + + + +def create_multi_pdf_reports_for_uids( uids, g2, data_dir, report_type='saxs', append_name='' ): + ''' Aug 16, YG@CHX-NSLS-II + Create multi pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + ''' + for key in list( g2.keys()): + i=1 + for sub_key in list( g2[key].keys() ): + uid_i = uids[key][sub_key] + data_dir_ = os.path.join( data_dir, '%s/'%uid_i ) + if append_name!='': + uid_name = uid_i + append_name + else: + uid_name = uid_i + c= create_pdf_report( data_dir_, uid_i,data_dir, + report_type=report_type, filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid_name ) + #Page one: Meta-data/Iq-Q/ROI + c.report_header(page=1) + c.report_meta( top=730) + #c.report_one_time( top= 500 ) + #c.new_page() + if report_type =='flow': + c.report_flow_pv_g2( top= 720) + c.save_page() + c.done() + + + + + +def create_one_pdf_reports_for_uids( uids, g2, data_dir, filename='all_in_one', report_type='saxs' ): + ''' Aug 16, YG@CHX-NSLS-II + Create one pdf reports for each uid in uids + uids: a list of uids to be reported + g2: a dictionary, {run_num: sub_num: g2_of_each_uid} + data_dir: + Save pdf report in data dir + ''' + c= create_pdf_report( data_dir, uid=filename, out_dir=data_dir, load=False, report_type= report_type) + page=1 + + for key in list( g2.keys()): + i=1 + for sub_key in list( g2[key].keys() ): + uid_i = uids[key][sub_key] + data_dir_ = os.path.join( data_dir, '%s/'%uid_i) + + c.uid = uid_i + c.data_dir = data_dir_ + c.load_metadata() + + #Page one: 
Meta-data/Iq-Q/ROI + c.report_header(page=page) + c.report_meta( top=730) + c.report_one_time( top= 500 ) + c.new_page() + page += 1 + c.uid = filename + c.save_page() + c.done() + + +def save_res_h5( full_uid, data_dir, save_two_time=False ): + ''' + YG. Nov 10, 2016 + save the results to a h5 file + will save meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo save multi-tau calculated one-time correlation function g2/taus + will also save two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will save two-time correaltion function + ''' + with h5py.File(data_dir + '%s.h5'%full_uid, 'w') as hf: + #write meta data + meta_data = hf.create_dataset("meta_data", (1,), dtype='i') + for key in md.keys(): + try: + meta_data.attrs[key] = md[key] + except: + pass + + shapes = md['avg_img'].shape + avg_h5 = hf.create_dataset("avg_img", data = md['avg_img'] ) + mask_h5 = hf.create_dataset("mask", data = md['mask'] ) + roi_h5 = hf.create_dataset("roi", data = md['ring_mask'] ) + + g2_h5 = hf.create_dataset("g2", data = g2 ) + taus_h5 = hf.create_dataset("taus", data = taus ) + + if save_two_time: + g12b_h5 = hf.create_dataset("g12b", data = g12b ) + g2b_h5 = hf.create_dataset("g2b", data = g2b ) + taus2_h5 = hf.create_dataset("taus2", data = taus2 ) + +def printname(name): + print (name) +#f.visit(printname) +def load_res_h5( full_uid, data_dir ): + '''YG. Nov 10, 2016 + load results from a h5 file + will load meta data/avg_img/mask/roi (ring_mask or box_mask)/ + will aslo load multi-tau calculated one-time correlation function g2/taus + will also load two-time derived one-time correlation function /g2b/taus2 + if save_two_time if True, will load two-time correaltion function + + ''' + with h5py.File(data_dir + '%s.h5'%full_uid, 'r') as hf: + meta_data_h5 = hf.get( "meta_data" ) + meta_data = {} + for att in meta_data_h5.attrs: + meta_data[att] = meta_data_h5.attrs[att] + avg_h5 = np.array( hf.get("avg_img" ) ) + mask_h5 = np.array(hf.get("mask" )) + roi_h5 =np.array( hf.get("roi" )) + g2_h5 = np.array( hf.get("g2" )) + taus_h5 = np.array( hf.get("taus" )) + g2b_h5 = np.array( hf.get("g2b")) + taus2_h5 = np.array( hf.get("taus2")) + if 'g12b' in hf: + g12b_h5 = np.array( hf.get("g12b")) + + if 'g12b' in hf: + return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b + else: + return meta_data, avg_h5, mask_h5,roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5 + + + + +def make_pdf_report( data_dir, uid, pdf_out_dir, pdf_filename, username, + run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=None, + oavs_report = False,report_type='saxs', md=None,report_invariant=False, return_class=False, res_h5_filename=None + ): + + if uid.startswith("uid=") or uid.startswith("Uid="): + uid = uid[4:] + c= create_pdf_report( data_dir, uid, pdf_out_dir, filename= pdf_filename, user= username, report_type=report_type, md = md, res_h5_filename=res_h5_filename ) + #print( c.md) + #Page one: Meta-data/Iq-Q/ROI + c.report_header(page=1) + c.report_meta( top=730) + c.report_static( top=540, iq_fit = run_fit_form ) + c.report_ROI( top= 290) + page = 1 + ##Page Two for plot OVAS images if oavs_report is True + if oavs_report: + c.new_page() + c.report_header(page=2) + c.report_oavs( top= 720, oavs_file=None, new_page=True) + page +=1 + + #Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q + c.new_page() + page +=1 + c.report_header(page=page) + + if c.report_type != 'ang_saxs': + c.report_time_analysis( top= 720) + if run_one_time: + if 
c.report_type != 'ang_saxs': + top = 350 + else: + top = 500 + if c.g2_fit_new_page: + c.new_page() + page +=1 + top = 720 + c.report_one_time( top= top ) + + + #self.two_g2_new_page = True + #self.g2_fit_new_page = True + + #Page Three: two-time/two g2 + + if run_two_time: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_two_time( top= 720 ) + + if run_four_time: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_four_time( top= 720 ) + + if run_xsvs: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_xsvs( top= 720 ) + if run_dose: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_dose( top = 702) + if report_invariant: + c.new_page() + page +=1 + c.report_header(page= page) + c.report_invariant( top = 702) + + else: + c.report_flow_pv_g2( top= 720, new_page= True) + c.report_flow_pv_two_time( top= 720, new_page= True ) + + c.save_page() + c.done() + if return_class: + return c + + +###################################### +###Deal with saving dict to hdf5 file +def save_dict_to_hdf5(dic, filename): + """ + .... + """ + with h5py.File(filename, 'w') as h5file: + recursively_save_dict_contents_to_group(h5file, '/', dic) + +def load_dict_from_hdf5(filename): + """ + .... + """ + with h5py.File(filename, 'r') as h5file: + return recursively_load_dict_contents_from_group(h5file, '/') + +def recursively_save_dict_contents_to_group( h5file, path, dic): + """...""" + # argument type checking + if not isinstance(dic, dict): + raise ValueError("must provide a dictionary") + + if not isinstance(path, str): + raise ValueError("path must be a string") + if not isinstance(h5file, h5py._hl.files.File): + raise ValueError("must be an open h5py file") + # save items to the hdf5 file + for key, item in dic.items(): + #print(key,item) + key = str(key) + if isinstance(item, list): + item = np.array(item) + #print(item) + if not isinstance(key, str): + raise ValueError("dict keys must be strings to save to hdf5") + # save strings, numpy.int64, and numpy.float64 types + if isinstance(item, (np.int64, np.float64, str, float, np.float32,int)): # removed depreciated np.float LW @06/11/2023 + #print( 'here' ) + h5file[path + key] = item + if not h5file[path + key].value == item: + raise ValueError('The data representation in the HDF5 file does not match the original dict.') + # save numpy arrays + elif isinstance(item, np.ndarray): + try: + h5file[path + key] = item + except: + item = np.array(item).astype('|S9') + h5file[path + key] = item + if not np.array_equal(h5file[path + key].value, item): + raise ValueError('The data representation in the HDF5 file does not match the original dict.') + # save dictionaries + elif isinstance(item, dict): + recursively_save_dict_contents_to_group(h5file, path + key + '/', item) + # other types cannot be saved and will result in an error + else: + #print(item) + raise ValueError('Cannot save %s type.' % type(item)) + + +def recursively_load_dict_contents_from_group( h5file, path): + """...""" + ans = {} + for key, item in h5file[path].items(): + if isinstance(item, h5py._hl.dataset.Dataset): + ans[key] = item.value + elif isinstance(item, h5py._hl.group.Group): + ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/') + return ans + + +def export_xpcs_results_to_h5( filename, export_dir, export_dict ): + ''' + YG. May 10, 2017 + save the results to a h5 file + + YG. 
Aug28 2019 modify, add try in export pandas to h5 to fit the new version of pandas + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. + ''' + + fout = export_dir + filename + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] + dict_nest=['taus_uids', 'g2_uids' ] + + with h5py.File(fout, 'w') as hf: + flag=False + for key in list(export_dict.keys()): + #print( key ) + if key in dicts: #=='md' or key == 'qval_dict': + md= export_dict[key] + meta_data = hf.create_dataset( key, (1,), dtype='i') + for key_ in md.keys(): + try: + meta_data.attrs[str(key_)] = md[key_] + except: + pass + elif key in dict_nest: + #print(key) + try: + recursively_save_dict_contents_to_group(hf, '/%s/'%key, export_dict[key] ) + except: + print("Can't export the key: %s in this dataset."%key) + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + try: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + except: + flag=True + else: + data = hf.create_dataset(key, data = export_dict[key] ) + #add this fill line at Octo 27, 2017 + data.set_fill_value = np.nan + if flag: + for key in list(export_dict.keys()): + if key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + + print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) + + + +def extract_xpcs_results_from_h5_debug( filename, import_dir, onekey=None, exclude_keys=None ): + ''' + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. + ''' + + import pandas as pds + import numpy as np + extract_dict = {} + fp = import_dir + filename + pds_type_keys = [] + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] + if exclude_keys is None: + exclude_keys =[] + if onekey is None: + for k in dicts: + extract_dict[k] = {} + with h5py.File( fp, 'r') as hf: + #print (list( hf.keys()) ) + for key in list( hf.keys()): + if key not in exclude_keys: + if key in dicts: + extract_dict[key] = recursively_load_dict_contents_from_group(hf, '/' + key + '/') + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + pds_type_keys.append( key ) + else: + extract_dict[key] = np.array( hf.get( key )) + for key in pds_type_keys: + if key not in exclude_keys: + extract_dict[key] = pds.read_hdf(fp, key= key ) + else: + if onekey == 'md': + with h5py.File( fp, 'r') as hf: + md = hf.get('md') + for key in list(md.attrs): + extract_dict['md'][key] = md.attrs[key] + elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) + else: + try: + with h5py.File( fp, 'r') as hf: + extract_dict[onekey] = np.array( hf.get( onekey )) + except: + print("The %s dosen't have this %s value"%(fp, onekey) ) + return extract_dict + + + + + + + + +def export_xpcs_results_to_h5_old( filename, export_dir, export_dict ): + ''' + YG. Dec 22, 2016 + save the results to a h5 file + + filename: the h5 file name + export_dir: the exported file folder + export_dict: dict, with keys as md, g2, g4 et.al. 
+ ''' + import h5py + fout = export_dir + filename + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] #{k1: { }} + dict_nest= ['taus_uids', 'g2_uids'] #{k1: {k2:}} + with h5py.File(fout, 'w') as hf: + for key in list(export_dict.keys()): + #print( key ) + if key in dicts: #=='md' or key == 'qval_dict': + md= export_dict[key] + meta_data = hf.create_dataset( key, (1,), dtype='i') + for key_ in md.keys(): + try: + meta_data.attrs[str(key_)] = md[key_] + except: + pass + elif key in dict_nest: + k1 = export_dict[key] + v1 = hf.create_dataset( key, (1,), dtype='i') + for k2 in k1.keys(): + + v2 = hf.create_dataset( k1, (1,), dtype='i') + + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + export_dict[key].to_hdf( fout, key=key, mode='a', ) + else: + data = hf.create_dataset(key, data = export_dict[key] ) + print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename)) + + +def extract_xpcs_results_from_h5( filename, import_dir, onekey=None, exclude_keys=None, two_time_qindex = None ): + ''' + YG. Dec 22, 2016 + extract data from a h5 file + + filename: the h5 file name + import_dir: the imported file folder + onekey: string, if not None, only extract that key + return: + extact_dict: dict, with keys as md, g2, g4 et.al. + ''' + + import pandas as pds + import numpy as np + extract_dict = {} + fp = import_dir + filename + pds_type_keys = [] + dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids'] + if exclude_keys is None: + exclude_keys =[] + if onekey is None: + for k in dicts: + extract_dict[k] = {} + with h5py.File( fp, 'r') as hf: + #print (list( hf.keys()) ) + for key in list( hf.keys()): + if key not in exclude_keys: + if key in dicts: + md = hf.get(key) + for key_ in list(md.attrs): + #print(key, key_) + if key == 'qval_dict': + extract_dict[key][int(key_)] = md.attrs[key_] + else: + extract_dict[key][key_] = md.attrs[key_] + + elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + pds_type_keys.append( key ) + else: + if key == 'g12b': + if two_time_qindex is not None: + extract_dict[key] = hf.get( key )[:,:,two_time_qindex] + else: + extract_dict[key] = hf.get( key )[:] + else: + extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key )) + + for key in pds_type_keys: + if key not in exclude_keys: + extract_dict[key] = pds.read_hdf(fp, key= key ) + else: + if onekey == 'md': + with h5py.File( fp, 'r') as hf: + md = hf.get('md') + for key in list(md.attrs): + extract_dict['md'][key] = md.attrs[key] + elif onekey in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']: + extract_dict[onekey] = pds.read_hdf(fp, key= onekey ) + else: + try: + with h5py.File( fp, 'r') as hf: + if key == 'g12b': + if two_time_qindex is not None: + extract_dict[key] = hf.get( key )[:,:,two_time_qindex] + else: + extract_dict[key] = hf.get( key )[:] + else: + extract_dict[key] = hf.get( key )[:] #np.array( hf.get( key )) + #extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) + except: + print("The %s dosen't have this %s value"%(fp, onekey) ) + return extract_dict + + + + + +def read_contrast_from_multi_csv( uids, path, times=None, unit=20 ): + '''Y.G. 
2016, Dec 23, load contrast from multi csv file''' + + N = len(uids) + if times is None: + times = np.array( [0] + [2**i for i in range(N)] )*unit + for i, uid in enumerate(uids): + fp = path + uid + '/uid=%s--contrast_factorL.csv'%uid + contri = pds.read_csv( fp ) + qs = np.array( contri[contri.columns[0]] ) + contri_ = np.array( contri[contri.columns[1]] ) + if i ==0: + contr = np.zeros( [ N, len(qs)]) + contr[i] = contri_ + #contr[0,:] = np.nan + return times, contr + +def read_contrast_from_multi_h5( uids, path, ): + '''Y.G. 2016, Dec 23, load contrast from multi h5 file''' + N = len(uids) + times_xsvs = np.zeros( N ) + for i, uid in enumerate(uids): + t = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, + import_dir = path + uid + '/' , onekey= 'times_xsvs') + times_xsvs[i] = t['times_xsvs'][0] + contri = extract_xpcs_results_from_h5( filename= '%s_Res.h5'%uid, + import_dir = path + uid + '/' , onekey= 'contrast_factorL') + if i ==0: + contr = np.zeros( [ N, contri['contrast_factorL'].shape[0] ]) + contr[i] = contri['contrast_factorL'][:,0] + return times_xsvs, contr + + + + + + diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_compress_01242025.py b/pyCHX/backups/pyCHX-backup/backups/chx_compress_01242025.py new file mode 100644 index 0000000..b42d538 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_compress_01242025.py @@ -0,0 +1,1476 @@ +import gc +import os +import pickle as pkl +import shutil +import struct +import sys +from contextlib import closing +from glob import iglob +from multiprocessing import Pool + +import dill +import matplotlib.pyplot as plt + +# imports handler from CHX +# this is where the decision is made whether or not to use dask +# from chxtools.handlers import EigerImages, EigerHandler +from eiger_io.fs_handler import EigerHandler, EigerImages +from tqdm import tqdm + +from pyCHX.chx_generic_functions import ( + copy_data, + create_time_slice, + delete_data, + get_detector, + get_eigerImage_per_file, + get_sid_filenames, + load_data, + reverse_updown, + rot90_clockwise, +) +from pyCHX.chx_libs import RUN_GUI, LogNorm, datetime, db, getpass, np, os, roi, time + + +def run_dill_encoded(what): + fun, args = dill.loads(what) + return fun(*args) + + +def apply_async(pool, fun, args, callback=None): + return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),), callback=callback) + + +def map_async(pool, fun, args): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) + + +def pass_FD(FD, n): + # FD.rdframe(n) + try: + FD.seekimg(n) + except: + pass + return False + + +def go_through_FD(FD): + if not pass_FD(FD, FD.beg): + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + else: + pass + + +def compress_eigerdata( + images, + mask, + md, + filename=None, + force_compress=False, + bad_pixel_threshold=1e15, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + nobytes=2, + bins=1, + bad_frame_list=None, + para_compress=False, + num_sub=100, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + with_pickle=False, + direct_load_data=True, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + """ + Init 2016, YG@CHX + DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data + Add copy_rawdata opt. 
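+    A minimal usage sketch (illustrative values only; note that para_compress only takes
+    effect with the default dtypes='uid', and the compressed file defaults to a path built
+    from md['uid']):
+        mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(
+            images, mask, md, force_compress=False, para_compress=True,
+            nobytes=4, bins=1, with_pickle=True )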
+ + """ + + end = len(images) // bins + if filename is None: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"] + if dtypes != "uid": + para_compress = False + else: + if para_compress: + images = "foo" + # para_compress= True + # print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file(data_path) + if data_path is None: + sud = get_sid_filenames(db[uid]) + data_path = sud[2][0] + if force_compress: + print("Create a new compress file with filename as :%s." % filename) + if para_compress: + # stop connection to be before forking... (let it reset again); 11/09/2024 this seems to fail with 'registry doesn't have attribute disconnect... -> try making this optional; this might have been a leftover: if compression happens "natuarally" (not as force_compress=True) this disconnect/reconnect is already missing...we definitely had this error before... + try: + db.reg.disconnect() + db.mds.reset_connection() + except: + pass + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + rot90=rot90, + reverse=reverse, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + new_path=new_path, + ) + else: + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + if not os.path.exists(filename): + print("Create a new compress file with filename as :%s." % filename) + if para_compress: + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + reverse=reverse, + rot90=rot90, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + ) + else: + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + print("Using already created compressed file with filename as :%s." 
% filename) + beg = 0 + return read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + bad_frame_list=bad_frame_list, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + + +def read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + bad_frame_list=None, + with_pickle=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + """ + # should use try and except instead of with_pickle in the future! + CAL = False + if not with_pickle: + CAL = True + else: + try: + mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) + except: + CAL = True + if CAL: + FD = Multifile(filename, beg, end) + imgsum = np.zeros(FD.end - FD.beg, dtype=np.float64) + avg_img = np.zeros([FD.md["ncols"], FD.md["nrows"]], dtype=np.float64) + imgsum, bad_frame_list_ = get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=bad_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, + plot_=False, + bad_frame_list=bad_frame_list, + ) + avg_img = get_avg_imgc(FD, beg=None, end=None, sampling=1, plot_=False, bad_frame_list=bad_frame_list_) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + + +def para_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + cpu_core_number=72, + with_pickle=True, + direct_load_data=False, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + + data_path_ = data_path + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + else: + # print('Here for images_per_file: %s'%images_per_file) + # images_ = EigerImages( data_path, images_per_file=images_per_file) + # print('here') + if not copy_rawdata: + images_ = EigerImages(data_path, images_per_file, md) + else: + print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.") + print("Copying...") + copy_data(data_path, new_path) + # print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages(new_master_file, images_per_file, md) + # print(md) + if reverse: + images_ = reverse_updown(images_) # Why not np.flipud? 
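+            # Possible reason (untested assumption, not stated by the original author):
+            # np.flipud() would materialize the whole stack as one in-memory ndarray, while
+            # images_ is a lazy, file-backed EigerImages sequence; reverse_updown() presumably
+            # flips each frame only when it is accessed. A pims-style lazy per-frame flip could
+            # be written roughly as (requires pims):
+            #     images_ = pims.pipeline(np.flipud)(images_)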
+ if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + + else: + N = len(images) + N = int(np.ceil(N / bins)) + Nf = int(np.ceil(N / num_sub)) + if Nf > cpu_core_number: + print("The process number is larger than %s (XF11ID server core number)" % cpu_core_number) + num_sub_old = num_sub + num_sub = int(np.ceil(N / cpu_core_number)) + Nf = int(np.ceil(N / num_sub)) + print("The sub compressed file number was changed from %s to %s" % (num_sub_old, num_sub)) + create_compress_header(md, filename + "-header", nobytes, bins, rot90=rot90) + # print( 'done for header here') + # print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( + images=images, + mask=mask, + md=md, + filename=filename, + num_sub=num_sub, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + dtypes=dtypes, + num_max_para_process=num_max_para_process, + reverse=reverse, + rot90=rot90, + direct_load_data=direct_load_data, + data_path=data_path_, + images_per_file=images_per_file, + ) + + res_ = [results[k].get() for k in list(sorted(results.keys()))] + imgsum = np.zeros(N) + bad_frame_list = np.zeros(N, dtype=bool) + good_count = 1 + for i in range(Nf): + mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i] + imgsum[i * num_sub : (i + 1) * num_sub] = imgsum_ + bad_frame_list[i * num_sub : (i + 1) * num_sub] = bad_frame_list_ + if i == 0: + mask = mask_ + avg_img = np.zeros_like(avg_img_) + else: + mask *= mask_ + if not np.sum(np.isnan(avg_img_)): + avg_img += avg_img_ + good_count += 1 + + bad_frame_list = np.where(bad_frame_list)[0] + avg_img /= good_count + + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + print("Combining the seperated compressed files together...") + combine_compressed(filename, Nf, del_old=True) + del results + del res_ + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + if copy_rawdata: + delete_data(data_path, new_path) + return mask, avg_img, imgsum, bad_frame_list + + +def combine_compressed(filename, Nf, del_old=True): + old_files = [filename + "-header"] + for i in range(Nf): + old_files.append(filename + "_temp-%i.tmp" % i) + combine_binary_files(filename, old_files, del_old) + + +def combine_binary_files(filename, old_files, del_old=False): + """Combine binary files together""" + fn_ = open(filename, "wb") + for ftemp in old_files: + shutil.copyfileobj(open(ftemp, "rb"), fn_) + if del_old: + os.remove(ftemp) + fn_.close() + + +def para_segment_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="images", + reverse=True, + rot90=False, + num_max_para_process=50, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + parallelly compressed eiger data without header, this function is for parallel compress + """ + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + else: + images_ = EigerImages(data_path, images_per_file, md) + if reverse: + images_ = reverse_updown(images_) + if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + + else: + N = len(images) + + # N = int( np.ceil( N/ bins ) ) + num_sub *= bins + if N % num_sub: 
+ Nf = N // num_sub + 1 + print("The average image intensity would be slightly not correct, about 1% error.") + print("Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image") + else: + Nf = N // num_sub + print("It will create %i temporary files for parallel compression." % Nf) + + if Nf > num_max_para_process: + N_runs = np.int(np.ceil(Nf / float(num_max_para_process))) + print("The parallel run number: %s is larger than num_max_para_process: %s" % (Nf, num_max_para_process)) + else: + N_runs = 1 + result = {} + # print( mask_filename )# + '*'* 10 + 'here' ) + for nr in range(N_runs): + if (nr + 1) * num_max_para_process > Nf: + inputs = range(num_max_para_process * nr, Nf) + else: + inputs = range(num_max_para_process * nr, num_max_para_process * (nr + 1)) + fns = [filename + "_temp-%i.tmp" % i for i in inputs] + # print( nr, inputs, ) + pool = Pool(processes=len(inputs)) # , maxtasksperchild=1000 ) + # print( inputs ) + for i in inputs: + if i * num_sub <= N: + result[i] = pool.apply_async( + segment_compress_eigerdata, + [ + images, + mask, + md, + filename + "_temp-%i.tmp" % i, + bad_pixel_threshold, + hot_pixel_threshold, + bad_pixel_low_threshold, + nobytes, + bins, + i * num_sub, + (i + 1) * num_sub, + dtypes, + reverse, + rot90, + direct_load_data, + data_path, + images_per_file, + ], + ) + + pool.close() + pool.join() + pool.terminate() + return result + + +def segment_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + N1=None, + N2=None, + dtypes="images", + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Create a compressed eiger data without header, this function is for parallel compress + for parallel compress don't pass any non-scalar parameters + """ + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images = load_data(uid, detector, reverse=reverse, rot90=rot90)[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] + if reverse: + images = reverse_updown(EigerImages(data_path, images_per_file, md))[N1:N2] + if rot90: + images = rot90_clockwise(images) + + Nimg_ = len(images) + M, N = images[0].shape + avg_img = np.zeros([M, N], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 + good_count = 0 + # frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 + else: + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + # Nimg = Nimg_//bins + Nimg = int(np.ceil(Nimg_ / bins)) + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + # print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros(Nimg) + if bins != 1: + # print('The frames will be binned by %s'%bins) + dtype = np.float64 + + fp = open(filename, "wb") + for n in range(Nimg): + t1, t2 = time_edge[n] + if bins != 1: + img = np.array(np.average(images[t1:t2], axis=0), dtype=dtype) + else: + img = np.array(images[t1], dtype=dtype) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) * np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen == 0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + dlen = 0 + fp.write(struct.pack("@I", dlen)) + 
else: + np.ravel(avg_img)[p] += v + good_count += 1 + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 + del p, v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write("#") + sys.stdout.flush() + # del images, mask, avg_img, imgsum, bad_frame_list + # print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + +def create_compress_header(md, filename, nobytes=4, bins=1, rot90=False): + """ + Create the head for a compressed eiger data, this function is for parallel compress + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + flag = True + # print( list(md.keys()) ) + # print(md) + if "pixel_mask" in list(md.keys()): + sx, sy = md["pixel_mask"].shape[0], md["pixel_mask"].shape[1] + elif "img_shape" in list(md.keys()): + sx, sy = md["img_shape"][0], md["img_shape"][1] + else: + sx, sy = 2167, 2070 # by default for 4M + # print(flag) + klst = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + ] + vs = [0, 0, 0, 0, 0, 0, 75, 75] + for i, k in enumerate(klst): + if k in list(md.keys()): + vs[i] = md[k] + if flag: + if rot90: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + nobytes, + sx, + sy, + 0, + sx, + 0, + sy, + ) + + else: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + # md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, + sy, + sx, + 0, + sy, + 0, + sx, + ) + + fp.write(Header) + fp.close() + + +def init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + with_pickle=True, + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + if "count_time" not in list(md.keys()): + md["count_time"] = 0 + if "detector_distance" not in list(md.keys()): + md["detector_distance"] = 0 + if "frame_time" not in list(md.keys()): + md["frame_time"] = 0 + if "incident_wavelength" not in list(md.keys()): + md["incident_wavelength"] = 0 + if "y_pixel_size" not in list(md.keys()): + md["y_pixel_size"] = 
0 + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 0 + if "beam_center_x" not in list(md.keys()): + md["beam_center_x"] = 0 + if "beam_center_y" not in list(md.keys()): + md["beam_center_y"] = 0 + + if not rot90: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[1], + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + ) + else: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[0], + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + ) + + fp.write(Header) + + Nimg_ = len(images) + avg_img = np.zeros_like(images[0], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 + good_count = 0 + frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 + else: + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + Nimg = Nimg_ // bins + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + + imgsum = np.zeros(Nimg) + if bins != 1: + print("The frames will be binned by %s" % bins) + + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + img = np.average(images[t1:t2], axis=0) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) & np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + # if imgsum[n] >=bad_pixel_threshold : + dlen = 0 + fp.write(struct.pack("@I", dlen)) + else: + np.ravel(avg_img)[p] += v + good_count += 1 + frac += dlen / Nopix + # s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + if nobytes != 8: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + # n +=1 + + fp.close() + frac /= good_count + print("The fraction of pixel occupied by photon is %6.3f%% " % (100 * frac)) + avg_img /= good_count + + bad_frame_list = np.where( + (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + )[0] + # bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + # bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + # bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + return mask, avg_img, imgsum, bad_frame_list + + +""" Description: + + This is code that Mark wrote to open the multifile format + in compressed mode, translated to python. 
+ This seems to work for DALSA, FCCD and EIGER in compressed mode. + It should be included in the respective detector.i files + Currently, this refers to the compression mode being '6' + Each file is image descriptor files chunked together as follows: + Header (1024 bytes) + |--------------IMG N begin--------------| + | Dlen + |---------------------------------------| + | Pixel positions (dlen*4 bytes | + | (0 based indexing in file) | + |---------------------------------------| + | Pixel data(dlen*bytes bytes) | + | (bytes is found in header | + | at position 116) | + |--------------IMG N end----------------| + |--------------IMG N+1 begin------------| + |----------------etc.....---------------| + + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + + +""" + + +class Multifile: + """The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + """ + + def __init__(self, filename, beg, end, reverse=False): + """Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + """ + self.FID = open(filename, "rb") + # self.FID.seek(0,os.SEEK_SET) + self.filename = filename + # br: bytes read + br = self.FID.read(1024) + self.beg = beg + self.end = end + self.reverse = reverse + ms_keys = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + "bytes", + "nrows", + "ncols", + "rows_begin", + "rows_end", + "cols_begin", + "cols_end", + ] + + magic = struct.unpack("@16s", br[:16]) + md_temp = struct.unpack("@8d7I916x", br[16:]) + self.md = dict(zip(ms_keys, md_temp)) + + self.imgread = 0 + self.recno = 0 + + if reverse: + nrows = self.md["nrows"] + ncols = self.md["ncols"] + self.md["nrows"] = ncols + self.md["ncols"] = nrows + rbeg = self.md["rows_begin"] + rend = self.md["rows_end"] + cbeg = self.md["cols_begin"] + cend = self.md["cols_end"] + self.md["rows_begin"] = cbeg + self.md["rows_end"] = cend + self.md["cols_begin"] = rbeg + self.md["cols_end"] = rend + + # some initialization stuff + self.byts = self.md["bytes"] + if self.byts == 2: + self.valtype = np.uint16 + elif self.byts == 4: + self.valtype = np.uint32 + elif self.byts == 8: + self.valtype = np.float64 + # now convert pieces of these bytes to our data + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + + # now read first image + # print "Opened file. 
Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + + def _readHeader(self): + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + + def _readImageRaw(self): + + p = np.fromfile(self.FID, dtype=np.int32, count=self.dlen) + v = np.fromfile(self.FID, dtype=self.valtype, count=self.dlen) + self.imgread = 1 + return (p, v) + + def _readImage(self): + (p, v) = self._readImageRaw() + img = np.zeros((self.md["ncols"], self.md["nrows"])) + np.put(np.ravel(img), p, v) + return img + + def seekimg(self, n=None): + """Position file to read the nth image. + For now only reads first image ignores n + """ + # the logic involving finding the cursor position + if n is None: + n = self.recno + if n < self.beg or n > self.end: + raise IndexError("Error, record out of range") + # print (n, self.recno, self.FID.tell() ) + if (n == self.recno) and (self.imgread == 0): + pass # do nothing + + else: + if n <= self.recno: # ensure cursor less than search pos + self.FID.seek(1024, os.SEEK_SET) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + self.recno = 0 + self.imgread = 0 + if n == 0: + return + # have to iterate on seeking since dlen varies + # remember for rec recno, cursor is always at recno+1 + if self.imgread == 0: # move to next header if need to + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + for i in range(self.recno + 1, n): + # the less seeks performed the faster + # print (i) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + # print 's',self.dlen + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + + # we are now at recno in file, read the header and data + # self._clearImage() + self._readHeader() + self.imgread = 0 + self.recno = n + + def rdframe(self, n): + if self.seekimg(n) != -1: + return self._readImage() + + def rdrawframe(self, n): + if self.seekimg(n) != -1: + return self._readImageRaw() + + +class Multifile_Bins(object): + """ + Bin a compressed file with bins number + See Multifile for details for Multifile_class + """ + + def __init__(self, FD, bins=100): + """ + FD: the handler of a compressed Eiger frames + bins: bins number + """ + + self.FD = FD + if (FD.end - FD.beg) % bins: + print("Please give a better bins number and make the length of FD/bins= integer") + else: + self.bins = bins + self.md = FD.md + # self.beg = FD.beg + self.beg = 0 + Nimg = FD.end - FD.beg + slice_num = Nimg // bins + self.end = slice_num + self.time_edge = np.array(create_time_slice(N=Nimg, slice_num=slice_num, slice_width=bins)) + FD.beg + self.get_bin_frame() + + def get_bin_frame(self): + FD = self.FD + self.frames = np.zeros([FD.md["ncols"], FD.md["nrows"], len(self.time_edge)]) + for n in tqdm(range(len(self.time_edge))): + # print (n) + t1, t2 = self.time_edge[n] + # print( t1, t2) + self.frames[:, :, n] = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + + def rdframe(self, n): + return self.frames[:, :, n] + + def rdrawframe(self, n): + x_ = np.ravel(self.rdframe(n)) + p = np.where(x_)[0] + v = np.array(x_[p]) + return (np.array(p, dtype=np.int32), v) + + +class MultifileBNL: + """ + Re-write multifile from scratch. + """ + + HEADER_SIZE = 1024 + + def __init__(self, filename, mode="rb"): + """ + Prepare a file for reading or writing. 
+ mode : either 'rb' or 'wb' + """ + if mode == "wb": + raise ValueError("Write mode 'wb' not supported yet") + if mode != "rb" and mode != "wb": + raise ValueError("Error, mode must be 'rb' or 'wb'" "got : {}".format(mode)) + self._filename = filename + self._mode = mode + # open the file descriptor + # create a memmap + if mode == "rb": + self._fd = np.memmap(filename, dtype="c") + elif mode == "wb": + self._fd = open(filename, "wb") + # these are only necessary for writing + self.md = self._read_main_header() + self._cols = int(self.md["nrows"]) + self._rows = int(self.md["ncols"]) + # some initialization stuff + self.nbytes = self.md["bytes"] + if self.nbytes == 2: + self.valtype = " self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # dlen is 4 bytes + cur = self.frame_indexes[n] + dlen = np.frombuffer(self._fd[cur : cur + 4], dtype=" nbytes + vals = self._fd[cur : cur + dlen * self.nbytes] + vals = np.frombuffer(vals, dtype=self.valtype) + return pos, vals + + def rdframe(self, n): + # read header then image + pos, vals = self._read_raw(n) + img = np.zeros((self._rows * self._cols,)) + img[pos] = vals + return img.reshape((self._rows, self._cols)) + + def rdrawframe(self, n): + # read header then image + return self._read_raw(n) + + +class MultifileBNLCustom(MultifileBNL): + def __init__(self, filename, beg=0, end=None, **kwargs): + super().__init__(filename, **kwargs) + self.beg = beg + if end is None: + end = self.Nframes - 1 + self.end = end + + def rdframe(self, n): + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + # return super().rdframe(n - self.beg) + return super().rdframe(n) + + def rdrawframe(self, n): + # return super().rdrawframe(n - self.beg) + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + return super().rdrawframe(n) + + +def get_avg_imgc( + FD, beg=None, end=None, sampling=100, plot_=False, bad_frame_list=None, show_progress=True, *argv, **kwargs +): + """Get average imagef from a data_series by every sampling number to save time""" + # avg_img = np.average(data_series[:: sampling], axis=0) + + if beg is None: + beg = FD.beg + if end is None: + end = FD.end + + avg_img = FD.rdframe(beg) + n = 1 + flag = True + if show_progress: + # print( sampling-1 + beg , end, sampling ) + if bad_frame_list is None: + bad_frame_list = [] + fra_num = int((end - beg) / sampling) - len(bad_frame_list) + for i in tqdm(range(sampling - 1 + beg, end, sampling), desc="Averaging %s images" % fra_num): + if bad_frame_list is not None: + if i in bad_frame_list: + flag = False + else: + flag = True + # print(i, flag) + if flag: + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + else: + for i in range(sampling - 1 + beg, end, sampling): + if bad_frame_list is not None: + if i in bad_frame_list: + flag = False + else: + flag = True + if flag: + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + + avg_img /= n + if plot_: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked-Averaged-Image-" % uid) + fig.colorbar(im) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if 
"uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() + return avg_img + + +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): + """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. If None, this function will extract averages for all + ROIs + + Returns + ------- + mean_intensity : array + The mean intensity of each ROI for all `images` + Dimensions: + len(mean_intensity) == len(index) + len(mean_intensity[0]) == len(images) + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + sx, sy = (FD.rdframe(FD.beg)).shape + if labeled_array.shape != (sx, sy): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (sx, sy, labeled_array.shape[0], labeled_array.shape[1]) + ) + # handle various input for `index` + if index is None: + index = list(np.unique(labeled_array)) + index.remove(0) + else: + try: + len(index) + except TypeError: + index = [index] + + index = np.array(index) + # print ('here') + good_ind = np.zeros(max(qind), dtype=np.int32) + good_ind[index - 1] = np.arange(len(index)) + 1 + w = np.where(good_ind[qind - 1])[0] + qind = good_ind[qind[w] - 1] + pixelist = pixelist[w] + + # pre-allocate an array for performance + # might be able to use list comprehension to make this faster + + mean_intensity = np.zeros([int((FD.end - FD.beg) / sampling), len(index)]) + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + # maxqind = max(qind) + norm = np.bincount(qind)[1:] + n = 0 + # for i in tqdm(range( FD.beg , FD.end )): + if not multi_cor: + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get ROI intensity of each frame"): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mean_intensity[n] = np.bincount(qind[pxlist], weights=v[w], minlength=len(index) + 1)[1:] + n += 1 + else: + ring_masks = [np.array(labeled_array == i, dtype=np.int64) for i in np.unique(labeled_array)[1:]] + inputs = range(len(ring_masks)) + go_through_FD(FD) + pool = Pool(processes=len(inputs)) + print("Starting assign the tasks...") + results = {} + for i in tqdm(inputs): + results[i] = apply_async(pool, _get_mean_intensity_one_q, (FD, sampling, ring_masks[i])) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + # return res + for i in inputs: + mean_intensity[:, i] = res[i] + print("ROI mean_intensit calculation is DONE!") + del results + del res + + mean_intensity /= norm + return mean_intensity, index + + +def _get_mean_intensity_one_q(FD, sampling, labels): + mi = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], 
dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + for i in range(FD.beg, FD.end, sampling): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mi[n] = np.bincount(qind[pxlist], weights=v[w], minlength=2)[1:] + n += 1 + return mi + + +def get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=1e10, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + plot_=False, + bad_frame_list=None, + save=False, + *argv, + **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + # mask &= img < hot_pixel_threshold + imgsum = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get each frame intensity"): + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + imgsum[n] = np.sum(v) + n += 1 + + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + plt.show() + + bad_frame_list_ = ( + np.where((np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold))[0] + + FD.beg + ) + + if bad_frame_list is not None: + bad_frame_list = np.unique(np.concatenate([bad_frame_list, bad_frame_list_])) + else: + bad_frame_list = bad_frame_list_ + + if len(bad_frame_list): + print("Bad frame list length is: %s" % len(bad_frame_list)) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_compress_05012024.py b/pyCHX/backups/pyCHX-backup/backups/chx_compress_05012024.py new file mode 100644 index 0000000..706cf7e --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_compress_05012024.py @@ -0,0 +1,1189 @@ +import os,shutil +from glob import iglob + +import matplotlib.pyplot as plt +from pyCHX.chx_libs import (np, roi, time, datetime, os, getpass, db, + LogNorm, RUN_GUI) +from pyCHX.chx_generic_functions import (create_time_slice,get_detector, get_sid_filenames, + load_data,reverse_updown,rot90_clockwise, get_eigerImage_per_file,copy_data,delete_data, ) + + +import struct +from tqdm import tqdm +from contextlib import closing + +from multiprocessing import Pool +import dill +import sys +import gc +import pickle as pkl +# imports handler from CHX +# this is where the decision is made whether or not to use dask +#from chxtools.handlers import EigerImages, EigerHandler +from eiger_io.fs_handler import EigerHandler,EigerImages + +def run_dill_encoded(what): + fun, args = dill.loads(what) + return fun(*args) + +def apply_async(pool, fun, args, callback=None): + return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback) + + +def map_async(pool, fun, args ): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) + + +def pass_FD(FD,n): + #FD.rdframe(n) + try: + 
FD.seekimg(n) + except: + pass + return False +def go_through_FD(FD): + if not pass_FD(FD,FD.beg): + for i in range(FD.beg, FD.end): + pass_FD(FD,i) + else: + pass + + + + + +def compress_eigerdata( images, mask, md, filename=None, force_compress=False, + bad_pixel_threshold=1e15, bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, nobytes=2,bins=1, bad_frame_list=None, + para_compress= False, num_sub=100, dtypes='uid',reverse =True, rot90=False, + num_max_para_process=500, with_pickle=False, direct_load_data=True, data_path=None, + images_per_file=100, copy_rawdata=True,new_path = '/tmp_data/data/'): + ''' + Init 2016, YG@CHX + DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data + Add copy_rawdata opt. + + ''' + + end= len(images)//bins + if filename is None: + filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid'] + if dtypes!= 'uid': + para_compress= False + else: + if para_compress: + images='foo' + #para_compress= True + #print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file( data_path ) + if data_path is None: + sud = get_sid_filenames(db[uid]) + data_path = sud[2][0] + if force_compress: + print ("Create a new compress file with filename as :%s."%filename) + if para_compress: + # stop connection to be before forking... (let it reset again) + db.reg.disconnect() + db.mds.reset_connection() + print( 'Using a multiprocess to compress the data.') + return para_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, + bins=bins, num_sub=num_sub, dtypes=dtypes, rot90=rot90, + reverse=reverse, num_max_para_process=num_max_para_process, + with_pickle= with_pickle, direct_load_data= direct_load_data, + data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata,new_path=new_path) + else: + return init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, + images_per_file=images_per_file) + else: + if not os.path.exists( filename ): + print ("Create a new compress file with filename as :%s."%filename) + if para_compress: + print( 'Using a multiprocess to compress the data.') + return para_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins, + num_sub=num_sub, dtypes=dtypes, reverse=reverse,rot90=rot90, + num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path,images_per_file=images_per_file,copy_rawdata=copy_rawdata) + else: + return init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) + else: + print ("Using already created compressed file with filename as :%s."%filename) + beg=0 + return read_compressed_eigerdata( mask, filename, beg, end, + bad_pixel_threshold=bad_pixel_threshold, 
hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path, images_per_file=images_per_file) + + + +def read_compressed_eigerdata( mask, filename, beg, end, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False, + direct_load_data=False,data_path=None,images_per_file=100): + ''' + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + ''' + #should use try and except instead of with_pickle in the future! + CAL = False + if not with_pickle: + CAL = True + else: + try: + mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) ) + except: + CAL = True + if CAL: + FD = Multifile( filename, beg, end) + imgsum = np.zeros( FD.end- FD.beg, dtype= np.float64 ) + avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float64 ) + imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1, + bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, plot_ = False, + bad_frame_list=bad_frame_list) + avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ ) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + +def para_compress_eigerdata( images, mask, md, filename, num_sub=100, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,rot90=False, + num_max_para_process=500, cpu_core_number=72, with_pickle=True, + direct_load_data=False, data_path=None,images_per_file=100, + copy_rawdata=True,new_path = '/tmp_data/data/'): + + data_path_ = data_path + if dtypes=='uid': + uid= md['uid'] #images + if not direct_load_data: + detector = get_detector( db[uid ] ) + images_ = load_data( uid, detector, reverse= reverse,rot90=rot90 ) + else: + #print('Here for images_per_file: %s'%images_per_file) + #images_ = EigerImages( data_path, images_per_file=images_per_file) + #print('here') + if not copy_rawdata: + images_ = EigerImages(data_path,images_per_file, md) + else: + print('Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.') + print('Copying...') + copy_data( data_path, new_path ) + #print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages( new_master_file, images_per_file, md) + #print(md) + if reverse: + images_ = reverse_updown( images_ ) # Why not np.flipud? 
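+            # when both flags are set, the up/down flip above is applied first and
+            # the clockwise 90-degree rotation below is applied second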
+ if rot90: + images_ = rot90_clockwise( images_ ) + + N= len(images_) + + else: + N = len(images) + N = int( np.ceil( N/ bins ) ) + Nf = int( np.ceil( N/ num_sub ) ) + if Nf > cpu_core_number: + print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number) + num_sub_old = num_sub + num_sub = int( np.ceil(N/cpu_core_number)) + Nf = int( np.ceil( N/ num_sub ) ) + print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub )) + create_compress_header( md, filename +'-header', nobytes, bins, rot90=rot90 ) + #print( 'done for header here') + #print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename, + num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes, + num_max_para_process=num_max_para_process, + reverse = reverse,rot90=rot90, + direct_load_data=direct_load_data, data_path=data_path_, + images_per_file=images_per_file) + + res_ = [ results[k].get() for k in list(sorted(results.keys())) ] + imgsum = np.zeros( N ) + bad_frame_list = np.zeros( N, dtype=bool ) + good_count = 1 + for i in range( Nf ): + mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i] + imgsum[i*num_sub: (i+1)*num_sub] = imgsum_ + bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_ + if i==0: + mask = mask_ + avg_img = np.zeros_like( avg_img_ ) + else: + mask *= mask_ + if not np.sum( np.isnan( avg_img_)): + avg_img += avg_img_ + good_count += 1 + + bad_frame_list = np.where( bad_frame_list )[0] + avg_img /= good_count + + if len(bad_frame_list): + print ('Bad frame list are: %s' %bad_frame_list) + else: + print ('No bad frames are involved.') + print( 'Combining the seperated compressed files together...') + combine_compressed( filename, Nf, del_old=True) + del results + del res_ + if with_pickle: + pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) + if copy_rawdata: + delete_data( data_path, new_path ) + return mask, avg_img, imgsum, bad_frame_list + +def combine_compressed( filename, Nf, del_old=True): + old_files = [filename +'-header'] + for i in range(Nf): + old_files.append(filename + '_temp-%i.tmp' % i) + combine_binary_files(filename, old_files, del_old) + +def combine_binary_files(filename, old_files, del_old = False): + '''Combine binary files together''' + fn_ = open(filename, 'wb') + for ftemp in old_files: + shutil.copyfileobj( open(ftemp, 'rb'), fn_) + if del_old: + os.remove( ftemp ) + fn_.close() + +def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images', + reverse =True, rot90=False, + num_max_para_process=50,direct_load_data=False, data_path=None, + images_per_file=100): + ''' + parallelly compressed eiger data without header, this function is for parallel compress + ''' + if dtypes=='uid': + uid= md['uid'] #images + if not direct_load_data: + detector = get_detector( db[uid ] ) + images_ = load_data( uid, detector, reverse= reverse, rot90=rot90 ) + else: + images_ = EigerImages(data_path, images_per_file, md) + if reverse: + images_ = reverse_updown( images_ ) + if rot90: + images_ = rot90_clockwise( images_ ) + + N= len(images_) + + else: + N = len(images) + + #N = int( np.ceil( N/ bins ) ) + num_sub *= bins + if N%num_sub: + Nf = N// num_sub +1 + print('The 
average image intensity would be slightly not correct, about 1% error.') + print( 'Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image') + else: + Nf = N//num_sub + print( 'It will create %i temporary files for parallel compression.'%Nf) + + if Nf> num_max_para_process: + N_runs = np.int( np.ceil( Nf/float(num_max_para_process))) + print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process )) + else: + N_runs= 1 + result = {} + #print( mask_filename )# + '*'* 10 + 'here' ) + for nr in range( N_runs ): + if (nr+1)*num_max_para_process > Nf: + inputs= range( num_max_para_process*nr, Nf ) + else: + inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) ) + fns = [ filename + '_temp-%i.tmp'%i for i in inputs] + #print( nr, inputs, ) + pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 ) + #print( inputs ) + for i in inputs: + if i*num_sub <= N: + result[i] = pool.apply_async( segment_compress_eigerdata, [ + images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,rot90, direct_load_data, data_path,images_per_file ] ) + + pool.close() + pool.join() + pool.terminate() + return result + +def segment_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, nobytes=4, bins=1, + N1=None, N2=None, dtypes='images',reverse =True, rot90=False,direct_load_data=False, data_path=None,images_per_file=100 ): + ''' + Create a compressed eiger data without header, this function is for parallel compress + for parallel compress don't pass any non-scalar parameters + ''' + if dtypes=='uid': + uid= md['uid'] #images + if not direct_load_data: + detector = get_detector( db[uid ] ) + images = load_data( uid, detector, reverse= reverse, rot90=rot90 )[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] + if reverse: + images = reverse_updown( EigerImages(data_path, images_per_file, md) )[N1:N2] + if rot90: + images = rot90_clockwise( images ) + + Nimg_ = len( images) + M,N = images[0].shape + avg_img = np.zeros( [M,N], dtype= np.float64 ) + Nopix = float( avg_img.size ) + n=0 + good_count = 0 + #frac = 0.0 + if nobytes==2: + dtype= np.int16 + elif nobytes==4: + dtype= np.int32 + elif nobytes==8: + dtype=np.float64 + else: + print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype= np.int32 + + + #Nimg = Nimg_//bins + Nimg = int( np.ceil( Nimg_ / bins ) ) + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bins )) + #print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros( Nimg ) + if bins!=1: + #print('The frames will be binned by %s'%bins) + dtype=np.float64 + + fp = open( filename,'wb' ) + for n in range(Nimg): + t1,t2 = time_edge[n] + if bins!=1: + img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype) + else: + img = np.array( images[t1], dtype=dtype) + mask &= img < hot_pixel_threshold + p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data + v = np.ravel( np.array( img, dtype= dtype )) [p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): + dlen = 0 + fp.write( struct.pack( '@I', dlen )) + else: + np.ravel( avg_img )[p] += v + good_count +=1 + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( 
'@{}i'.format( dlen), *p)) + if bins==1: + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) + else: + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1 + del p,v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write('#') + sys.stdout.flush() + #del images, mask, avg_img, imgsum, bad_frame_list + #print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + + +def create_compress_header( md, filename, nobytes=4, bins=1, rot90=False ): + ''' + Create the head for a compressed eiger data, this function is for parallel compress + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + flag = True + #print( list(md.keys()) ) + #print(md) + if 'pixel_mask' in list(md.keys()): + sx,sy = md['pixel_mask'].shape[0], md['pixel_mask'].shape[1] + elif 'img_shape' in list(md.keys()): + sx,sy = md['img_shape'][0], md['img_shape'][1] + else: + sx,sy= 2167, 2070 #by default for 4M + #print(flag) + klst = [ 'beam_center_x','beam_center_y', 'count_time','detector_distance', + 'frame_time','incident_wavelength', 'x_pixel_size','y_pixel_size'] + vs = [ 0 ,0, 0, 0, + 0, 0, 75, 75] + for i, k in enumerate(klst): + if k in list(md.keys()): + vs[i] = md[k] + if flag: + if rot90: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + vs[0], vs[1], vs[2], vs[3], + vs[4], vs[5], vs[6], vs[7], + nobytes,sx, sy, + 0, sx, + 0,sy ) + + else: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + vs[0], vs[1], vs[2], vs[3], + vs[4], vs[5], vs[6], vs[7], +#md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, sy,sx, + 0, sy, + 0, sx + ) + + + + fp.write( Header) + fp.close() + + + +def init_compress_eigerdata( images, mask, md, filename, + bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True, + reverse =True, rot90=False, + direct_load_data=False, data_path=None,images_per_file=100, + ): + ''' + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + if 'count_time' not in list( md.keys() ): + md['count_time']=0 + if 'detector_distance' not in list( md.keys() ): + md['detector_distance']=0 + if 'frame_time' not in list( md.keys() ): + md['frame_time']=0 + if 'incident_wavelength' not in list( md.keys() ): + md['incident_wavelength']=0 + if 'y_pixel_size' not in list( md.keys() ): + md['y_pixel_size']=0 + if 'x_pixel_size' not in list( md.keys() ): + md['x_pixel_size']=0 + if 'beam_center_x' not in list( md.keys() ): + md['beam_center_x']=0 + if 'beam_center_y' not in list( md.keys() ): + md['beam_center_y']=0 + + if not 
rot90: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0], + 0, md['pixel_mask'].shape[1], + 0, md['pixel_mask'].shape[0] + ) + else: + Header = struct.pack('@16s8d7I916x',b'Version-COMP0001', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, md['pixel_mask'].shape[0], md['pixel_mask'].shape[1], + 0, md['pixel_mask'].shape[0], + 0, md['pixel_mask'].shape[1] + ) + + fp.write( Header) + + Nimg_ = len( images) + avg_img = np.zeros_like( images[0], dtype= np.float64 ) + Nopix = float( avg_img.size ) + n=0 + good_count = 0 + frac = 0.0 + if nobytes==2: + dtype= np.int16 + elif nobytes==4: + dtype= np.int32 + elif nobytes==8: + dtype=np.float64 + else: + print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype= np.int32 + + + Nimg = Nimg_//bins + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bins )) + + imgsum = np.zeros( Nimg ) + if bins!=1: + print('The frames will be binned by %s'%bins) + + for n in tqdm( range(Nimg) ): + t1,t2 = time_edge[n] + img = np.average( images[t1:t2], axis=0 ) + mask &= img < hot_pixel_threshold + p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data + v = np.ravel( np.array( img, dtype= dtype )) [p] + dlen = len(p) + imgsum[n] = v.sum() + if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold): + #if imgsum[n] >=bad_pixel_threshold : + dlen = 0 + fp.write( struct.pack( '@I', dlen )) + else: + np.ravel(avg_img )[p] += v + good_count +=1 + frac += dlen/Nopix + #s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *p)) + if bins==1: + if nobytes!=8: + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) + else: + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) + else: + fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) + #n +=1 + + fp.close() + frac /=good_count + print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) ) + avg_img /= good_count + + bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0] + #bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + #bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + #bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + + + if len(bad_frame_list): + print ('Bad frame list are: %s' %bad_frame_list) + else: + print ('No bad frames are involved.') + if with_pickle: + pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) ) + return mask, avg_img, imgsum, bad_frame_list + + + +""" Description: + + This is code that Mark wrote to open the multifile format + in compressed mode, translated to python. + This seems to work for DALSA, FCCD and EIGER in compressed mode. 
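+    (The header is packed with struct format '@16s8d7I916x': a 16-byte magic
+    string, eight doubles, seven unsigned ints and 916 pad bytes = 1024 bytes.)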
+ It should be included in the respective detector.i files + Currently, this refers to the compression mode being '6' + Each file is image descriptor files chunked together as follows: + Header (1024 bytes) + |--------------IMG N begin--------------| + | Dlen + |---------------------------------------| + | Pixel positions (dlen*4 bytes | + | (0 based indexing in file) | + |---------------------------------------| + | Pixel data(dlen*bytes bytes) | + | (bytes is found in header | + | at position 116) | + |--------------IMG N end----------------| + |--------------IMG N+1 begin------------| + |----------------etc.....---------------| + + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + + +""" + + +class Multifile: + '''The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + ''' + def __init__(self,filename,beg,end, reverse=False ): + '''Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + ''' + self.FID = open(filename,"rb") +# self.FID.seek(0,os.SEEK_SET) + self.filename = filename + #br: bytes read + br = self.FID.read(1024) + self.beg=beg + self.end=end + self.reverse=reverse + ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + 'bytes', + 'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' + ] + + magic = struct.unpack('@16s', br[:16]) + md_temp = struct.unpack('@8d7I916x', br[16:]) + self.md = dict(zip(ms_keys, md_temp)) + + self.imgread=0 + self.recno = 0 + + if reverse: + nrows = self.md['nrows'] + ncols = self.md['ncols'] + self.md['nrows'] = ncols + self.md['ncols'] = nrows + rbeg = self.md['rows_begin'] + rend = self.md['rows_end'] + cbeg = self.md['cols_begin'] + cend = self.md['cols_end'] + self.md['rows_begin']=cbeg + self.md['rows_end']=cend + self.md['cols_begin']=rbeg + self.md['cols_end']=rend + + + + # some initialization stuff + self.byts = self.md['bytes'] + if (self.byts==2): + self.valtype = np.uint16 + elif (self.byts == 4): + self.valtype = np.uint32 + elif (self.byts == 8): + self.valtype = np.float64 + #now convert pieces of these bytes to our data + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + + # now read first image + #print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + + def _readHeader(self): + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + + def _readImageRaw(self): + + p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen) + v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen) + self.imgread=1 + return(p,v) + + def _readImage(self): + (p,v)=self._readImageRaw() + img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) ) + np.put( np.ravel(img), p, v ) + return(img) + + def seekimg(self,n=None): + + '''Position file to read the nth image. 
+ For now only reads first image ignores n + ''' + # the logic involving finding the cursor position + if (n is None): + n = self.recno + if (n < self.beg or n > self.end): + raise IndexError('Error, record out of range') + #print (n, self.recno, self.FID.tell() ) + if ((n == self.recno) and (self.imgread==0)): + pass # do nothing + + else: + if (n <= self.recno): #ensure cursor less than search pos + self.FID.seek(1024,os.SEEK_SET) + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + self.recno = 0 + self.imgread=0 + if n == 0: + return + #have to iterate on seeking since dlen varies + #remember for rec recno, cursor is always at recno+1 + if(self.imgread==0 ): #move to next header if need to + self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) + for i in range(self.recno+1,n): + #the less seeks performed the faster + #print (i) + self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0] + #print 's',self.dlen + self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR) + + # we are now at recno in file, read the header and data + #self._clearImage() + self._readHeader() + self.imgread=0 + self.recno = n + def rdframe(self,n): + if self.seekimg(n)!=-1: + return(self._readImage()) + + def rdrawframe(self,n): + if self.seekimg(n)!=-1: + return(self._readImageRaw()) + + + +class Multifile_Bins( object ): + ''' + Bin a compressed file with bins number + See Multifile for details for Multifile_class + ''' + def __init__(self, FD, bins=100): + ''' + FD: the handler of a compressed Eiger frames + bins: bins number + ''' + + self.FD=FD + if (FD.end - FD.beg)%bins: + print ('Please give a better bins number and make the length of FD/bins= integer') + else: + self.bins = bins + self.md = FD.md + #self.beg = FD.beg + self.beg = 0 + Nimg = (FD.end - FD.beg) + slice_num = Nimg//bins + self.end = slice_num + self.time_edge = np.array(create_time_slice( N= Nimg, + slice_num= slice_num, slice_width= bins )) + FD.beg + self.get_bin_frame() + + def get_bin_frame(self): + FD= self.FD + self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] ) + for n in tqdm( range(len(self.time_edge))): + #print (n) + t1,t2 = self.time_edge[n] + #print( t1, t2) + self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, + plot_ = False, show_progress = False ) + def rdframe(self,n): + return self.frames[:,:,n] + + def rdrawframe(self,n): + x_= np.ravel( self.rdframe(n) ) + p= np.where( x_ ) [0] + v = np.array( x_[ p ]) + return ( np.array(p, dtype=np.int32), v) + + +class MultifileBNL: + ''' + Re-write multifile from scratch. + ''' + HEADER_SIZE = 1024 + def __init__(self, filename, mode='rb'): + ''' + Prepare a file for reading or writing. 
+ mode : either 'rb' or 'wb' + ''' + if mode == 'wb': + raise ValueError("Write mode 'wb' not supported yet") + if mode != 'rb' and mode != 'wb': + raise ValueError("Error, mode must be 'rb' or 'wb'" + "got : {}".format(mode)) + self._filename = filename + self._mode = mode + # open the file descriptor + # create a memmap + if mode == 'rb': + self._fd = np.memmap(filename, dtype='c') + elif mode == 'wb': + self._fd = open(filename, "wb") + # these are only necessary for writing + self.md = self._read_main_header() + self._cols = int(self.md['nrows']) + self._rows = int(self.md['ncols']) + # some initialization stuff + self.nbytes = self.md['bytes'] + if (self.nbytes==2): + self.valtype = " self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # dlen is 4 bytes + cur = self.frame_indexes[n] + dlen = np.frombuffer(self._fd[cur:cur+4], dtype=" nbytes + vals = self._fd[cur: cur+dlen*self.nbytes] + vals = np.frombuffer(vals, dtype=self.valtype) + return pos, vals + def rdframe(self, n): + # read header then image + pos, vals = self._read_raw(n) + img = np.zeros((self._rows*self._cols,)) + img[pos] = vals + return img.reshape((self._rows, self._cols)) + def rdrawframe(self, n): + # read header then image + return self._read_raw(n) + +class MultifileBNLCustom(MultifileBNL): + def __init__(self, filename, beg=0, end=None, **kwargs): + super().__init__(filename, **kwargs) + self.beg = beg + if end is None: + end = self.Nframes-1 + self.end = end + def rdframe(self, n): + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + #return super().rdframe(n - self.beg) + return super().rdframe( n ) + def rdrawframe(self, n): + #return super().rdrawframe(n - self.beg) + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + return super().rdrawframe(n ) + + + +def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None, + show_progress=True, *argv,**kwargs): + '''Get average imagef from a data_series by every sampling number to save time''' + #avg_img = np.average(data_series[:: sampling], axis=0) + + if beg is None: + beg = FD.beg + if end is None: + end = FD.end + + avg_img = FD.rdframe(beg) + n=1 + flag=True + if show_progress: + #print( sampling-1 + beg , end, sampling ) + if bad_frame_list is None: + bad_frame_list =[] + fra_num = int( (end - beg )/sampling ) - len( bad_frame_list ) + for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num): + if bad_frame_list is not None: + if i in bad_frame_list: + flag= False + else: + flag=True + #print(i, flag) + if flag: + (p,v) = FD.rdrawframe(i) + if len(p)>0: + np.ravel(avg_img )[p] += v + n += 1 + else: + for i in range( sampling-1 + beg , end, sampling ): + if bad_frame_list is not None: + if i in bad_frame_list: + flag= False + else: + flag=True + if flag: + (p,v) = FD.rdrawframe(i) + if len(p)>0: + np.ravel(avg_img )[p] += v + n += 1 + + avg_img /= n + if plot_: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + im = ax.imshow(avg_img , cmap='viridis',origin='lower', + norm= LogNorm(vmin=0.001, vmax=1e2)) + #ax.set_title("Masked Averaged Image") + ax.set_title('uid= %s--Masked-Averaged-Image-'%uid) + fig.colorbar(im) + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = 
kwargs['uid'] + else: + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-"%uid + '.png' + plt.savefig( fp, dpi=fig.dpi) + #plt.show() + return avg_img + + + +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False): + """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. If None, this function will extract averages for all + ROIs + + Returns + ------- + mean_intensity : array + The mean intensity of each ROI for all `images` + Dimensions: + len(mean_intensity) == len(index) + len(mean_intensity[0]) == len(images) + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices( labeled_array ) + sx,sy = ( FD.rdframe(FD.beg) ).shape + if labeled_array.shape != ( sx,sy ): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( sx,sy, labeled_array.shape[0], labeled_array.shape[1]) ) + # handle various input for `index` + if index is None: + index = list(np.unique(labeled_array)) + index.remove(0) + else: + try: + len(index) + except TypeError: + index = [index] + + index = np.array( index ) + #print ('here') + good_ind = np.zeros( max(qind), dtype= np.int32 ) + good_ind[ index -1 ] = np.arange( len(index) ) +1 + w = np.where( good_ind[qind -1 ] )[0] + qind = good_ind[ qind[w] -1 ] + pixelist = pixelist[w] + + + # pre-allocate an array for performance + # might be able to use list comprehension to make this faster + + mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] ) + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + #maxqind = max(qind) + norm = np.bincount( qind )[1:] + n= 0 + #for i in tqdm(range( FD.beg , FD.end )): + if not multi_cor: + for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ): + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:] + n +=1 + else: + ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ] + inputs = range( len(ring_masks) ) + go_through_FD(FD) + pool = Pool(processes= len(inputs) ) + print( 'Starting assign the tasks...') + results = {} + for i in tqdm ( inputs ): + results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) ) + pool.close() + print( 'Starting running the tasks...') + res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ] + #return res + for i in inputs: + mean_intensity[:,i] = res[i] + print( 'ROI mean_intensit calculation is DONE!') + del results + del res + + mean_intensity /= norm + return mean_intensity, index + + +def _get_mean_intensity_one_q( FD, sampling, labels ): + mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) ) + n=0 + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( 
FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + for i in range( FD.beg, FD.end, sampling ): + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:] + n +=1 + return mi + + + +def get_each_frame_intensityc( FD, sampling = 1, + bad_pixel_threshold=1e10, bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs): + '''Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + ''' + + #print ( argv, kwargs ) + #mask &= img < hot_pixel_threshold + imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) ) + n=0 + for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ): + (p,v) = FD.rdrawframe(i) + if len(p)>0: + imgsum[n] = np.sum( v ) + n += 1 + + if plot_: + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + fig, ax = plt.subplots() + ax.plot( imgsum,'bo') + ax.set_title('uid= %s--imgsum'%uid) + ax.set_xlabel( 'Frame_bin_%s'%sampling ) + ax.set_ylabel( 'Total_Intensity' ) + + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] + else: + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + + plt.show() + + bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg + + if bad_frame_list is not None: + bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) ) + else: + bad_frame_list = bad_frame_list_ + + if len(bad_frame_list): + print ('Bad frame list length is: %s' %len(bad_frame_list)) + else: + print ('No bad frames are involved.') + return imgsum,bad_frame_list + + + + diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_correlationc_05012024.py b/pyCHX/backups/pyCHX-backup/backups/chx_correlationc_05012024.py new file mode 100644 index 0000000..af0dbd4 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_correlationc_05012024.py @@ -0,0 +1,1676 @@ +""" +June 10, Developed by Y.G.@CHX with the assistance of Mark Sutton +yuzhang@bnl.gov +This module is for computation of time correlation by using compressing algorithm +""" + + +from __future__ import absolute_import, division, print_function + +from skbeam.core.utils import multi_tau_lags +from skbeam.core.roi import extract_label_indices +from collections import namedtuple +import numpy as np +import skbeam.core.roi as roi + +import logging +logger = logging.getLogger(__name__) +from tqdm import tqdm + + +def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm, + label_array, num_bufs, num_pixels, img_per_level, + level, buf_no, norm, lev_len): + """Reference implementation of the inner loop of multi-tau one time + correlation + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. 
+ Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + #maxqind=G.shape[1] + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = int( level * num_bufs / 2 + i ) + delay_no = (buf_no - i) % num_bufs + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level+1][ind] + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + for w, arr in zip([past_img*future_img, past_img, future_img], + [G, past_intensity_norm, future_intensity_norm]): + binned = np.bincount(label_array, weights=w)[1:] + #nonz = np.where(w)[0] + #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + arr[t_index] += ((binned / num_pixels - + arr[t_index]) / normalize) + return None # modifies arguments in place! + + + +def _one_time_process_error(buf, G, past_intensity_norm, future_intensity_norm, + label_array, num_bufs, num_pixels, img_per_level, + level, buf_no, norm, lev_len, + G_err, past_intensity_norm_err, future_intensity_norm_err ): + """Reference implementation of the inner loop of multi-tau one time + correlation with the calculation of errorbar (statistical error due to multipixel measurements ) + The statistical error: var( g2(Q) ) = sum( [g2(Qi)- g2(Q)]^2 )/N(N-1), Lumma, RSI, 2000 + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. 
+ Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + #maxqind=G.shape[1] + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = int( level * num_bufs / 2 + i ) + delay_no = (buf_no - i) % num_bufs + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level+1][ind] + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + + #for w, arr in zip([past_img*future_img, past_img, future_img], + # [G, past_intensity_norm, future_intensity_norm, + # ]): + # binned = np.bincount(label_array, weights=w)[1:] + # #nonz = np.where(w)[0] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # arr[t_index] += ((binned / num_pixels - + # arr[t_index]) / normalize) + for w, arr in zip([past_img*future_img, past_img, future_img], + [ + G_err, past_intensity_norm_err, future_intensity_norm_err, + ]): + arr[t_index] += ( w - arr[t_index]) / normalize + return None # modifies arguments in place! 
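+
+# Illustrative sketch only (not part of the original module; the function and
+# argument names below are placeholders): how per-pixel accumulators such as those
+# kept by _one_time_process_error can be turned into the statistical error quoted
+# in its docstring, var(g2(Q)) = sum_i [g2(Q_i) - g2(Q)]^2 / (N*(N-1)), Lumma, RSI, 2000.
+def _example_g2_error(g2_pixel, qind):
+    """g2_pixel: (num_lags, num_pixels) per-pixel g2, e.g. G_all/(past_all*future_all);
+    qind: 1-based ROI label of each pixel. Returns the (num_lags, num_rois) error of g2."""
+    num_rois = int(qind.max())
+    err = np.zeros((g2_pixel.shape[0], num_rois))
+    for q in range(1, num_rois + 1):
+        cols = g2_pixel[:, qind == q]  # per-pixel g2 curves belonging to ROI q
+        n = cols.shape[1]
+        mean = cols.mean(axis=1)
+        # standard error of the ROI-averaged g2 from the pixel-to-pixel scatter
+        err[:, q - 1] = np.sqrt(((cols - mean[:, None]) ** 2).sum(axis=1) / max(n * (n - 1), 1))
+    return err
+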
+ + +results = namedtuple( + 'correlation_results', + ['g2', 'lag_steps', 'internal_state'] +) + +_internal_state = namedtuple( + 'correlation_state', + ['buf', + 'G', + 'past_intensity', + 'future_intensity', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'norm', + 'lev_len'] +) + +_internal_state_err = namedtuple( + 'correlation_state', + ['buf', + 'G', + 'past_intensity', + 'future_intensity', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'norm', + 'lev_len', + 'G_all', + 'past_intensity_all', + 'future_intensity_all' + ] +) + + +_two_time_internal_state = namedtuple( + 'two_time_correlation_state', + ['buf', + 'img_per_level', + 'label_array', + 'track_level', + 'cur', + 'pixel_list', + 'num_pixels', + 'lag_steps', + 'g2', + 'count_level', + 'current_img_time', + 'time_ind', + 'norm', + 'lev_len'] +) + + +def _validate_and_transform_inputs(num_bufs, num_levels, labels): + """ + This is a helper function to validate inputs and create initial state + inputs for both one time and two time correlation + Parameters + ---------- + num_bufs : int + num_levels : int + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + Returns + ------- + label_array : array + labels of the required region of interests(ROI's) + pixel_list : array + 1D array of indices into the raveled image for all + foreground pixels (labeled nonzero) + e.g., [5, 6, 7, 8, 14, 15, 21, 22] + num_rois : int + number of region of interests (ROI) + num_pixels : array + number of pixels in each ROI + lag_steps : array + the times at which the correlation was computed + buf : array + image data for correlation + img_per_level : array + to track how many images processed in each level + track_level : array + to track processing each level + cur : array + to increment the buffer + norm : dict + to track bad images + lev_len : array + length of each levels + """ + if num_bufs % 2 != 0: + raise ValueError("There must be an even number of `num_bufs`. You " + "provided %s" % num_bufs) + label_array, pixel_list = extract_label_indices(labels) + + # map the indices onto a sequential list of integers starting at 1 + label_mapping = {label: n+1 + for n, label in enumerate(np.unique(label_array))} + # remap the label array to go from 1 -> max(_labels) + for label, n in label_mapping.items(): + label_array[label_array == label] = n + + # number of ROI's + num_rois = len(label_mapping) + + # stash the number of pixels in the mask + num_pixels = np.bincount(label_array)[1:] + + # Convert from num_levels, num_bufs to lag frames. + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + + # these norm and lev_len will help to find the one time correlation + # normalization norm will updated when there is a bad image + norm = {key: [0] * len(dict_lag[key]) for key in (dict_lag.keys())} + lev_len = np.array([len(dict_lag[i]) for i in (dict_lag.keys())]) + + # Ring buffer, a buffer with periodic boundary conditions. + # Images must be keep for up to maximum delay in buf. 
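+    # buf has shape (num_levels, num_bufs, n_roi_pixels): each multi-tau level keeps
+    # its own ring of num_bufs frames, storing only the pixels inside the ROIs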
+ buf = np.zeros((num_levels, num_bufs, len(pixel_list)), + dtype=np.float64) + # to track how many images processed in each level + img_per_level = np.zeros(num_levels, dtype=np.int64) + # to track which levels have already been processed + track_level = np.zeros(num_levels, dtype=bool) + # to increment buffer + cur = np.ones(num_levels, dtype=np.int64) + + return (label_array, pixel_list, num_rois, num_pixels, + lag_steps, buf, img_per_level, track_level, cur, + norm, lev_len) + +def _init_state_one_time(num_levels, num_bufs, labels, cal_error = False): + """Initialize a stateful namedtuple for the generator-based multi-tau + for one time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + Returns + ------- + internal_state : namedtuple + The namedtuple that contains all the state information that + `lazy_one_time` requires so that it can be used to pick up + processing after it was interrupted + """ + (label_array, pixel_list, num_rois, num_pixels, lag_steps, buf, + img_per_level, track_level, cur, norm, + lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # G holds the un normalized auto- correlation result. We + # accumulate computations into G as the algorithm proceeds. + + G = np.zeros(( int( (num_levels + 1) * num_bufs / 2), num_rois), + dtype=np.float64) + + # matrix for normalizing G into g2 + past_intensity = np.zeros_like(G) + # matrix for normalizing G into g2 + future_intensity = np.zeros_like(G) + if cal_error: + G_all = np.zeros(( int( (num_levels + 1) * num_bufs / 2), len(pixel_list)), + dtype=np.float64) + + # matrix for normalizing G into g2 + past_intensity_all = np.zeros_like(G_all) + # matrix for normalizing G into g2 + future_intensity_all = np.zeros_like(G_all) + return _internal_state_err( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + G_all, + past_intensity_all, + future_intensity_all + ) + else: + return _internal_state( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + ) + + +def fill_pixel( p, v, pixelist): + fra_pix = np.zeros_like( pixelist ) + fra_pix[ np.in1d( pixelist,p ) ] = v[np.in1d( p, pixelist )] + return fra_pix + + + + + +def lazy_one_time(FD, num_levels, num_bufs, labels, + internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): + + """Generator implementation of 1-time multi-tau correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate +The number of bins (of size 1) is one larger than the largest value in +`x`. If `minlength` is specified, there will be at least this number +of bins in the output array (though it will be longer if necessary, +depending on the contents of `x`). +Each bin gives the number of occurrences of its index value in `x`. +If `weights` is specified the input array is weighted by it, i.e. if a +value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead +of ``out[n] += 1``. + + Jan 2, 2018 YG. 
Add error bar calculation + + Parameters + ---------- + image_iterable : FD, a compressed eiger file by Multifile class + num_levels : int + how many generations of downsampling to perform, i.e., the depth of + the binomial tree of averaged frames + num_bufs : int, must be even + maximum lag step to compute in each generation of downsampling + labels : array + Labeled array of the same shape as the image stack. + Each ROI is represented by sequential integers starting at one. For + example, if you have four ROIs, they must be labeled 1, 2, 3, + 4. Background is labeled as 0 + internal_state : namedtuple, optional + internal_state is a bucket for all of the internal state of the + generator. It is part of the `results` object that is yielded from + this generator + + For the sake of normalization: + + imgsum: a list with the same length as FD, sum of each frame + qp, iq: the circular average radius (in pixel) and intensity + center: beam center + + Yields + ------ + +Returns +------- + + A `results` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - `g2`: the normalized correlation + shape is (len(lag_steps), num_rois) + - `lag_steps`: the times at which the correlation was computed + - `_internal_state`: all of the internal state. Can be passed back in + to `lazy_one_time` as the `internal_state` parameter + Notes + ----- + The normalized intensity-intensity time-autocorrelation function + is defined as + .. math:: + g_2(q, t') = \\frac{ }{^2} + t' > 0 + Here, ``I(q, t)`` refers to the scattering strength at the momentum + transfer vector ``q`` in reciprocal space at time ``t``, and the brackets + ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes + the delay time + This implementation is based on published work. [1]_ + References + ---------- + .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton, + "Area detector based photon correlation in the regime of + short data batches: Data reduction for dynamic x-ray + scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000. + """ + + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + + if bad_frame_list is None: + bad_frame_list=[] + for i in tqdm(range( FD.beg , FD.end )): + if i in bad_frame_list: + fra_pix[:]= np.nan + else: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + + if imgsum is None: + if norm is None: + fra_pix[ pxlist] = v[w] + else: + S = norm.shape + if len(S)>1: + fra_pix[ pxlist] = v[w]/ norm[i,pxlist] #-1.0 + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + else: + if norm is None: + fra_pix[ pxlist] = v[w] / imgsum[i] + else: + S = norm.shape + if len(S)>1: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[i,pxlist] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. 
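+        # s.cur[0] - 1 is the write slot of the level-0 ring (a negative index simply
+        # wraps to the last slot); fra_pix holds this frame's (optionally normalized)
+        # ROI-pixel intensities and is zeroed again right after being buffered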
+ s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + + #print( i, len(p), len(w), len( pixelist)) + + #print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = (1 + (s.cur[level - 1] - 2) % num_bufs) + s.cur[level] = ( + 1 + s.cur[level] % num_bufs) + + s.buf[level, s.cur[level] - 1] = (( + s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1]) / 2) + + # make the track_level zero once that level is processed + s.track_level[level] = False + + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. + buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. 
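+        # g_max is the first lag index whose normalization is still zero (not yet
+        # populated at the current frame); g2 is only reported for lags below it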
+ if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min( g_max1, g_max2) + g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * + s.future_intensity[:g_max])) + yield results(g2, s.lag_steps[:g_max], s) + else: + yield results(None,s.lag_steps, s) + + + +def lazy_one_time_debug(FD, num_levels, num_bufs, labels, + internal_state=None, bad_frame_list=None, imgsum=None, norm = None, cal_error=False ): + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + if bad_frame_list is None: + bad_frame_list=[] + for i in range( FD.beg , FD.end ): + print(i) + if i in bad_frame_list: + fra_pix[:]= np.nan + else: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + if imgsum is None: + if norm is None: + fra_pix[ pxlist] = v[w] + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + else: + if norm is None: + fra_pix[ pxlist] = v[w] / imgsum[i] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + #print( i, len(p), len(w), len( pixelist)) + #print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = (1 + (s.cur[level - 1] - 2) % num_bufs) + s.cur[level] = ( + 1 + s.cur[level] % num_bufs) + + s.buf[level, s.cur[level] - 1] = (( + s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1]) / 2) + # make the track_level zero once that level is processed + s.track_level[level] = False + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. 
+ buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_error(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len, + s.G_all, s.past_intensity_all, s.future_intensity_all) + else: + _one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity, + s.label_array, num_bufs, s.num_pixels, + s.img_per_level, level, buf_no, s.norm, s.lev_len) + + level += 1 + # Checking whether there is next level for processing + processing = level < num_levels + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. + if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min( g_max1, g_max2) + g2 = (s.G[:g_max] / (s.past_intensity[:g_max] * + s.future_intensity[:g_max])) + yield results(g2, s.lag_steps[:g_max], s) + #yield( i ) + + else: + yield results(None,s.lag_steps, s) + + + +def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): + """ + This model will provide normalized intensity-intensity time + correlation data to be minimized. + Parameters + ---------- + lags : array + delay time + beta : float + optical contrast (speckle contrast), a sample-independent + beamline parameter + relaxation_rate : float + relaxation time associated with the samples dynamics. + baseline : float, optional + baseline of one time correlation + equal to one for ergodic samples + Returns + ------- + g2 : array + normalized intensity-intensity time autocorreltion + Notes : + ------- + The intensity-intensity autocorrelation g2 is connected to the intermediate + scattering factor(ISF) g1 + .. math:: + g_2(q, \\tau) = \\beta_1[g_1(q, \\tau)]^{2} + g_\infty + For a system undergoing diffusive dynamics, + .. math:: + g_1(q, \\tau) = e^{-\gamma(q) \\tau} + .. math:: + g_2(q, \\tau) = \\beta_1 e^{-2\gamma(q) \\tau} + g_\infty + These implementation are based on published work. [1]_ + References + ---------- + .. [1] L. Li, P. Kwasniewski, D. Orsi, L. Wiegart, L. Cristofolini, + C. Caronna and A. Fluerasu, " Photon statistics and speckle + visibility spectroscopy with partially coherent X-rays," + J. Synchrotron Rad. vol 21, p 1288-1295, 2014 + """ + return beta * np.exp(-2 * relaxation_rate * lags) + baseline + + +def multi_tau_auto_corr(num_levels, num_bufs, labels, images, bad_frame_list=None, + imgsum=None, norm=None,cal_error=False ): + """Wraps generator implementation of multi-tau + Original code(in Yorick) for multi tau auto correlation + author: Mark Sutton + For parameter description, please reference the docstring for + lazy_one_time. Note that there is an API difference between this function + and `lazy_one_time`. The `images` arugment is at the end of this function + signature here for backwards compatibility, but is the first argument in + the `lazy_one_time()` function. The semantics of the variables remain + unchanged. 
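+ One example (a minimal sketch; ``FD`` and ``ring_mask`` are assumed to be a
+ compressed-data handler and an ROI label array defined elsewhere):
+ g2, lag_steps = multi_tau_auto_corr(num_levels=8, num_bufs=8,
+ labels=ring_mask, images=FD)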
+ """ + gen = lazy_one_time(images, num_levels, num_bufs, labels,bad_frame_list=bad_frame_list, imgsum=imgsum, + norm=norm,cal_error=cal_error ) + for result in gen: + pass + if cal_error: + return result.g2, result.lag_steps, result.internal_state + else: + return result.g2, result.lag_steps + +def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list =None, + imgsum= None, norm = None ): + """Wraps generator implementation of multi-tau two time correlation + This function computes two-time correlation + Original code : author: Yugang Zhang + Returns + ------- + results : namedtuple + For parameter definition, see the docstring for the `lazy_two_time()` + function in this module + """ + gen = lazy_two_time(FD, num_lev, num_buf, ring_mask, + two_time_internal_state= None, + bad_frame_list=bad_frame_list, imgsum=imgsum, norm = norm ) + for result in gen: + pass + return two_time_state_to_results(result) + + +def lazy_two_time(FD, num_levels, num_bufs, labels, + two_time_internal_state=None, bad_frame_list=None, imgsum= None, norm = None ): + +#def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, +# two_time_internal_state=None): + """ Generator implementation of two-time correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Multi-tau correlation uses a scheme to achieve long-time correlations + inexpensively by downsampling the data, iteratively combining successive + frames. + The longest lag time computed is num_levels * num_bufs. + ** see comments on multi_tau_auto_corr + Parameters + ---------- + FD: the handler of compressed data + num_levels : int, optional + how many generations of downsampling to perform, i.e., + the depth of the binomial tree of averaged frames + default is one + num_bufs : int, must be even + maximum lag step to compute in each generation of + downsampling + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + two_time_internal_state: None + + + Yields + ------ + namedtuple + A ``results`` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - ``g2``: the normalized correlation + shape is (num_rois, len(lag_steps), len(lag_steps)) + - ``lag_steps``: the times at which the correlation was computed + - ``_internal_state``: all of the internal state. Can be passed back in + to ``lazy_one_time`` as the ``internal_state`` parameter + Notes + ----- + The two-time correlation function is defined as + .. math:: + C(q,t_1,t_2) = \\frac{}{} + Here, the ensemble averages are performed over many pixels of detector, + all having the same ``q`` value. The average time or age is equal to + ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal. + The time difference ``t = |t1 - t2|``, with is distance from the + ``t1 = t2`` diagonal in the perpendicular direction. + In the equilibrium system, the two-time correlation functions depend only + on the time difference ``t``, and hence the two-time correlation contour + lines are parallel. + References + ---------- + .. [1] + A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, "Slow dynamics + and aging in collodial gels studied by x-ray photon correlation + spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. 
+ """ + + num_frames = FD.end - FD.beg + if two_time_internal_state is None: + two_time_internal_state = _init_state_two_time(num_levels, num_bufs,labels, num_frames) + # create a shorthand reference to the results and state named tuple + s = two_time_internal_state + qind, pixelist = roi.extract_label_indices( labels ) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + if bad_frame_list is None: + bad_frame_list=[] + + for i in tqdm(range( FD.beg , FD.end )): + if i in bad_frame_list: + fra_pix[:]= np.nan + else: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + if imgsum is None: + if norm is None: + fra_pix[ pxlist] = v[w] + else: + fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0 + else: + if norm is None: + fra_pix[ pxlist] = v[w] / imgsum[i] + else: + fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist] + + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + s.count_level[0] = 1 + s.count_level[0] + # get the current image time + s = s._replace(current_img_time=(s.current_img_time + 1)) + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:]=0 + _two_time_process(s.buf, s.g2, s.label_array, num_bufs, + s.num_pixels, s.img_per_level, s.lag_steps, + s.current_img_time, + level=0, buf_no=s.cur[0] - 1) + # time frame for each level + s.time_ind[0].append(s.current_img_time) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + # Compute the correlations for all higher levels. + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = 1 + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( s.buf[level - 1, prev - 1] + + s.buf[level - 1, s.cur[level - 1] - 1] )/2 + + t1_idx = (s.count_level[level] - 1) * 2 + + current_img_time = ((s.time_ind[level - 1])[t1_idx] + + (s.time_ind[level - 1])[t1_idx + 1])/2. + # time frame for each level + s.time_ind[level].append(current_img_time) + # make the track_level zero once that level is processed + s.track_level[level] = 0 + # call the _two_time_process function for each multi-tau level + # for multi-tau levels greater than one + # Again, this is modifying things in place. See comment + # on previous call above. 
+ _two_time_process(s.buf, s.g2, s.label_array, num_bufs, + s.num_pixels, s.img_per_level, s.lag_steps, + current_img_time, + level=level, buf_no=s.cur[level]-1) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + #print (s.g2[1,:,1] ) + yield s + + +def two_time_state_to_results(state): + """Convert the internal state of the two time generator into usable results + Parameters + ---------- + state : namedtuple + The internal state that is yielded from `lazy_two_time` + Returns + ------- + results : namedtuple + A results object that contains the two time correlation results + and the lag steps + """ + for q in range(np.max(state.label_array)): + x0 = (state.g2)[q, :, :] + (state.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T - + np.diag(np.diag(x0))) + return results(state.g2, state.lag_steps, state) + + + +def _two_time_process(buf, g2, label_array, num_bufs, num_pixels, + img_per_level, lag_steps, current_img_time, + level, buf_no): + """ + Parameters + ---------- + buf: array + image data array to use for two time correlation + g2: array + two time correlation matrix + shape (number of labels(ROI), number of frames, number of frames) + label_array: array + Elements not inside any ROI are zero; elements inside each + ROI are 1, 2, 3, etc. corresponding to the order they are specified + in edges and segments + num_bufs: int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are len(np.unique(label_array)) + img_per_level: array + to track how many images processed in each level + lag_steps : array + delay or lag steps for the multiple tau analysis + shape num_levels + current_img_time : int + the current image number + level : int + the current multi-tau level + buf_no : int + the current buffer number + """ + img_per_level[level] += 1 + + # in multi-tau correlation other than first level all other levels + # have to do the half of the correlation + if level == 0: + i_min = 0 + else: + i_min = num_bufs//2 + + for i in range(i_min, min(img_per_level[level], num_bufs)): + t_index = level*num_bufs/2 + i + delay_no = (buf_no - i) % num_bufs + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + + #print( np.sum( past_img ), np.sum( future_img )) + + # get the matrix of correlation function without normalizations + tmp_binned = (np.bincount(label_array, + weights=past_img*future_img)[1:]) + # get the matrix of past intensity normalizations + pi_binned = (np.bincount(label_array, + weights=past_img)[1:]) + + # get the matrix of future intensity normalizations + fi_binned = (np.bincount(label_array, + weights=future_img)[1:]) + + tind1 = (current_img_time - 1) + tind2 = (current_img_time - lag_steps[int(t_index)] - 1) + #print( current_img_time ) + + if not isinstance(current_img_time, int): + nshift = 2**(level-1) + for i in range(-nshift+1, nshift+1): + g2[:, int(tind1+i), + int(tind2+i)] = (tmp_binned/(pi_binned * + fi_binned))*num_pixels + else: + g2[:, int(tind1), int(tind2)] = tmp_binned/(pi_binned * fi_binned)*num_pixels + + #print( num_pixels ) + + +def _init_state_two_time(num_levels, num_bufs, labels, num_frames): + """Initialize a stateful namedtuple for two time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + num_frames : int + number of images to use + default is number of images + Returns + ------- + internal_state : namedtuple + The namedtuple that 
contains all the state information that + `lazy_two_time` requires so that it can be used to pick up processing + after it was interrupted + """ + (label_array, pixel_list, num_rois, num_pixels, lag_steps, + buf, img_per_level, track_level, cur, norm, + lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # to count images in each level + count_level = np.zeros(num_levels, dtype=np.int64) + + # current image time + current_img_time = 0 + + # generate a time frame for each level + time_ind = {key: [] for key in range(num_levels)} + + # two time correlation results (array) + g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64) + + return _two_time_internal_state( + buf, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + g2, + count_level, + current_img_time, + time_ind, + norm, + lev_len, + ) + +def one_time_from_two_time(two_time_corr): + """ + This will provide the one-time correlation data from two-time + correlation data. + Parameters + ---------- + two_time_corr : array + matrix of two time correlation + shape (number of labels(ROI's), number of frames, number of frames) + Returns + ------- + one_time_corr : array + matrix of one time correlation + shape (number of labels(ROI's), number of frames) + """ + + one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2])) + for g in two_time_corr: + for j in range(two_time_corr.shape[2]): + one_time_corr[:, j] = np.trace(g, offset=j)/two_time_corr.shape[2] + return one_time_corr + + +def cal_c12c( FD, ring_mask, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None ): + '''calculation two_time correlation by using a multi-tau algorithm''' + + #noframes = FD.end - good_start # number of frames, not "no frames" + + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # number of frames, not "no frames" + #num_buf = 8 # number of buffers + + if num_lev is None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list)!=0: + print ('Bad frame involved and will be precessed!') + noframes -= len(np.where(np.in1d( bad_frame_list, + range(good_start, FD.end)))[0]) + print ('%s frames will be processed...'%(noframes)) + + c12, lag_steps, state = multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm ) + + print( 'Two Time Calculation is DONE!') + m, n, n = c12.shape + #print( m,n,n) + c12_ = np.zeros( [n,n,m] ) + for i in range( m): + c12_[:,:,i ] = c12[i] + return c12_, lag_steps + + + +def cal_g2c( FD, ring_mask, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None,cal_error=False ): + '''calculation g2 by using a multi-tau algorithm''' + + #noframes = FD.end - good_start # number of frames, not "no frames" + + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # number of frames, not "no frames" + #num_buf = 8 # number of buffers + + if num_lev is None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list)!=0: + print ('Bad frame involved and will be precessed!') + noframes -= len(np.where(np.in1d( bad_frame_list, + range(good_start, FD.end)))[0]) + + print ('%s frames will be processed...'%(noframes)) + if 
cal_error: + g2, lag_steps, s = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm,cal_error=cal_error ) + + g2 = np.zeros_like( s.G ) + g2_err = np.zeros_like(g2) + qind, pixelist = extract_label_indices(ring_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + Ntau, Nq = s.G.shape + g_max = 1e30 + for qi in range(1,1+Nq): + pixelist_qi = np.where( qind == qi)[0] + s_Gall_qi = s.G_all[:,pixelist_qi] + s_Pall_qi = s.past_intensity_all[:,pixelist_qi] + s_Fall_qi = s.future_intensity_all[:,pixelist_qi] + avgGi = (np.average( s_Gall_qi, axis=1)) + devGi = (np.std( s_Gall_qi, axis=1)) + avgPi = (np.average( s_Pall_qi, axis=1)) + devPi = (np.std( s_Pall_qi, axis=1)) + avgFi = (np.average( s_Fall_qi, axis=1)) + devFi = (np.std( s_Fall_qi, axis=1)) + + if len(np.where(avgPi == 0)[0]) != 0: + g_max1 = np.where(avgPi == 0)[0][0] + else: + g_max1 = avgPi.shape[0] + if len(np.where(avgFi == 0)[0]) != 0: + g_max2 = np.where(avgFi == 0)[0][0] + else: + g_max2 = avgFi.shape[0] + g_max = min( g_max1, g_max2) + #print(g_max) + #g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * + # s.future_intensity[:g_max])) + g2[:g_max,qi-1] = avgGi[:g_max]/( avgPi[:g_max] * avgFi[:g_max] ) + g2_err[:g_max,qi-1] = np.sqrt( + ( 1/ ( avgFi[:g_max] * avgPi[:g_max] ))**2 * devGi[:g_max] ** 2 + + ( avgGi[:g_max]/ ( avgFi[:g_max]**2 * avgPi[:g_max] ))**2 * devFi[:g_max] ** 2 + + ( avgGi[:g_max]/ ( avgFi[:g_max] * avgPi[:g_max]**2 ))**2 * devPi[:g_max] ** 2 + ) + + print( 'G2 with error bar calculation DONE!') + return g2[:g_max,:], lag_steps[:g_max], g2_err[:g_max,:]/np.sqrt(nopr), s + else: + g2, lag_steps = multi_tau_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list, + imgsum=imgsum, norm = norm,cal_error=cal_error ) + + print( 'G2 calculation DONE!') + return g2, lag_steps + + + +def get_pixelist_interp_iq( qp, iq, ring_mask, center): + + qind, pixelist = roi.extract_label_indices( ring_mask ) + #pixely = pixelist%FD.md['nrows'] -center[1] + #pixelx = pixelist//FD.md['nrows'] - center[0] + + pixely = pixelist%ring_mask.shape[1] -center[1] + pixelx = pixelist//ring_mask.shape[1] - center[0] + + r= np.hypot(pixelx, pixely) #leave as float. 
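+ # (descriptive note) the return below interpolates the 1D circular average
+ # I(q), given by (qp, iq), onto each ROI pixel's radial distance r,
+ # producing a smooth per-pixel intensity that can serve as a normalization.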
+ #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return np.interp( r, qp, iq ) + + +class Get_Pixel_Arrayc_todo(object): + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + ''' + + def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, + norm_inten = None, qind=None): + ''' + indexable: a images sequences + pixelist: 1-D array, interest pixel list + norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity + imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] + norm_inten: if True, each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame + qind: the index of each ROI in one frame, i.e., q + if norm_inten is True: qind has to be given + + ''' + if beg is None: + self.beg = FD.beg + if end is None: + self.end = FD.end + #if self.beg ==0: + # self.length = self.end - self.beg + #else: + # self.length = self.end - self.beg + 1 + + self.length = self.end - self.beg + + self.FD = FD + self.pixelist = pixelist + self.norm = norm + self.imgsum = imgsum + self.norm_inten= norm_inten + self.qind = qind + if self.norm_inten is not None: + if self.qind is None: + print('Please give qind.') + + def get_data(self ): + ''' + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + ''' + + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + + if self.norm_inten is not None: + #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + noqs = len(np.unique( self.qind )) + nopr = np.bincount(self.qind-1) + noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) + qind_ = np.zeros_like( self.qind ) + for j in range(noqs): + qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] + + n=0 + for i in tqdm(range( self.beg , self.end )): + (p,v) = self.FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + #np.bincount( qind[pxlist], weight= + + + if self.mean_int_sets is not None:#for each frame will normalize each ROI by it's averaged value + for j in range(noqs): + #if i ==100: + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) + Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] + + #if i==100: + # print( i, Mean_Int_Qind[ self.qind== 11 ]) + + #print('Do norm_mean_int here') + #if i ==10: + # print( norm_Mean_Int_Qind ) + else: + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] + else: + norm_imgsum = 1.0 + if self.norm is not None: + norm_avgimg_roi = self.norm[pxlist] + else: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + #if i==100: + # 
print(norm_Mean_Int_Qind[:100]) + data_array[n][ pxlist] = v[w]/ norms + n +=1 + + return data_array + + + + + +class Get_Pixel_Arrayc(object): + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + ''' + + def __init__(self, FD, pixelist,beg=None, end=None, norm=None, imgsum = None, + mean_int_sets = None, qind=None ): + ''' + indexable: a images sequences + pixelist: 1-D array, interest pixel list + norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity + imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] + mean_int_sets: each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame + qind: the index of each ROI in one frame, i.e., q + if mean_int_sets is not None: qind has to be not None + + ''' + if beg is None: + self.beg = FD.beg + if end is None: + self.end = FD.end + #if self.beg ==0: + # self.length = self.end - self.beg + #else: + # self.length = self.end - self.beg + 1 + + self.length = self.end - self.beg + + self.FD = FD + self.pixelist = pixelist + self.norm = norm + self.imgsum = imgsum + self.mean_int_sets= mean_int_sets + self.qind = qind + if self.mean_int_sets is not None: + if self.qind is None: + print('Please give qind.') + + def get_data(self ): + ''' + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + ''' + + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + + if self.mean_int_sets is not None: + #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) + noqs = len(np.unique( self.qind )) + nopr = np.bincount(self.qind-1) + noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) + qind_ = np.zeros_like( self.qind ) + for j in range(noqs): + qind_[ noprs[j]: noprs[j+1] ] = np.where(self.qind==j+1)[0] + + n=0 + for i in tqdm(range( self.beg , self.end )): + (p,v) = self.FD.rdrawframe(i) + w = np.where( timg[p] )[0] + pxlist = timg[ p[w] ] -1 + + if self.mean_int_sets is not None:#for normalization of each averaged ROI of each frame + for j in range(noqs): + #if i ==100: + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) + Mean_Int_Qind[ qind_[ noprs[j]: noprs[j+1] ] ] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] #self.mean_int_set or Mean_Int_Qind[pxlist] + + #if i==100: + # print( i, Mean_Int_Qind[ self.qind== 11 ]) + + #print('Do norm_mean_int here') + #if i ==10: + # print( norm_Mean_Int_Qind ) + else: + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] + else: + norm_imgsum = 1.0 + if self.norm is not None: + if len( (self.norm).shape )>1: + norm_avgimg_roi = self.norm[i][pxlist] + #print('here') + + else: + norm_avgimg_roi = self.norm[pxlist] + else: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + #if i==100: + # print(norm_Mean_Int_Qind[:100]) + data_array[n][ pxlist] = 
v[w]/ norms + n +=1 + + return data_array + + +def auto_two_Arrayc( data_pixel, rois, index=None): + + ''' + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange( 1, noqs + 1 ) + else: + try: + len(index) + index = np.array( index ) + except TypeError: + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) + try: + g12b = np.zeros( [noframes, noframes, len(qlist) ] ) + DO = True + except: + print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + + if DO: + i = 0 + for qi in tqdm(qlist ): + #print (qi-1) + pixelist_qi = np.where( qind == qi)[0] + #print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:,pixelist_qi] + sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes ) + sum2 = sum1.T + #print( qi, qlist, ) + #print( g12b[:,:,qi -1 ] ) + g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] + i +=1 + return g12b + +def auto_two_Arrayc_ExplicitNorm( data_pixel, rois, norm=None, index=None): + + ''' + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function by giving explict normalization + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + norm: if not None, shoud be the shape as data_pixel, will normalize two time by this norm + if None, will return two time without normalization + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange( 1, noqs + 1 ) + else: + try: + len(index) + index = np.array( index ) + except TypeError: + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) + try: + g12b = np.zeros( [noframes, noframes, len(qlist) ] ) + DO = True + except: + print("The array is too large. The Sever can't handle such big array. 
Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + if DO: + i = 0 + for qi in tqdm(qlist ): + pixelist_qi = np.where( qind == qi)[0] + data_pixel_qi = data_pixel[:,pixelist_qi] + if norm is not None: + norm1 = norm[:,pixelist_qi] + sum1 = (np.average( norm1, axis=1)).reshape( 1, noframes ) + sum2 = sum1.T + else: + sum1=1 + sum2=1 + g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2/ nopr[qi -1] + i +=1 + return g12b + + +def two_time_norm( data_pixel, rois, index=None): + + ''' + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + ''' + + qind, pixelist = roi.extract_label_indices( rois ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange( 1, noqs + 1 ) + else: + try: + len(index) + index = np.array( index ) + except TypeError: + index = np.array( [index] ) + #print( index ) + qlist = np.arange( 1, noqs + 1 )[ index -1 ] + #print( qlist ) + try: + norm = np.zeros( len(qlist) ) + DO = True + except: + print("The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely") + '''TO be done here ''' + DO = False + + if DO: + i = 0 + for qi in tqdm(qlist ): + #print (qi-1) + pixelist_qi = np.where( qind == qi)[0] + #print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:,pixelist_qi] + sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes ) + norm[i] = np.average(sum1 ) + #sum2 = sum1.T + #print( qi, qlist, ) + #print( g12b[:,:,qi -1 ] ) + #g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] + i +=1 + return norm + + + + +def check_normalization( frame_num, q_list, imgsa, data_pixel ): + '''check the ROI intensity before and after normalization + Input: + frame_num: integer, the number of frame to be checked + q_list: list of integer, the list of q to be checked + imgsa: the raw data + data_pixel: the normalized data, caculated by fucntion Get_Pixel_Arrayc + Plot the intensities + ''' + fig,ax=plt.subplots(2) + n=0 + for q in q_list: + norm_data = data_pixel[frame_num][qind==q] + raw_data = np.ravel( np.array(imgsa[frame_num]) )[pixelist[qind==q]] + #print(raw_data.mean()) + plot1D( raw_data,ax=ax[0], legend='q=%s'%(q), m=markers[n], + title='fra=%s_raw_data'%(frame_num)) + + #plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], + # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + #print( mean_int_sets_[frame_num][q-1] ) + plot1D( norm_data, ax=ax[1], legend='q=%s'%(q), m=markers[n], + xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + n +=1 + diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_01252025.py b/pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_01252025.py new file mode 100644 index 0000000..d4c811b --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_01252025.py @@ -0,0 +1,6365 @@ +import copy +from datetime import 
datetime +from os import listdir +from shutil import copyfile + +import matplotlib.cm as mcm +import numpy as np +import PIL +import pytz +import scipy +from matplotlib import cm +from modest_image import imshow +from scipy.special import erf +from skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta, twotheta_to_q +from skimage.draw import disk, ellipse, line, line_aa, polygon +from skimage.filters import prewitt + +# from tqdm import * +from pyCHX.chx_libs import * +from pyCHX.chx_libs import colors, markers + +markers = [ + "o", + "D", + "v", + "^", + "<", + ">", + "p", + "s", + "H", + "h", + "*", + "d", + "8", + "1", + "3", + "2", + "4", + "+", + "x", + "_", + "|", + ",", + "1", +] +markers = np.array(markers * 100) + + +flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +""" +a function to flatten a nest list +e.g., flatten( [ ['sg','tt'],'ll' ] ) +gives ['sg', 'tt', 'l', 'l'] +""" + + +def get_frames_from_dscan(uid, detector="eiger4m_single_image"): + """Get frames from a dscan by giving uid and detector""" + hdr = db[uid] + return db.get_images(hdr, detector) + + +def get_roi_intensity(img, roi_mask): + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + avgs = np.zeros(noqs) + for i in tqdm(range(1, 1 + noqs)): + avgs[i - 1] = np.average(img[roi_mask == i]) + return avgs + + +def generate_h5_list(inDir, filename): + """YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + Input: + inDir: the input direction + filename: the filename for output (have to lst as extension) + Output: + Save the all h5 filenames in a lst file + """ + fp_list = listdir(inDir) + if filename[-4:] != ".lst": + filename += ".lst" + for FP in fp_list: + FP_ = inDir + FP + if os.path.isdir(FP_): + fp = listdir(FP_) + for fp_ in fp: + if ".h5" in fp_: + append_txtfile(filename=filename, data=np.array([FP_ + "/" + fp_])) + print("The full path of all the .h5 in %s has been saved in %s." 
% (inDir, filename)) + print("You can use ./analysis/run_gui to visualize all the h5 file.") + + +def fit_one_peak_curve(x, y, fit_range=None): + """YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape + Parameters: + x: one-d array, x-axis data + y: one-d array, y-axis data + fit_range: [x1, x2], a list of index, to define the x-range for fit + Return: + center: float, center of the peak + center_std: float, error bar of center in the fitting + fwhm: float, full width at half max intensity of the peak, 2*sigma + fwhm_std:float, error bar of the full width at half max intensity of the peak + xf: the x in the fit + out: the fitting class resutled from lmfit + + """ + from lmfit.models import LinearModel, LorentzianModel + + peak = LorentzianModel() + background = LinearModel() + model = peak + background + if fit_range != None: + x1, x2 = fit_range + xf = x[x1:x2] + yf = y[x1:x2] + else: + xf = x + yf = y + model.set_param_hint("slope", value=5) + model.set_param_hint("intercept", value=0) + model.set_param_hint("center", value=0.005) + model.set_param_hint("amplitude", value=0.1) + model.set_param_hint("sigma", value=0.003) + # out=model.fit(yf, x=xf)#, method='nelder') + out = model.fit(yf, x=xf, method="leastsq") + cen = out.params["center"].value + cen_std = out.params["center"].stderr + wid = out.params["sigma"].value * 2 + wid_std = out.params["sigma"].stderr * 2 + return cen, cen_std, wid, wid_std, xf, out + + +def plot_xy_with_fit( + x, + y, + xf, + out, + cen, + cen_std, + wid, + wid_std, + xlim=[1e-3, 0.01], + xlabel="q (" r"$\AA^{-1}$)", + ylabel="I(q)", + filename=None, +): + """YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid""" + + yf2 = out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, m="o", ls="", c="k", legend="data") + plot1D(x=xf, y=yf2, ax=ax, m="", ls="-", c="r", legend="fit", logy=True) + ax.set_xlim(xlim) + # ax.set_ylim( 0.1, 4) + # ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + txts = r"peak" + r" = %.5f +/- %.5f " % (cen, cen_std) + ax.text(x=0.02, y=0.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"wid" + r" = %.4f +/- %.4f" % (wid, wid_std) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) + plt.tight_layout() + if filename != None: + plt.savefig(filename) + return ax + + +def get_touched_qwidth(qcenters): + """YG Dev@CHX April 2019, get touched qwidth by giving qcenters""" + qwX = np.zeros_like(qcenters) + qW = qcenters[1:] - qcenters[:-1] + qwX[0] = qW[0] + for i in range(1, len(qcenters) - 1): + # print(i) + qwX[i] = min(qW[i - 1], qW[i]) + qwX[-1] = qW[-1] + qwX *= 0.9999 + return qwX + + +def append_txtfile(filename, data, fmt="%s", *argv, **kwargs): + """YG. 
Dev May 10, 2109 append data to a file + Create an empty file if the file dose not exist, otherwise, will append the data to it + Input: + fp: filename + data: the data to be append + fmt: the parameter defined in np.savetxt + + """ + from numpy import savetxt + + exists = os.path.isfile(filename) + if not exists: + np.savetxt( + filename, + [], + fmt="%s", + ) + print("create new file") + + f = open(filename, "a") + savetxt(f, data, fmt=fmt, *argv, **kwargs) + f.close() + + +def get_roi_mask_qval_qwid_by_shift( + new_cen, new_mask, old_cen, old_roi_mask, setup_pargs, geometry, limit_qnum=None +): + """YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask""" + center = setup_pargs["center"] + roi_mask1 = shift_mask( + new_cen=center, new_mask=new_mask, old_cen=old_cen, old_roi_mask=old_roi_mask, limit_qnum=limit_qnum + ) + qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( + new_mask=new_mask, setup_pargs=setup_pargs, old_roi_mask=old_roi_mask, old_cen=old_cen, geometry=geometry + ) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1, new_mask) + # print(w,w1) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k in w1} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k in w1} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return roi_mask1, qval_dict, qwid_dict + + +def get_zero_nozero_qind_from_roi_mask(roi_mask, mask): + """YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number""" + qind, pixelist = roi.extract_label_indices(roi_mask * mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + w = np.where(nopr == 0)[0] + w1 = np.where(nopr != 0)[0] + return w, w1 + + +def get_masked_qval_qwid_dict_using_Rmax(new_mask, setup_pargs, old_roi_mask, old_cen, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method""" + cy, cx = setup_pargs["center"] + my, mx = new_mask.shape + Rmax = int( + np.ceil(max(np.hypot(cx, cy), np.hypot(cx - mx, cy - my), np.hypot(cx, cy - my), np.hypot(cx - mx, cy))) + ) + Fmask = np.zeros([Rmax * 2, Rmax * 2], dtype=int) + Fmask[Rmax - cy : Rmax - cy + my, Rmax - cx : Rmax - cx + mx] = new_mask + roi_mask1 = shift_mask( + new_cen=[Rmax, Rmax], + new_mask=np.ones_like(Fmask), + old_cen=old_cen, + old_roi_mask=old_roi_mask, + limit_qnum=None, + ) + setup_pargs_ = { + "center": [Rmax, Rmax], + "dpix": setup_pargs["dpix"], + "Ldet": setup_pargs["Ldet"], + "lambda_": setup_pargs["lambda_"], + } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict(roi_mask1, Fmask, setup_pargs_, geometry) + # w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1 # ,w + + +def get_masked_qval_qwid_dict(roi_mask, mask, setup_pargs, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask""" + + qval_dict_, qwid_dict_ = get_qval_qwid_dict(roi_mask, setup_pargs, geometry=geometry) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask, mask) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k not in w} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k not in w} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return qval_dict, qwid_dict + + +def get_qval_qwid_dict(roi_mask, setup_pargs, geometry="saxs"): + """YG 
Dev April 6, 2019 + Get qval_dict and qwid_dict by giving roi_mask, setup_pargs + Input: + roi_mask: integer type 2D array + setup_pargs: dict, should at least contains, center (direct beam center), dpix (in mm), + lamda_: in A-1, Ldet: in mm + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + geometry: support saxs for isotropic transmission SAXS + ang_saxs for anisotropic transmission SAXS + flow_saxs for anisotropic transmission SAXS under flow (center symetric) + + Return: + qval_dict: dict, key as q-number, val: q val + qwid_dict: dict, key as q-number, val: q width (qmax - qmin) + + TODOLIST: to make GiSAXS work + + """ + + origin = setup_pargs["center"] # [::-1] + shape = roi_mask.shape + qp_map = radial_grid(origin, shape) + phi_map = np.degrees(angle_grid(origin, shape)) + two_theta = radius_to_twotheta(setup_pargs["Ldet"], setup_pargs["dpix"] * qp_map) + q_map = utils.twotheta_to_q(two_theta, setup_pargs["lambda_"]) + qind, pixelist = roi.extract_label_indices(roi_mask) + Qval = np.unique(qind) + qval_dict_ = {} + qwid_dict_ = {} + for j, i in enumerate(Qval): + qval = q_map[roi_mask == i] + # print( qval ) + if geometry == "saxs": + qval_dict_[j] = [(qval.max() + qval.min()) / 2] # np.mean(qval) + qwid_dict_[j] = [(qval.max() - qval.min())] + + elif geometry == "ang_saxs": + aval = phi_map[roi_mask == i] + # print(j,i,qval, aval) + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) + else: + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) + + elif geometry == "flow_saxs": + sx, sy = roi_mask.shape + cx, cy = origin + aval = (phi_map[cx:])[roi_mask[cx:] == i] + if len(aval) == 0: + aval = (phi_map[:cx])[roi_mask[:cx] == i] + 180 + + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + # print(aval) + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) + else: + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) + + return qval_dict_, qwid_dict_ + + +def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): + """Get normalization of a time series by SavitzkyGolay filter + Input: + FD: file handler for a compressed data + pixelist: pixel list for a roi_mask + bins: the bin number for the time series, if number = total number of the time frame, + it means SG of the time averaged image + mask: the additional mask + window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details + Return: + norm: shape as ( length of FD, length of pixelist ) + """ + if mask == None: + mask = 1 + beg = FD.beg + end = FD.end + N = end - beg + BEG = beg + if bins == 1: + END = end 
+ NB = N + MOD = 0 + else: + END = N // bins + MOD = N % bins + NB = END + norm = np.zeros([end, len(pixelist)]) + for i in tqdm(range(NB)): + if bins == 1: + img = FD.rdframe(i + BEG) + else: + for j in range(bins): + ct = i * bins + j + BEG + # print(ct) + if j == 0: + img = FD.rdframe(ct) + n = 1.0 + else: + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + # img += FD.rdframe( ct ) + n += 1 + img /= n + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + normi = np.ravel(avg_imgf)[pixelist] + if bins == 1: + norm[i + beg] = normi + else: + norm[i * bins + beg : (i + 1) * bins + beg] = normi + if MOD: + for j in range(MOD): + ct = (1 + i) * bins + j + BEG + if j == 0: + img = FD.rdframe(ct) + n = 1.0 + else: + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + n += 1 + img /= n + # print(ct,n) + img = FD.rdframe(ct) + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + normi = np.ravel(avg_imgf)[pixelist] + norm[(i + 1) * bins + beg : (i + 2) * bins + beg] = normi + return norm + + +def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): + """Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask + Input: + new_cen: [x,y] in uint of pixel + new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask + old_cen: [x,y] in uint of pixel + old_roi_mask: the roi_mask to be shifted + limit_qnum: integer, if not None, defines the max number of unique values of nroi_mask + + Output: + the shifted/croped roi_mask + """ + nsx, nsy = new_mask.shape + down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] + x1, x2, y1, y2 = [old_cen[0] - down, old_cen[0] + up, old_cen[1] - left, old_cen[1] + right] + nroi_mask_ = old_roi_mask[x1:x2, y1:y2] * new_mask + nroi_mask = np.zeros_like(nroi_mask_) + qind, pixelist = roi.extract_label_indices(nroi_mask_) + qu = np.unique(qind) + # noqs = len( qu ) + # nopr = np.bincount(qind, minlength=(noqs+1))[1:] + # qm = nopr>0 + for j, qv in enumerate(qu): + nroi_mask[nroi_mask_ == qv] = j + 1 + if limit_qnum != None: + nroi_mask[nroi_mask > limit_qnum] = 0 + return nroi_mask + + +def plot_q_g2fitpara_general( + g2_dict, + g2_fitpara, + geometry="saxs", + ylim=None, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + ylabel="g2", + qth_interest=None, + max_plotnum_fig=1600, + qphi_analysis=False, + *argv, + **kwargs, +): + """ + Mar 29,2019, Y.G.@CHX + + plot q~fit parameters + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid_ = kwargs["uid"] + else: + uid_ = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + data_dir = path + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" + + if geometry == "saxs": + if qphi_analysis: + geometry = "ang_saxs" + + qval_dict_, fit_res_ = g2_dict, g2_fitpara + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + # print(qr_label, qz_label, short_ulabel, long_ulabel) + # $print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( + g2_fitpara["beta"], + g2_fitpara["relaxation_rate"], + g2_fitpara["baseline"], + g2_fitpara["alpha"], + ) + + fps = [] + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + betai, relaxation_ratei, baselinei, alphai = ( + beta[ind_long_i], + relaxation_rate[ind_long_i], + baseline[ind_long_i], + alpha[ind_long_i], + ) + qi = long_ulabel + # print(s_ind, qi, np.array( betai) ) + + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": + fig = plt.figure(figsize=(8, 6)) + else: + if num_short > 1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + # print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) + else: + # print('Here') + if master_plot != "qz": + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == "qz": + if geometry == "ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (uid_, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + else: + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + + else: + sy = 1 + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + temp = sy + sy = sx + sx = temp + if sx == 1: + if sy == 1: + plt.axis("on") + ax1 = fig.add_subplot(4, 1, 1) + ax2 = fig.add_subplot(4, 1, 2) + ax3 = fig.add_subplot(4, 1, 3) + ax4 = fig.add_subplot(4, 1, 4) + plot1D(x=qi, y=betai, m="o", ls="--", c="k", ax=ax1, legend=r"$\beta$", title="") + plot1D(x=qi, y=alphai, m="o", ls="--", c="r", ax=ax2, legend=r"$\alpha$", title="") + plot1D(x=qi, y=baselinei, m="o", ls="--", c="g", ax=ax3, legend=r"$baseline$", title="") + plot1D(x=qi, y=relaxation_ratei, m="o", c="b", ls="--", ax=ax4, legend=r"$\gamma$ $(s^{-1})$", title="") + + ax4.set_ylabel(r"$\gamma$ $(s^{-1})$") + ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) + ax3.set_ylabel(r"$baseline") + ax2.set_ylabel(r"$\alpha$") + ax1.set_ylabel(r"$\beta$") + fig.tight_layout() + fp = data_dir + uid_ + 
"g2_q_fit_para_%s.png" % short_ulabel[s_ind] + fig.savefig(fp, dpi=fig.dpi) + fps.append(fp) + outputfile = data_dir + "%s_g2_q_fitpara_plot" % uid_ + ".png" + # print(uid) + combine_images(fps, outputfile, outsize=[2000, 2400]) + + +def plot_q_rate_general( + qval_dict, + rate, + geometry="saxs", + ylim=None, + logq=True, + lograte=True, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + *argv, + **kwargs, +): + """ + Mar 29,2019, Y.G.@CHX + + plot q~rate in log-log scale + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. + show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig, ax = plt.subplots() + plt.title(r"$Q$" "-Rate-%s" % (uid), fontsize=20, y=1.06) + Nqz = num_short + if Nqz != 1: + ls = "--" + else: + ls = "" + # print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] + else: + label = "" + ax.loglog(x, y, marker="o", ls=ls, label=label) + if Nqz != 1: + legend = ax.legend(loc="best") + + if plot_index_range != None: + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) + + if ylim != None: + ax.set_ylim(ylim) + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$) (log)") + ax.set_xlabel("$q$" r"($\AA$) (log)") + fp = path + "%s_Q_Rate_loglog" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig, ax + + +def plot_xy_x2( + x, + y, + x2=None, + pargs=None, + loglog=False, + logy=True, + fig_ax=None, + xlabel="q (" r"$\AA^{-1}$)", + xlabel2="q (pixel)", + title="_q_Iq", + ylabel="I(q)", + save=True, + *argv, + **kwargs, +): + """YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + This funciton is primary for plot q-Iq + + Input: + x: one-d array, x in one unit + y: one-d array, + x2:one-d array, x in anoter unit + pargs: dict, could include 'uid', 'path' + loglog: if True, if plot x and y in log, by default plot in y-log + save: if True, save the plot in the path defined in pargs + kwargs: could include xlim (in unit of index), ylim (in unit of real value) + + """ + if fig_ax == None: + fig, ax1 = plt.subplots() + else: + fig, ax1 = fig_ax + if pargs != None: + uid = pargs["uid"] + path = pargs["path"] + else: + uid = "XXX" + path = "" + if loglog: + ax1.loglog(x, y, "-o") + elif logy: + ax1.semilogy(x, y, "-o") + else: + ax1.plot(x, y, "-o") + ax1.set_xlabel(xlabel) + ax1.set_ylabel(ylabel) + title = ax1.set_title("%s--" % uid + title) + Nx = len(x) + if "xlim" in kwargs.keys(): + xlim = kwargs["xlim"] + if xlim[1] > Nx: + xlim[1] = Nx - 1 + else: + xlim = [0, Nx] + if "ylim" in kwargs.keys(): + ylim = kwargs["ylim"] + 
else: + ylim = [y.min(), y.max()] + lx1, lx2 = xlim + ax1.set_xlim([x[lx1], x[lx2]]) + ax1.set_ylim(ylim) + if x2 != None: + ax2 = ax1.twiny() + ax2.set_xlabel(xlabel2) + ax2.set_ylabel(ylabel) + ax2.set_xlim([x2[lx1], x2[lx2]]) + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + if save: + path = pargs["path"] + fp = path + "%s_q_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0): + """save oavs as png""" + tifs = list(db[uid].data("OAV_image"))[0] + try: + pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) + except: + pixel_scalebar = None + print("No OAVS resolution is available.") + + text_string = "%s $\mu$m" % scalebar_size + h = db[uid] + oavs = tifs + + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list + detectors = sorted(get_detectors(h)) + for d in range(len(detectors)): + try: + oav_period = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_period"] + oav_expt = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_time"] + except: + pass + oav_times = [] + for i in range(len(oavs)): + oav_times.append(oav_expt + i * oav_period) + fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) + # plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img * scale)[:, :, 2] < threshold + except: + ind = np.flipud(img * scale) < threshold + rgb_cont_img = np.copy(np.flipud(img)) + # rgb_cont_img[ind,0]=1000 + if brightness_scale != 1: + rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) + + plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") + plt.axis("equal") + cross = [685, 440, 50] # definintion of direct beam: x, y, size + plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") + plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") + if pixel_scalebar != None: + plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. + plt.text(1000, 50, text_string, fontsize=14, color="r") + plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") + plt.axis("off") + plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) + + +def shift_mask_old(mask, shiftx, shifty): + """YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel + Input: + mask: int-type array, + shiftx: int scalar, shift value in x direction with unit in pixel + shifty: int scalar, shift value in y direction with unit in pixel + Output: + maskn: int-type array, shifted mask + + """ + qind, pixelist = roi.extract_label_indices(mask) + dims = mask.shape + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + pixely = pixelist % imgwidthy + pixelx = pixelist // imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy) & (pixelyn >= 0) & (pixelxn < imgwidthx) & (pixelxn >= 0) + pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] + maskn = np.zeros_like(mask) + maskn.ravel()[pixelist_new] = qind[w] + return maskn + + +def get_current_time(): + """get current time in a fomart of year/month/date/hour(24)/min/sec/, + e.g. 
2009-01-05 22:14:39 + """ + loc_dt = datetime.now(pytz.timezone("US/Eastern")) + fmt = "%Y-%m-%d %H:%M:%S" + return loc_dt.strftime(fmt) + + +def evalue_array(array, verbose=True): + """Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array""" + _min, _max, avg, std = np.min(array), np.max(array), np.average(array), np.std(array) + if verbose: + print( + "The min, max, avg, std of this array are: %s %s %s %s, respectively." % (_min, _max, avg, std) + ) + return _min, _max, avg, std + + +def find_good_xpcs_uids(fuids, Nlim=100, det=["4m", "1m", "500"]): + """Y.G., Dev Nov 1, 2018 Find the good xpcs series + Input: + fuids: list, a list of full uids + Nlim: integer, the smallest number of images to be considered as XCPS sereis + det: list, a list of detector (can be short string of the full name of the detector) + Return: + the xpcs uids list + + """ + guids = [] + for i, uid in enumerate(fuids): + if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": + head = db[uid]["start"] + for dec in head["detectors"]: + for dt in det: + if dt in dec: + if "number of images" in head: + if float(head["number of images"]) >= Nlim: + # print(i, uid) + guids.append(uid) + G = np.unique(guids) + print("Found %s uids for XPCS series." % len(G)) + return G + + +def create_fullImg_with_box( + shape, + box_nx=9, + box_ny=8, +): + """Y.G. 2018/10/26 Divide image with multi touched boxes + Input + shape: the shape of image + box_nx: the number of box in x + box_ny: the number width of box in y + Return: + roi_mask, (* mask ) + """ + + # shape = mask.shape + Wrow, Wcol = int(np.ceil(shape[0] / box_nx)), int(np.ceil(shape[1] / box_ny)) + # print(Wrow, Wcol) + roi_mask = np.zeros(shape, dtype=np.int32) + for i in range(box_nx): + for j in range(box_ny): + roi_mask[i * Wrow : (i + 1) * Wrow, j * Wcol : (j + 1) * Wcol] = i * box_ny + j + 1 + # roi_mask *= mask + return roi_mask + + +def get_refl_y0( + inc_ang, + inc_y0, + Ldet, + pixel_size, +): + """Get reflection beam center y + Input: + inc_ang: incident angle in degree + inc_y0: incident beam y center in pixel + Ldet: sample to detector distance in meter + pixel_size: pixel size in meter + Return: reflection beam center y in pixel + """ + return Ldet * np.tan(np.radians(inc_ang)) * 2 / pixel_size + inc_y0 + + +def lin2log_g2(lin_tau, lin_g2, num_points=False): + """ + Lutz developed at Aug,2018 + function to resample g2 with linear time steps into logarithmics + g2 values between consecutive logarthmic time steps are averaged to increase statistics + calling sequence: lin2log_g2(lin_tau,lin_g2,num_points=False) + num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade) + num_points=18 -> use 18 logarithmically spaced time points + """ + # prep taus and g2s: remove nan and first data point at tau=0 + rem = lin_tau == 0 + # print('lin_tau: '+str(lin_tau.size)) + # print('lin_g2: '+str(lin_g2.size)) + lin_tau[rem] = np.nan + # lin_tau[0]=np.nan;#lin_g2[0]=np.nan + lin_g2 = lin_g2[np.isfinite(lin_tau)] + lin_tau = lin_tau[np.isfinite(lin_tau)] + # print('from lin-to-log-g2_sampling: ',lin_tau) + if num_points == False: + # automatically decide how many log-points (8/decade) + dec = int(np.ceil((np.log10(lin_tau.max()) - np.log10(lin_tau.min())) * 8)) + else: + dec = int(num_points) + log_tau = np.logspace(np.log10(lin_tau[0]), np.log10(lin_tau.max()), dec) + # re-sample correlation function: + log_g2 = [] + for i in range(log_tau.size - 1): + y = [i, log_tau[i] - (log_tau[i + 
1] - log_tau[i]) / 2, log_tau[i] + (log_tau[i + 1] - log_tau[i]) / 2] + # x=lin_tau[lin_tau>y[1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + # print(np.average(lin_g2[x])) + if np.isfinite(np.average(lin_g2[x])): + log_g2.append(np.average(lin_g2[x])) + else: + log_g2.append(np.interp(log_tau[i], lin_tau, lin_g2)) + if i == log_tau.size - 2: + # print(log_tau[i+1]) + y = [i + 1, log_tau[i + 1] - (log_tau[i + 1] - log_tau[i]) / 2, log_tau[i + 1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + log_g2.append(np.average(lin_g2[x])) + return [log_tau, log_g2] + + +def get_eigerImage_per_file(data_fullpath): + f = h5py.File(data_fullpath) + dset_keys = list(f["/entry/data"].keys()) + dset_keys.sort() + dset_root = "/entry/data" + dset_keys = [dset_root + "/" + dset_key for dset_key in dset_keys] + dset = f[dset_keys[0]] + return len(dset) + + +def copy_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX + Copy Eiger file containing master and data files to a new path + old_path: the full path of the Eiger master file + new_path: the new path + + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") + for fp in tqdm(fps): + if not os.path.exists(new_path + os.path.basename(fp)): + shutil.copy(fp, new_path) + print("The files %s are copied: %s." % (old_path[:-10] + "*", new_path + os.path.basename(fp))) + + +def delete_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX + Delete copied Eiger file containing master and data in a new path + old_path: the full path of the Eiger master file + new_path: the new path + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") + for fp in tqdm(fps): + nfp = new_path + os.path.basename(fp) + if os.path.exists(nfp): + os.remove(nfp) + + +def show_tif_series( + tif_series, Nx=None, center=None, w=50, vmin=None, vmax=None, cmap=cmap_vge_hdr, logs=False, figsize=[10, 16] +): + """ + tif_series: list of 2D tiff images + Nx: the number in the row for dispalying + center: the center of iamge (or direct beam pixel) + w: the ROI half size in pixel + vmin: the min intensity value for plot + vmax: if None, will be max intensity value of the ROI + figsize: size of the plot (in inch) + + """ + + if center != None: + cy, cx = center + # infs = sorted(sample_list) + N = len(tif_series) + if Nx == None: + sy = int(np.sqrt(N)) + else: + sy = Nx + sx = int(np.ceil(N / sy)) + fig = plt.figure(figsize=figsize) + for i in range(N): + # print(i) + ax = fig.add_subplot(sx, sy, i + 1) + # d = (np.array( PIL.Image.open( infs[i] ).convert('I') ))[ cy-w:cy+w, cx-w:cx+w ] + d = tif_series[i][::-1] + # vmax= np.max(d) + # pritn(vmax) + # vmin= 10#np.min(d) + show_img( + d, + logs=logs, + show_colorbar=False, + show_ticks=False, + ax=[fig, ax], + image_name="%02d" % (i + 1), + cmap=cmap, + vmin=vmin, + vmax=vmax, + aspect=1, + save=False, + path=None, + ) + return fig, ax + + +from scipy.special import erf + + +def ps(y, shift=0.5, replot=True, logplot="off", x=None): + """ + Dev 16, 2018 + Modified ps() function in 95-utilities.py + function to determine statistic on line profile (assumes either peak or erf-profile) + Input: + y: 1D array, the data for analysis + shift: scale for peak presence (0.5 -> peak has to be taller factor 2 above background) + replot: if True, will plot data (if error func) with the fit and peak/cen/com position + logplot: if on, will plot in log 
scale + x: if not None, give x-data + + + """ + if x == None: + x = np.arange(len(y)) + x = np.array(x) + y = np.array(y) + + PEAK = x[np.argmax(y)] + PEAK_y = np.max(y) + COM = np.sum(x * y) / np.sum(y) + + ### from Maksim: assume this is a peak profile: + def is_positive(num): + return True if num > 0 else False + + # Normalize values first: + ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 + positive = is_positive(ym[0]) + list_of_roots = [] + for i in range(len(y)): + current_positive = is_positive(ym[i]) + if current_positive != positive: + list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) + positive = not positive + if len(list_of_roots) >= 2: + FWHM = abs(list_of_roots[-1] - list_of_roots[0]) + CEN = list_of_roots[0] + 0.5 * (list_of_roots[1] - list_of_roots[0]) + ps.fwhm = FWHM + ps.cen = CEN + yf = ym + # return { + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, + # } + else: # ok, maybe it's a step function.. + # print('no peak...trying step function...') + ym = ym + shift + + def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + return base - A * erf(k * (x - x0)) + + mod = Model(err_func) + ### estimate starting values: + x0 = np.mean(x) + # k=0.1*(np.max(x)-np.min(x)) + pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) + result = mod.fit(ym, pars, x=x) + CEN = result.best_values["x0"] + FWHM = result.best_values["k"] + A = result.best_values["A"] + b = result.best_values["base"] + yf_ = err_func(x, CEN, k=FWHM, A=A, base=b) # result.best_fit + yf = (yf_) * (np.max(y) - np.min(y)) + np.min(y) + + # (y - np.min(y)) / (np.max(y) - np.min(y)) - shift + + ps.cen = CEN + ps.fwhm = FWHM + + if replot: + ### re-plot results: + if logplot == "on": + fig, ax = plt.subplots() # plt.figure() + ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") + ax.hold(True) + ax.semilogy([CEN, CEN], [np.min(y), np.max(y)], "r-.", label="CEN") + ax.semilogy([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.semilogy(x, y, "bo-") + # plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() + else: + # plt.close(999) + fig, ax = plt.subplots() # plt.figure() + ax.plot([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") + + # ax.hold(True) + ax.plot([CEN, CEN], [np.min(y), np.max(y)], "m-.", label="CEN") + ax.plot([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.plot(x, y, "bo--") + ax.plot(x, yf, "r-", label="Fit") + + # plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() + + ### assign values of interest as function attributes: + ps.peak = PEAK + ps.com = COM + return ps.cen + + +def create_seg_ring(ring_edges, ang_edges, mask, setup_pargs): + """YG Dev April 6, 2018 + Create segment ring mask + Input: + ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ] + ang_edges: edges of angles, e.g., [ [20,40], [50, 60], ] + mask: bool type 2D array + set_pargs: dict, should at least contains, center + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 
0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + Return: + roi_mask: segmented ring mask: two-D array + qval_dict: dict, key as q-number, val: q val + + """ + + roi_mask_qr, qr, qr_edge = get_ring_mask( + mask, + inner_radius=None, + outer_radius=None, + width=None, + num_rings=None, + edges=np.array(ring_edges), + unit="pixel", + pargs=setup_pargs, + ) + + roi_mask_ang, ang_center, ang_edge = get_angular_mask( + mask, + inner_angle=None, + outer_angle=None, + width=None, + edges=np.array(ang_edges), + num_angles=None, + center=center, + flow_geometry=False, + ) + + roi_mask, good_ind = combine_two_roi_mask(roi_mask_qr, roi_mask_ang, pixel_num_thres=100) + qval_dict_ = get_qval_dict(qr_center=qr, qz_center=ang_center, one_qz_multi_qr=False) + qval_dict = {i: qval_dict_[k] for (i, k) in enumerate(good_ind)} + return roi_mask, qval_dict + + +def find_bad_pixels_FD(bad_frame_list, FD, img_shape=[514, 1030], threshold=15, show_progress=True): + """Designed to find bad pixel list in 500K + threshold: the max intensity in 5K + """ + bad = np.zeros(img_shape, dtype=bool) + if show_progress: + for i in tqdm(bad_frame_list[bad_frame_list >= FD.beg]): + p, v = FD.rdrawframe(i) + w = np.where(v > threshold)[0] + bad.ravel()[p[w]] = 1 + # x,y = np.where( imgsa[i] > threshold) + # bad[x[0],y[0]] = 1 + else: + for i in bad_frame_list[bad_frame_list >= FD.beg]: + p, v = FD.rdrawframe(i) + w = np.where(v > threshold)[0] + bad.ravel()[p[w]] = 1 + + return ~bad + + +def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=15): + """DEV by Yugang@CHX, June 6, 2019 + Get circular average of a time series using a dynamics mask, which pixel values are defined as + zeors if above a threshold. 
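+ (Added note, not in the original docstring: the dynamic mask is rebuilt for every bin of
+ frames via find_bad_pixels_FD, so pixels that run hot in only part of the series are still
+ excluded. A minimal usage sketch, assuming FD, mask and setup_pargs are defined as below:
+ qp, iq, q = get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=10, threshold=15).)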
+ Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number + Input: + FD: the multifile handler for the time series + mask: a two-d bool type array + setup_pargs: dict, parameters of setup for calculate q-Iq + should have keys as + 'dpix', 'Ldet','lambda_', 'center' + bin_number: bin number of the frame + threshold: define the dynamics mask, which pixel values are defined as + zeors if above this threshold + Output: + qp_saxs: q in pixel + iq_saxs: intenstity + q_saxs: q in A-1 + """ + beg = FD.beg + end = FD.end + shape = FD.rdframe(beg).shape + Nimg_ = FD.end - FD.beg + # Nimg_ = 100 + Nimg = Nimg_ // bin_number + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bin_number)) + beg + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + # print(t1,t2) + if bin_number == 1: + avg_imgi = FD.rdframe(t1) + else: + avg_imgi = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + badpi = find_bad_pixels_FD( + np.arange(t1, t2), FD, img_shape=avg_imgi.shape, threshold=threshold, show_progress=False + ) + img = avg_imgi * mask * badpi + qp_saxsi, iq_saxsi, q_saxsi = get_circular_average(img, mask * badpi, save=False, pargs=setup_pargs) + # print( img.max()) + if t1 == FD.beg: + qp_saxs, iq_saxs, q_saxs = np.zeros_like(qp_saxsi), np.zeros_like(iq_saxsi), np.zeros_like(q_saxsi) + qp_saxs += qp_saxsi + iq_saxs += iq_saxsi + q_saxs += q_saxsi + qp_saxs /= Nimg + iq_saxs /= Nimg + q_saxs /= Nimg + + return qp_saxs, iq_saxs, q_saxs + + +def get_waxs_beam_center(gamma, origin=[432, 363], Ldet=1495, pixel_size=75 * 1e-3): + """YG Feb 10, 2018 + Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma + Input: + gamma: angle in degree + Ldet: sample to detector distance, 1495 mm for CHX WAXS + origin: beam center for gamma = 0, (python x,y coordinate in pixel) + pxiel size: 75 * 1e-3 mm for Eiger 1M + output: + beam center: for the target gamma, in pixel + """ + return [int(origin[0] + np.tan(np.radians(gamma)) * Ldet / pixel_size), origin[1]] + + +def get_img_from_iq(qp, iq, img_shape, center): + """YG Jan 24, 2018 + Get image from circular average + Input: + qp: q in pixel unit + iq: circular average + image_shape, e.g., [256,256] + center: [center_y, center_x] e.g., [120, 200] + Output: + img: recovered image + """ + pixelist = np.arange(img_shape[0] * img_shape[1]) + pixely = pixelist % img_shape[1] - center[1] + pixelx = pixelist // img_shape[1] - center[0] + r = np.hypot(pixelx, pixely) # leave as float. + # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return (np.interp(r, qp, iq)).reshape(img_shape) + + +def average_array_withNan(array, axis=0, mask=None): + """YG. Jan 23, 2018 + Average array invovling np.nan along axis + + Input: + array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + avg: averaged array along axis + """ + shape = array.shape + if mask == None: + mask = np.isnan(array) + # mask = np.ma.masked_invalid(array).mask + array_ = np.ma.masked_array(array, mask=mask) + try: + sums = np.array(np.ma.sum(array_[:, :], axis=axis)) + except: + sums = np.array(np.ma.sum(array_[:], axis=axis)) + + cts = np.sum(~mask, axis=axis) + # print(cts) + return sums / cts + + +def deviation_array_withNan(array, axis=0, mask=None): + """YG. 
Jan 23, 2018 + Get the deviation of array invovling np.nan along axis + + Input: + array: ND array + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + dev: the deviation of array along axis + """ + avg2 = average_array_withNan(array**2, axis=axis, mask=mask) + avg = average_array_withNan(array, axis=axis, mask=mask) + return np.sqrt(avg2 - avg**2) + + +def refine_roi_mask(roi_mask, pixel_num_thres=10): + """YG Dev Jan20,2018 + remove bad roi which pixel numbe is lower pixel_num_thres + roi_mask: array, + pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, + i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres, + that roi will be considered as bad one and be removed. + """ + new_mask = np.zeros_like(roi_mask) + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + good_ind = np.where(nopr >= pixel_num_thres)[0] + 1 + l = len(good_ind) + new_ind = np.arange(1, l + 1) + for i, gi in enumerate(good_ind): + new_mask.ravel()[np.where(roi_mask.ravel() == gi)[0]] = new_ind[i] + return new_mask, good_ind - 1 + + +def shrink_image_stack(imgs, bins): + """shrink imgs by bins + imgs: shape as [Nimg, imx, imy]""" + Nimg, imx, imy = imgs.shape + bx, by = bins + imgsk = np.zeros([Nimg, imx // bx, imy // by]) + N = len(imgs) + for i in range(N): + imgsk[i] = shrink_image(imgs[i], bins) + return imgsk + + +def shrink_image(img, bins): + """YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y + input: + img: 2d array, + bins: integer list, eg. [2,2] + output: + imgb: binned img + """ + m, n = img.shape + bx, by = bins + Nx, Ny = m // bx, n // by + # print(Nx*bx, Ny*by) + return img[: Nx * bx, : Ny * by].reshape(Nx, bx, Ny, by).mean(axis=(1, 3)) + + +def get_diff_fv(g2_fit_paras, qval_dict, ang_init=137.2): + """YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras""" + g2_fit_para_ = g2_fit_paras.copy() + qr = np.array([qval_dict[k][0] for k in sorted(qval_dict.keys())]) + qang = np.array([qval_dict[k][1] for k in sorted(qval_dict.keys())]) + # x=g2_fit_para_.pop( 'relaxation_rate' ) + # x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_["diff"] = g2_fit_paras["relaxation_rate"] / qr**2 + cos_part = np.abs(np.cos(np.radians(qang - ang_init))) + g2_fit_para_["fv"] = g2_fit_paras["flow_velocity"] / cos_part / qr + return g2_fit_para_ + + +# function to get indices of local extrema (=indices of speckle echo maximum amplitudes): +def get_echos(dat_arr, min_distance=10): + """ + getting local maxima and minima from 1D data -> e.g. speckle echos + strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima + using np.argmin to find absolute minima between relative maxima + returns [max_ind,min_ind] -> lists of indices corresponding to local maxima/minima + by LW 10/23/2018 + """ + from skimage.feature import peak_local_max + + max_ind = peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) 
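+ # Added comments (descriptive only): as the note above says, peak_local_max returns the
+ # maxima in reversed order, as an (N, 1) array of single-element index lists. The loop
+ # below locates the absolute minimum of dat_arr between each pair of neighbouring maxima,
+ # and the final lines flatten max_ind and return both lists reversed into ascending order.
+ # Illustrative call (dat_arr is any 1D trace, e.g. a speckle-echo signal):
+ #     max_ind, min_ind = get_echos(dat_arr, min_distance=10)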
+ min_ind = [] + for i in range(len(max_ind[:-1])): + min_ind.append(max_ind[i + 1][0] + np.argmin(dat_arr[max_ind[i + 1][0] : max_ind[i][0]])) + # unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind = [] + for l in max_ind: + mmax_ind.append(l[0]) + # return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)), list(reversed(min_ind))] + + +def pad_length(arr, pad_val=np.nan): + """ + arr: 2D matrix + pad_val: values being padded + adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix + -> used to convert python generic data object to HDF5 native format + function fixes python bug in padding (np.pad) integer array with np.nan + update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogenous size + by LW 12/30/2017 + """ + max_len = [] + for i in range(len(arr)): + max_len.append([len(arr[i])]) + max_len = np.max(max_len) + for l in range(len(arr)): + arr[l] = np.pad(arr[l] * 1.0, (0, max_len - np.size(arr[l])), mode="constant", constant_values=pad_val) + return arr + + +def save_array_to_tiff(array, output, verbose=True): + """Y.G. Nov 1, 2017 + Save array to a tif file + """ + img = PIL.Image.fromarray(array) + img.save(output) + if verbose: + print("The data is save to: %s." % (output)) + + +def load_pilatus(filename): + """Y.G. Nov 1, 2017 + Load a pilatus 2D image + """ + return np.array(PIL.Image.open(filename).convert("I")) + + +def ls_dir(inDir, have_list=[], exclude_list=[]): + """Y.G. Aug 1, 2019 + List all filenames in a filefolder + inDir: fullpath of the inDir + have_string: only retrun filename containing the string + exclude_string: only retrun filename not containing the string + + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifs_ = [] + for tif in tifs: + flag = 1 + for string in have_list: + if string not in tif: + flag *= 0 + for string in exclude_list: + if string in tif: + flag *= 0 + if flag: + tifs_.append(tif) + + return np.array(tifs_) + + +def ls_dir2(inDir, string=None): + """Y.G. Nov 1, 2017 + List all filenames in a filefolder (not include hidden files and subfolders) + inDir: fullpath of the inDir + string: if not None, only retrun filename containing the string + """ + from os import listdir + from os.path import isfile, join + + if string == None: + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + else: + tifs = np.array([f for f in listdir(inDir) if (isfile(join(inDir, f))) & (string in f)]) + return tifs + + +def re_filename(old_filename, new_filename, inDir=None, verbose=True): + """Y.G. Nov 28, 2017 + Rename old_filename with new_filename in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_filename/ new_filename: string + an example: + re_filename( 'uid=run20_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' + ) + """ + if inDir != None: + os.rename(inDir + old_filename, inDir + new_filename) + else: + os.rename(old_filename, new_filename) + print("The file: %s is changed to: %s." % (old_filename, new_filename)) + + +def re_filename_dir(old_pattern, new_pattern, inDir, verbose=True): + """Y.G. 
Nov 28, 2017 + Rename all filenames with old_pattern with new_pattern in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_pattern, new_pattern + an example, + re_filename_dir('20_', '17_', inDir ) + """ + fps = ls_dir(inDir) + for fp in fps: + if old_pattern in fp: + old_filename = fp + new_filename = fp.replace(old_pattern, new_pattern) + re_filename(old_filename, new_filename, inDir, verbose=verbose) + + +def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, silent=True, qprecision=5): + """ + function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis + [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) + calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True) + qdict: qval_dict from analysis pipeline/hdf5 result file + q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True) + q_thresh: threshold for comparing Q-values, set to 0 for exact comparison + phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True) + p_thresh: threshold for comparing phi values, set to 0 for exact comparison + silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest + by LW 10/21/2017 + update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) + update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environments >= 2019-3.0.1 + """ + import collections + from collections import OrderedDict + + qdict = collections.OrderedDict(sorted(qdict.items())) + qs = [] + phis = [] + for i in qdict.keys(): + qs.append(qdict[i][0]) + phis.append(qdict[i][1]) + qslist = list(OrderedDict.fromkeys(qs)) + qslist = np.unique(np.round(qslist, qprecision)) + phislist = list(OrderedDict.fromkeys(phis)) + qslist = list(np.sort(qslist)) + phislist = list(np.sort(phislist)) + if q_nr: + qinterest = qslist[q] + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] + else: + qinterest = q + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] # new + if phi_nr: + phiinterest = phislist[phi] + phiindices = [i for i, x in enumerate(phis) if x == phiinterest] + else: + phiinterest = phi + phiindices = [i for i, x in enumerate(phis) if np.abs(x - phiinterest) < p_thresh] # new + ret_list = [ + list(set(qindices).intersection(phiindices))[0], + qinterest, + phiinterest, + qslist, + phislist, + ] # -> this is the original + if silent == False: + print("list of available Qs:") + print(qslist) + print("list of available phis:") + print(phislist) + print("Roi number for Q= " + str(ret_list[1]) + " and phi= " + str(ret_list[2]) + ": " + str(ret_list[0])) + return ret_list + + +def get_fit_by_two_linear( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, + namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) + Input: + x: 1D np.array + y: 1D np.array + mid_xpoint: float, the middle point of x + xrange: [x1,x2] + Return: + D1, gmfit1, D2, gmfit2 : + fit parameter (slope, background) of linear fit1 + convinent fit class, gmfit1(x) gives yvale + fit parameter (slope, background) of linear fit2 + convinent fit class, gmfit2(x) gives yvale + + """ + if xrange == None: + x1, x2 = min(x), max(x) + x1, x2 = xrange + if mid_xpoint2 == 
None: + mid_xpoint2 = mid_xpoint1 + D1, gmfit1 = linear_fit(x, y, xrange=[x1, mid_xpoint1]) + D2, gmfit2 = linear_fit(x, y, xrange=[mid_xpoint2, x2]) + return D1, gmfit1, D2, gmfit2 + + +def get_cross_point(x, gmfit1, gmfit2): + """YG Octo 16,2017 + Get croess point of two curve + """ + y1 = gmfit1(x) + y2 = gmfit2(x) + return x[np.argmin(np.abs(y1 - y2))] + + +def get_curve_turning_points( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 + Get a turning point of a curve by doing a two-linear fit + """ + D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x, y, mid_xpoint1, mid_xpoint2, xrange) + return get_cross_point(x, gmfit1, gmfit2) + + +def plot_fit_two_linear_fit(x, y, gmfit1, gmfit2, ax=None): + """YG Octo 16,2017 Plot data with two fitted linear func""" + if ax == None: + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, c="k", legend="data", m="o", ls="") # logx=True, logy=True ) + plot1D(x=x, y=gmfit1(x), ax=ax, c="r", m="", ls="-", legend="fit1") + plot1D(x=x, y=gmfit2(x), ax=ax, c="b", m="", ls="-", legend="fit2") + return ax + + +def linear_fit(x, y, xrange=None): + """YG Octo 16,2017 copied from XPCS_SAXS + a linear fit + """ + if xrange != None: + xmin, xmax = xrange + x1, x2 = find_index(x, xmin, tolerance=None), find_index(x, xmax, tolerance=None) + x_ = x[x1:x2] + y_ = y[x1:x2] + else: + x_ = x + y_ = y + D0 = np.polyfit(x_, y_, 1) + gmfit = np.poly1d(D0) + return D0, gmfit + + +def find_index(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + """ + + N = len(x) + i = 0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + position = np.argmin(np.abs(x - x0)) + return position + + +def find_index_old(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + """ + + N = len(x) + i = 0 + position = None + if tolerance == None: + tolerance = (x[1] - x[0]) / 2.0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + for item in x: + if abs(item - x0) <= tolerance: + position = i + # print 'Found Index!!!' + break + i += 1 + + return position + + +def sgolay2d(z, window_size, order, derivative=None): + """YG Octo 16, 2017 + Modified from http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html + Procedure for sg2D: + https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter#Two-dimensional_convolution_coefficients + + Two-dimensional smoothing and differentiation can also be applied to tables of data values, such as intensity + values in a photographic image which is composed of a rectangular grid of pixels.[16] [17] The trick is to transform + part of the table into a row by a simple ordering of the indices of the pixels. Whereas the one-dimensional filter + coefficients are found by fitting a polynomial in the subsidiary variable, z to a set of m data points, the + two-dimensional coefficients are found by fitting a polynomial in subsidiary variables v and w to a set of m x m + data points. The following example, for a bicubic polynomial and m = 5, illustrates the process, which parallels the + process for the one dimensional case, above.[18] + + The square of 25 data values, d1 - d25 + becomes a vector when the rows are placed one after another. + The Jacobian has 10 columns, one for each of the parameters a00 - a03 and 25 rows, one for each pair of v and w values. 
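+ (Added note, not part of the text quoted above: the expression missing after "calculated as"
+ below is the standard least-squares pseudo-inverse -- with the design matrix A of monomials
+ dx**i * dy**j built in the code below, the coefficient matrix is C = (A^T A)^(-1) A^T,
+ which is what np.linalg.pinv(A) computes.)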
+ The convolution coefficients are calculated as + The first row of C contains 25 convolution coefficients which can be multiplied with the 25 data values to provide a + smoothed value for the central data point (13) of the 25. + + """ + # number of terms in the polynomial expression + n_terms = (order + 1) * (order + 2) / 2.0 + + if window_size % 2 == 0: + raise ValueError("window_size must be odd") + + if window_size**2 < n_terms: + raise ValueError("order is too high for the window size") + + half_size = window_size // 2 + + # exponents of the polynomial. + # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ... + # this line gives a list of two item tuple. Each tuple contains + # the exponents of the k-th term. First element of tuple is for x + # second element for y. + # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...] + exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)] + + # coordinates of points + ind = np.arange(-half_size, half_size + 1, dtype=np.float64) + dx = np.repeat(ind, window_size) + dy = np.tile(ind, [window_size, 1]).reshape( + window_size**2, + ) + + # build matrix of system of equation + A = np.empty((window_size**2, len(exps))) + for i, exp in enumerate(exps): + A[:, i] = (dx ** exp[0]) * (dy ** exp[1]) + + # pad input array with appropriate values at the four borders + new_shape = z.shape[0] + 2 * half_size, z.shape[1] + 2 * half_size + Z = np.zeros((new_shape)) + # top band + band = z[0, :] + Z[:half_size, half_size:-half_size] = band - np.abs(np.flipud(z[1 : half_size + 1, :]) - band) + # bottom band + band = z[-1, :] + Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size - 1 : -1, :]) - band) + # left band + band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size]) + Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1 : half_size + 1]) - band) + # right band + band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size]) + Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size - 1 : -1]) - band) + # central band + Z[half_size:-half_size, half_size:-half_size] = z + + # top left corner + band = z[0, 0] + Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1 : half_size + 1, 1 : half_size + 1])) - band) + # bottom right corner + band = z[-1, -1] + Z[-half_size:, -half_size:] = band + np.abs( + np.flipud(np.fliplr(z[-half_size - 1 : -1, -half_size - 1 : -1])) - band + ) + + # top right corner + band = Z[half_size, -half_size:] + Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size + 1 : 2 * half_size + 1, -half_size:]) - band) + # bottom left corner + band = Z[-half_size:, half_size].reshape(-1, 1) + Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band) + + # solve system and convolve + if derivative == None: + m = np.linalg.pinv(A)[0].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, m, mode="valid") + elif derivative == "col": + c = np.linalg.pinv(A)[1].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -c, mode="valid") + elif derivative == "row": + r = np.linalg.pinv(A)[2].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -r, mode="valid") + elif derivative == "both": + c = np.linalg.pinv(A)[1].reshape((window_size, -1)) + r = np.linalg.pinv(A)[2].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -r, mode="valid"), scipy.signal.fftconvolve(Z, -c, mode="valid") + + +def load_filelines(fullpath): + """YG Develop March 10, 2018 + Load all 
content from a file + basepath, fname = os.path.split(os.path.abspath( fullpath )) + Input: + fullpath: str, full path of the file + Return: + list: str + """ + with open(fullpath, "r") as fin: + p = fin.readlines() + return p + + +def extract_data_from_file( + filename, + filepath, + good_line_pattern=None, + start_row=None, + good_cols=None, + labels=None, +): + """YG Develop Octo 17, 2017 + Add start_row option at March 5, 2018 + + Extract data from a file + Input: + filename: str, filename of the data + filepath: str, path of the data + good_line_pattern: str, data will be extract below this good_line_pattern + Or giving start_row: int + good_cols: list of integer, good index of cols + lables: the label of the good_cols + #save: False, if True will save the data into a csv file with filename appending csv ?? + Return: + a pds.dataframe + Example: + filepath = '/XF11ID/analysis/2017_3/lwiegart/Link_files/Exports/' + filename = 'ANPES2 15-10-17 16-31-11-84Exported.txt' + good_cols = [ 1,2,4,6,8,10 ] + labels = [ 'time', 'temperature', 'force', 'distance', 'stress', 'strain' ] + good_line_pattern = "Index\tX\tY\tX\tY\tX\tY" + df = extract_data_from_file( filename, filepath, good_line_pattern, good_cols, labels) + """ + import pandas as pds + + with open(filepath + filename, "r") as fin: + p = fin.readlines() + di = 1e20 + for i, line in enumerate(p): + if start_row != None: + di = start_row + elif good_line_pattern != None: + if good_line_pattern in line: + di = i + else: + di = 0 + if i == di + 1: + els = line.split() + if good_cols == None: + data = np.array(els, dtype=float) + else: + data = np.array([els[j] for j in good_cols], dtype=float) + elif i > di: + try: + els = line.split() + if good_cols == None: + temp = np.array(els, dtype=float) + else: + temp = np.array([els[j] for j in good_cols], dtype=float) + data = np.vstack((data, temp)) + except: + pass + if labels == None: + labels = np.arange(data.shape[1]) + df = pds.DataFrame(data, index=np.arange(data.shape[0]), columns=labels) + return df + + +def get_print_uids(start_time, stop_time, return_all_info=False): + """Update Feb 20, 2018 also return full uids + YG. 
Octo 3, 2017@CHX + Get full uids and print uid plus Measurement contents by giving start_time, stop_time + + """ + hdrs = list(db(start_time=start_time, stop_time=stop_time)) + fuids = np.zeros(len(hdrs), dtype=object) + uids = np.zeros(len(hdrs), dtype=object) + sids = np.zeros(len(hdrs), dtype=object) + n = 0 + all_info = np.zeros(len(hdrs), dtype=object) + for i in range(len(hdrs)): + fuid = hdrs[-i - 1]["start"]["uid"] # reverse order + uid = fuid[:6] # reverse order + sid = hdrs[-i - 1]["start"]["scan_id"] + fuids[n] = fuid + uids[n] = uid + sids[n] = sid + date = time.ctime(hdrs[-i - 1]["start"]["time"]) + try: + m = hdrs[-i - 1]["start"]["Measurement"] + except: + m = "" + info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + print(info) + if return_all_info: + all_info[n] = info + n += 1 + if not return_all_info: + return fuids, uids, sids + else: + return fuids, uids, sids, all_info + + +def get_last_uids(n=-1): + """YG Sep 26, 2017 + A Convinient function to copy uid to jupyter for analysis""" + uid = db[n]["start"]["uid"][:8] + sid = db[n]["start"]["scan_id"] + m = db[n]["start"]["Measurement"] + return " uid = '%s' #(scan num: %s (Measurement: %s " % (uid, sid, m) + + +def get_base_all_filenames(inDir, base_filename_cut_length=-7): + """YG Sep 26, 2017 + Get base filenames and their related all filenames + Input: + inDir, str, input data dir + base_filename_cut_length: to which length the base name is unique + Output: + dict: keys, base filename + vales, all realted filename + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifsc = list(tifs.copy()) + utifs = np.sort(np.unique(np.array([f[:base_filename_cut_length] for f in tifs])))[::-1] + files = {} + for uf in utifs: + files[uf] = [] + i = 0 + reName = [] + for i in range(len(tifsc)): + if uf in tifsc[i]: + files[uf].append(tifsc[i]) + reName.append(tifsc[i]) + for fn in reName: + tifsc.remove(fn) + return files + + +def create_ring_mask(shape, r1, r2, center, mask=None): + """YG. Sep 20, 2017 Develop@CHX + Create 2D ring mask + input: + shape: two integer number list, mask shape, e.g., [100,100] + r1: the inner radius + r2: the outer radius + center: two integer number list, [cx,cy], ring center, e.g., [30,50] + output: + 2D numpy array, 0,1 type + """ + + m = np.zeros(shape, dtype=bool) + rr, cc = disk((center[1], center[0]), r2, shape=shape) + m[rr, cc] = 1 + rr, cc = disk((center[1], center[0]), r1, shape=shape) + m[rr, cc] = 0 + if mask != None: + m += mask + return m + + +def get_image_edge(img): + """ + Y.G. Developed at Sep 8, 2017 @CHX + Get sharp edges of an image + img: two-D array, e.g., a roi mask + """ + edg_ = prewitt(img / 1.0) + edg = np.zeros_like(edg_) + w = np.where(edg_ > 1e-10) + edg[w] = img[w] + edg[np.where(edg == 0)] = 1 + return edg + + +def get_image_with_roi(img, roi_mask, scale_factor=2): + """ + Y.G. 
Developed at Sep 8, 2017 @CHX + Get image with edges of roi_mask by doing + i) get edges of roi_mask by function get_image_edge + ii) scale img at region of interest (ROI) by scale_factor + img: two-D array for image + roi_mask: two-D array for ROI + scale_factor: scaling factor of ROI in image + """ + edg = get_image_edge(roi_mask) + img_ = img.copy() + w = np.where(roi_mask) + img_[w] = img[w] * scale_factor + return img_ * edg + + +def get_today_date(): + from time import gmtime, strftime + + return strftime("%m-%d-%Y", gmtime()) + + +def move_beamstop(mask, xshift, yshift): + """Y.G. Developed at July 18, 2017 @CHX + Create new mask by shift the old one with xshift, yshift + Input + --- + mask: 2D numpy array, 0 for bad pixels, 1 for good pixels + xshift, integer, shift value along x direction + yshift, integer, shift value along y direction + + Output + --- + mask, 2D numpy array, + """ + m = np.ones_like(mask) + W, H = mask.shape + w = np.where(mask == 0) + nx, ny = w[0] + int(yshift), w[1] + int(xshift) + gw = np.where((nx >= 0) & (nx < W) & (ny >= 0) & (ny < H)) + nx = nx[gw] + ny = ny[gw] + m[nx, ny] = 0 + return m + + +def validate_uid(uid): + """check uid whether be able to load data""" + try: + sud = get_sid_filenames(db[uid]) + print(sud) + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + print(imgs) + return 1 + except: + print("Can't load this uid=%s!" % uid) + return 0 + + +def validate_uid_dict(uid_dict): + """Y.G. developed July 17, 2017 @CHX + Check each uid in a dict can load data or not + uids: dict, val: meaningful decription, key: a list of uids + + """ + badn = 0 + badlist = [] + for k in list(uids.keys()): + for uid in uids[k]: + flag = validate_uid(uid) + if not flag: + badn += 1 + badlist.append(uid) + print("There are %s bad uids:%s in this uid_dict." % (badn, badlist)) + + +def get_mass_center_one_roi(FD, roi_mask, roi_ind): + """Get the mass center (in pixel unit) of one roi in a time series FD + FD: handler for a compressed time series + roi_mask: the roi array + roi_ind: the interest index of the roi + + """ + import scipy + + m = roi_mask == roi_ind + cx, cy = np.zeros(int((FD.end - FD.beg) / 1)), np.zeros(int((FD.end - FD.beg) / 1)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get mass center of one ROI of each frame"): + img = FD.rdframe(i) * m + c = scipy.ndimage.measurements.center_of_mass(img) + cx[n], cy[n] = int(c[0]), int(c[1]) + n += 1 + return cx, cy + + +def get_current_pipeline_filename(NOTEBOOK_FULL_PATH): + """Y.G. April 25, 2017 + Get the current running pipeline filename and path + Assume the piple is located in /XF11ID/ + Return, path and filename + """ + from IPython.core.magics.display import Javascript + + if False: + Javascript( + """ + var nb = IPython.notebook; + var kernel = IPython.notebook.kernel; + var command = "NOTEBOOK_FULL_PATH = '" + nb.base_url + nb.notebook_path + "'"; + kernel.execute(command); + """ + ) + print(NOTEBOOK_FULL_PATH) + filename = NOTEBOOK_FULL_PATH.split("/")[-1] + path = "/XF11ID/" + for s in NOTEBOOK_FULL_PATH.split("/")[3:-1]: + path += s + "/" + return path, filename + + +def get_current_pipeline_fullpath(NOTEBOOK_FULL_PATH): + """Y.G. April 25, 2017 + Get the current running pipeline full filepath + Assume the piple is located in /XF11ID/ + Return, the fullpath (path + filename) + """ + p, f = get_current_pipeline_filename(NOTEBOOK_FULL_PATH) + return p + f + + +def save_current_pipeline(NOTEBOOK_FULL_PATH, outDir): + """Y.G. 
April 25, 2017 + Save the current running pipeline to outDir + The save pipeline should be the snapshot of the current state. + """ + + import shutil + + path, fp = get_current_pipeline_filename(NOTEBOOK_FULL_PATH) + shutil.copyfile(path + fp, outDir + fp) + + print("This pipeline: %s is saved in %s." % (fp, outDir)) + + +def plot_g1(taus, g2, g2_fit_paras, qr=None, ylim=[0, 1], title=""): + """Dev Apr 19, 2017, + Plot one-time correlation, giving taus, g2, g2_fit""" + noqs = g2.shape[1] + fig, ax = plt.subplots() + if qr == None: + qr = np.arange(noqs) + for i in range(noqs): + b = g2_fit_paras["baseline"][i] + beta = g2_fit_paras["beta"][i] + y = np.sqrt(np.abs(g2[1:, i] - b) / beta) + plot1D( + x=taus[1:], + y=y, + ax=ax, + legend="q=%s" % qr[i], + ls="-", + lw=2, + m=markers[i], + c=colors[i], + title=title, + ylim=ylim, + logx=True, + legend_size=8, + ) + ax.set_ylabel(r"$g_1$" + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + return ax + + +def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): + """Remove bad pixels in roi_mask. The bad pixel is defined by the filter_dict, + if filter_type ='ylim', the filter_dict wit key as q and each value gives a high and low limit thresholds. The value of the pixels in avg_img above or below the limit are considered as bad pixels. + if filter_type='badpix': the filter_dict wit key as q and each value gives a list of bad pixel. + + avg_img, the averaged image + roi_mask: two-d array, the same shape as image, the roi mask, value is integer, e.g., 1 ,2 ,... + filter_dict: keys, as roi_mask integer, value, by default is [None,None], is the limit, + example, {2:[4,5], 10:[0.1,1.1]} + NOTE: first q = 1 (not 0) + """ + rm = roi_mask.copy() + rf = np.ravel(rm) + for k in list(filter_dict.keys()): + pixel = roi.roi_pixel_values(avg_img, roi_mask, [k])[0][0] + # print( np.max(pixel), np.min(pixel) ) + if filter_type == "ylim": + xmin, xmax = filter_dict[k] + badp = np.where((pixel >= xmax) | (pixel <= xmin))[0] + else: + badp = filter_dict[k] + if len(badp) != 0: + pls = np.where([rf == k])[1] + rf[pls[badp]] = 0 + return rm + + +## +# Dev at March 31 for create Eiger chip mask +def create_chip_edges_mask(det="1M"): + """Create a chip edge mask for Eiger detector""" + if det == "1M": + shape = [1065, 1030] + w = 4 + mask = np.ones(shape, dtype=np.int32) + cx = [1030 // 4 * i for i in range(1, 4)] + # cy = [ 1065//4 *i for i in range(1,4) ] + cy = [808, 257] + # print (cx, cy ) + for c in cx: + mask[:, c - w // 2 : c + w // 2] = 0 + for c in cy: + mask[c - w // 2 : c + w // 2, :] = 0 + + return mask + + +def create_ellipse_donut(cx, cy, wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): + Nmax = np.max(np.unique(roi_mask)) + rr1, cc1 = ellipse(cy, cx, wy_inner, wx_inner) + rr2, cc2 = ellipse(cy, cx, wy_inner + gap, wx_inner + gap) + rr3, cc3 = ellipse(cy, cx, wy_outer, wx_outer) + roi_mask[rr3, cc3] = 2 + Nmax + roi_mask[rr2, cc2] = 0 + roi_mask[rr1, cc1] = 1 + Nmax + return roi_mask + + +def create_box(cx, cy, wx, wy, roi_mask): + Nmax = np.max(np.unique(roi_mask)) + for i, [cx_, cy_] in enumerate(list(zip(cx, cy))): # create boxes + x = np.array([cx_ - wx, cx_ + wx, cx_ + wx, cx_ - wx]) + y = np.array([cy_ - wy, cy_ - wy, cy_ + wy, cy_ + wy]) + rr, cc = polygon(y, x) + roi_mask[rr, cc] = i + 1 + Nmax + return roi_mask + + +def create_folder(base_folder, sub_folder): + """ + Crate a subfolder under base folder + Input: + base_folder: full path of the base folder + sub_folder: sub folder name to be created + 
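+ Example (illustrative only; the base path is hypothetical):
+ data_dir = create_folder('/XF11ID/analysis/2017_2/username', 'Results')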
Return: + Created full path of the created folder + """ + + data_dir0 = os.path.join(base_folder, sub_folder) + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + return data_dir0 + + +def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): + """ + Crate a folder for saving user data analysis result + Input: + CYCLE: run cycle + username: if None, get username from the jupyter username + Return: + Created folder name + """ + if username != "Default": + if username == None: + username = getpass.getuser() + data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") + else: + data_dir0 = os.path.join(default_dir, CYCLE + "/") + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + return data_dir0 + + +################################## +# ########For dose analysis ####### +# ################################# +def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): + """ + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + """ + return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att) + + +def get_multi_tau_lag_steps(fra_max, num_bufs=8): + """ + Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max + Parameters: + fra_max: integer, the maximun frame number + buf_num (default=8), + Return: + taus_in_log, a list + + e.g., + get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) + + """ + num_levels = int(np.log(fra_max / (num_bufs - 1)) / np.log(2) + 1) + 1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + return lag_steps[lag_steps < fra_max] + + +def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True, num_bufs=8): + """ + Get taus for dose dependent analysis + Parameters: + fra_max_list: a list, a lsit of largest available frame number + acq_time: acquistion time for each frame + log_taus: if true, will use the multi-tau defined taus bu using buf_num (default=8), + otherwise, use deltau =1 + Return: + tausd, a dict, with keys as taus_max_list items + e.g., + get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True, num_bufs = 8) + --> + {20: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]), + 30: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28]), + 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) + } + + """ + tausd = {} + for n in fra_max_list: + if max_fra_num != None: + L = max_fra_num + else: + L = np.infty + if n > L: + warnings.warn( + "Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." 
+ "We will use the maxium dose of the data." + ) + n = L + if log_taus: + lag_steps = get_multi_tau_lag_steps(n, num_bufs) + else: + lag_steps = np.arange(n) + tausd[n] = lag_steps * acq_time + return tausd + + +def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * 10 * (-5)): + """Y.G. Dec 31, 2016, check lost metadata + + Parameter: + md: dict, meta data dictionay + Nimg: number of frames for this uid metadata + inc_x0/y0: incident beam center x0/y0, if None, will over-write the md['beam_center_x/y'] + pixelsize: if md don't have ['x_pixel_size'], the pixelsize will add it + Return: + dpix: pixelsize, in mm + lambda_: wavelegth of the X-rays in Angstroms + exposuretime: exposure time in sec + timeperframe: acquisition time is sec + center: list, [x,y], incident beam center in pixel + Will also update md + """ + mdn = md.copy() + if "number of images" not in list(md.keys()): + md["number of images"] = Nimg + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 7.5000004e-05 + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + try: + lambda_ = md["wavelength"] + except: + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + try: + Ldet = md["det_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["det_distance"] = Ldet + except: + Ldet = md["detector_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["detector_distance"] = Ldet + + try: # try exp time from detector + exposuretime = md["count_time"] # exposure time in sec + except: + exposuretime = md["cam_acquire_time"] # exposure time in sec + try: # try acq time from detector + acquisition_period = md["frame_time"] + except: + try: + acquisition_period = md["acquire period"] + except: + uid = md["uid"] + acquisition_period = float(db[uid]["start"]["acquire period"]) + timeperframe = acquisition_period + if inc_x0 != None: + mdn["beam_center_x"] = inc_y0 + print("Beam_center_x has been changed to %s. (no change in raw metadata): " % inc_y0) + if inc_y0 != None: + mdn["beam_center_y"] = inc_x0 + print("Beam_center_y has been changed to %s. (no change in raw metadata): " % inc_x0) + center = [int(mdn["beam_center_x"]), int(mdn["beam_center_y"])] # beam center [y,x] for python image + center = [center[1], center[0]] + + return dpix, lambda_, Ldet, exposuretime, timeperframe, center + + +def combine_images(filenames, outputfile, outsize=(2000, 2400)): + """Y.G. 
Dec 31, 2016 + Combine images together to one image using PIL.Image + Input: + filenames: list, the images names to be combined + outputfile: str, the filename to generate + outsize: the combined image size + Output: + save a combined image file + """ + N = len(filenames) + # nx = np.int( np.ceil( np.sqrt(N)) ) + # ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int(np.ceil(np.sqrt(N))) + nx = int(np.ceil(N / float(ny))) + + # print(nx,ny) + result = Image.new("RGB", outsize, color=(255, 255, 255, 0)) + basewidth = int(outsize[0] / nx) + hsize = int(outsize[1] / ny) + for index, file in enumerate(filenames): + path = os.path.expanduser(file) + img = Image.open(path) + bands = img.split() + ratio = img.size[1] / img.size[0] # h/w + if hsize > basewidth * ratio: + basewidth_ = basewidth + hsize_ = int(basewidth * ratio) + else: + basewidth_ = int(hsize / ratio) + hsize_ = hsize + # print( index, file, basewidth, hsize ) + size = (basewidth_, hsize_) + bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] + img = Image.merge("RGBA", bands) + x = index % nx * basewidth + y = index // nx * hsize + w, h = img.size + # print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h)) + result.save(outputfile, quality=100, optimize=True) + print("The combined image is saved as: %s" % outputfile) + + +def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz=True, one_qz_multi_qr=True): + """Y.G. Dec 27, 2016 + Map the roi label array with qr or (qr,qz) or (q//, q|-) values + Parameters: + qr_center: list, a list of qr + qz_center: list, a list of qz, + multi_qr_for_one_qz: by default=True, + if one_qz_multi_qr: + one qz_center corresponds to all qr_center, in other words, there are totally, len(qr_center)* len(qz) qs + else: + one qr_center corresponds to all qz_center, + else: one qr with one qz + qval_dict: if not None, will append the new dict to the qval_dict + Return: + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + + """ + + if qval_dict == None: + qval_dict = {} + maxN = 0 + else: + maxN = np.max(list(qval_dict.keys())) + 1 + + if qz_center != None: + if multi_qr_for_one_qz: + if one_qz_multi_qr: + for qzind in range(len(qz_center)): + for qrind in range(len(qr_center)): + qval_dict[maxN + qzind * len(qr_center) + qrind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) + else: + for qrind in range(len(qr_center)): + for qzind in range(len(qz_center)): + qval_dict[maxN + qrind * len(qz_center) + qzind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) + + else: + for i, [qr, qz] in enumerate(zip(qr_center, qz_center)): + qval_dict[maxN + i] = np.array([qr, qz]) + else: + for qrind in range(len(qr_center)): + qval_dict[maxN + qrind] = np.array([qr_center[qrind]]) + return qval_dict + + +def update_qval_dict(qval_dict1, qval_dict2): + """Y.G. Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + Output: + qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) + """ + maxN = np.max(list(qval_dict1.keys())) + 1 + qval_dict = {} + qval_dict.update(qval_dict1) + for k in list(qval_dict2.keys()): + qval_dict[k + maxN] = qval_dict2[k] + return qval_dict + + +def update_roi_mask(roi_mask1, roi_mask2): + """Y.G. 
Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + roi_mask1, 2d-array, label array, same shape as xpcs frame, + roi_mask2, 2d-array, label array, same shape as xpcs frame, + Output: + roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 + """ + roi_mask = roi_mask1.copy() + w = np.where(roi_mask2) + roi_mask[w] = roi_mask2[w] + np.max(roi_mask) + return roi_mask + + +def check_bad_uids(uids, mask, img_choice_N=10, bad_uids_index=None): + """Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + """ + import random + + buids = [] + guids = list(uids) + # print( guids ) + if bad_uids_index == None: + bad_uids_index = [] + for i, uid in enumerate(uids): + # print( i, uid ) + if i not in bad_uids_index: + detector = get_detector(db[uid]) + imgs = load_data(uid, detector) + img_samp_index = random.sample(range(len(imgs)), img_choice_N) + imgsa = apply_mask(imgs, mask) + avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uid) + if avg_img.max() == 0: + buids.append(uid) + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + print("The bad uid is: %s" % uid) + else: + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + buids.append(uid) + print("The bad uid is: %s" % uid) + print("The total and bad uids number are %s and %s, repsectively." % (len(uids), len(buids))) + return guids, buids + + +def find_uids(start_time, stop_time): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = db(start_time=start_time, stop_time=stop_time) + try: + print("Totally %s uids are found." 
% (len(list(hdrs)))) + except: + pass + sids = [] + uids = [] + fuids = [] + for hdr in hdrs: + s = get_sid_filenames(hdr) + # print (s[1][:8]) + sids.append(s[0]) + uids.append(s[1][:8]) + fuids.append(s[1]) + sids = sids[::-1] + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(sids), np.array(uids), np.array(fuids) + + +def ployfit(y, x=None, order=20): + """ + fit data (one-d array) by a ploynominal function + return the fitted one-d array + """ + if x == None: + x = range(len(y)) + pol = np.polyfit(x, y, order) + return np.polyval(pol, x) + + +def check_bad_data_points( + data, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + path=None, + return_ylim=False, +): + """ + data: 1D array + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + """ + if good_start == None: + good_start = 0 + if good_end == None: + good_end = len(data) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end, len(data))] + + d_ = data[good_start:good_end] + + if fit: + pfit = ployfit(d_, order=polyfit_order) + d = d_ - pfit + else: + d = d_ + pfit = np.ones_like(d) * data.mean() + + ymin = d.mean() - scale * d.std() + ymax = d.mean() + scale * d.std() + + if plot: + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(d_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title="Find Bad Points", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + d, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(d_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(d_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="", + legend_size=legend_size, + ) + + if path != None: + fp = path + "%s" % (uid) + "_find_bad_points" + ".png" + plt.savefig(fp, dpi=fig.dpi) + bd2 = list(np.where(np.abs(d - d.mean()) > scale * d.std())[0] + good_start) + + if return_ylim: + return np.array(bd1 + bd2 + bd3), ymin, ymax, pfit + else: + return np.array(bd1 + bd2 + bd3), pfit + + +def get_bad_frame_list( + imgsum, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + uid="uid", + path=None, + return_ylim=False, +): + """ + imgsum: the sum intensity of a time series + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + """ + if good_start == None: + good_start = 0 + if good_end == None: + good_end = len(imgsum) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end, len(imgsum))] + + imgsum_ = imgsum[good_start:good_end] + + if fit: + pfit = ployfit(imgsum_, order=polyfit_order) + data = imgsum_ - pfit + else: + data = 
imgsum_ + pfit = np.ones_like(data) * data.mean() + + ymin = data.mean() - scale * data.std() + ymax = data.mean() + scale * data.std() + + if plot: + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(imgsum_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title=uid + "_imgsum", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + data, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(imgsum_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(imgsum_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="imgsum_to_find_bad_frame", + legend_size=legend_size, + ) + + if path != None: + fp = path + "%s" % (uid) + "_imgsum_analysis" + ".png" + plt.savefig(fp, dpi=fig.dpi) + + bd2 = list(np.where(np.abs(data - data.mean()) > scale * data.std())[0] + good_start) + + if return_ylim: + return np.array(bd1 + bd2 + bd3), ymin, ymax + else: + return np.array(bd1 + bd2 + bd3) + + +def save_dict_csv(mydict, filename, mode="w"): + import csv + + with open(filename, mode) as csv_file: + spamwriter = csv.writer(csv_file) + for key, value in mydict.items(): + spamwriter.writerow([key, value]) + + +def read_dict_csv(filename): + import csv + + with open(filename, "r") as csv_file: + reader = csv.reader(csv_file) + mydict = dict(reader) + return mydict + + +def find_bad_pixels(FD, bad_frame_list, uid="uid"): + bpx = [] + bpy = [] + for n in bad_frame_list: + if n >= FD.beg and n <= FD.end: + f = FD.rdframe(n) + w = np.where(f == f.max()) + if len(w[0]) == 1: + bpx.append(w[0][0]) + bpy.append(w[1][0]) + + return trans_data_to_pd([bpx, bpy], label=[uid + "_x", uid + "_y"], dtype="list") + + +def mask_exclude_badpixel(bp, mask, uid): + + for i in range(len(bp)): + mask[int(bp[bp.columns[0]][i]), int(bp[bp.columns[1]][i])] = 0 + return mask + + +def print_dict(dicts, keys=None): + """ + print keys: values in a dicts + if keys is None: print all the keys + """ + if keys == None: + keys = list(dicts.keys()) + for k in keys: + try: + print("%s--> %s" % (k, dicts[k])) + except: + pass + + +def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): + """ + Jan 25, 2018 add default_dec opt + + Y.G. Dev Dec 8, 2016 + + Get metadata from a uid + + - Adds detector key with detector name + + Parameters: + uid: the unique data acquisition id + kwargs: overwrite the meta data, for example + get_meta_data( uid = uid, sample = 'test') --> will overwrtie the meta's sample to test + return: + meta data of the uid: a dictionay + with keys: + detector + suid: the simple given uid + uid: full uid + filename: the full path of the data + start_time: the data acquisition starting time in a human readable manner + And all the input metadata + """ + + if "verbose" in kwargs.keys(): # added: option to suppress output + verbose = kwargs["verbose"] + else: + verbose = True + + import time + + header = db[uid] + md = {} + + md["suid"] = uid # short uid + try: + md["filename"] = get_sid_filenames(header)[2][0] + except: + md["filename"] = "N.A." + + devices = sorted(list(header.devices())) + if len(devices) > 1: + if verbose: # added: mute output + print( + "More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'." 
+ % default_dec + ) + # raise ValueError("More than one device. This would have unintented consequences.") + dec = devices[0] + for dec_ in devices: + if default_dec in dec_: + dec = dec_ + + # print(dec) + # detector_names = sorted( header.start['detectors'] ) + detector_names = sorted(get_detectors(db[uid])) + # if len(detector_names) > 1: + # raise ValueError("More than one det. This would have unintented consequences.") + detector_name = detector_names[0] + # md['detector'] = detector_name + md["detector"] = get_detector(header) + # print( md['detector'] ) + new_dict = header.config_data(dec)["primary"][0] + for key, val in new_dict.items(): + newkey = key.replace(detector_name + "_", "") + md[newkey] = val + + # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): + # md[ k[len(dec)+1:] ]= v + + try: + md.update(header.start["plan_args"].items()) + md.pop("plan_args") + except: + pass + md.update(header.start.items()) + + # print(header.start.time) + md["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.start["time"])) + md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"])) + try: # added: try to handle runs that don't contain image data + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + md["img_shape"] = descriptor["data_keys"][md["detector"]]["shape"][:2][::-1] + except: + if verbose: + print("couldn't find image shape...skip!") + else: + pass + md.update(kwargs) + + # for k, v in sorted(md.items()): + # ... + # print(f'{k}: {v}') + + return md + + +def get_max_countc(FD, labeled_array): + """YG. 2016, Nov 18 + Compute the max intensity of ROIs in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. 
If None, this function will extract averages for all + ROIs + + Returns + ------- + max_intensity : a float + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + + if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (FD.md["ncols"], FD.md["nrows"], labeled_array.shape[0], labeled_array.shape[1]) + ) + + max_inten = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get max intensity of ROIs in all frames"): + try: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + max_inten = max(max_inten, np.max(v[w])) + except: + pass + return max_inten + + +def create_polygon_mask(image, xcorners, ycorners): + """ + Give image and x/y coners to create a polygon mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask + return bst_mask + + +def create_rectangle_mask(image, xcorners, ycorners): + """ + Give image and x/y coners to create a rectangle mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask + return bst_mask + + +def create_multi_rotated_rectangle_mask(image, center=None, length=100, width=50, angles=[0]): + """Developed at July 10, 2017 by Y.G.@CHX, NSLS2 + Create multi rectangle-shaped mask by rotating a rectangle with a list of angles + The original rectangle is defined by four corners, i.e., + [ (center[1] - width//2, center[0]), + (center[1] + width//2, center[0]), + (center[1] + width//2, center[0] + length), + (center[1] - width//2, center[0] + length) + ] + + Parameters: + image: 2D numpy array, to give mask shape + center: integer list, if None, will be the center of the image + length: integer, the length of the non-ratoted rectangle + width: integer, the width of the non-ratoted rectangle + angles: integer list, a list of rotated angles + + Return: + mask: 2D bool-type numpy array + """ + + from skimage.draw import polygon + from skimage.transform import rotate + + cx, cy = center + imy, imx = image.shape + mask = np.zeros(image.shape, dtype=bool) + wy = length + wx = width + x = np.array([max(0, cx - wx // 2), min(imx, cx + wx // 2), min(imx, cx + wx // 2), max(0, cx - wx // 2)]) + y = np.array([cy, cy, min(imy, cy + wy), min(imy, cy + wy)]) + rr, cc = polygon(y, x, shape=image.shape) + mask[rr, cc] = 1 + mask_rot = np.zeros(image.shape, dtype=bool) + for angle in angles: + mask_rot += np.array(rotate(mask, angle, center=center), dtype=bool) # , preserve_range=True) + return ~mask_rot + + +def create_wedge(image, center, radius, wcors, acute_angle=True): + """YG develop at June 18, 2017, @CHX 
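+     Illustrative usage (editor sketch; `avg_img` and `center` are assumed to
+     already exist in the calling notebook):
+         wedge = create_wedge(avg_img, center=center, radius=300,
+                              wcors=[[400, 800], [1200, 100]], acute_angle=True)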
+ Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + cy, cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like(image, dtype=bool) + rr, cc = disk((cy, cx), radius, shape=image.shape) + maskc[rr, cc] = 1 + + maskp = np.zeros_like(image, dtype=bool) + x = np.array(x) + y = np.array(y) + print(x, y) + rr, cc = polygon(y, x, shape=image.shape) + maskp[rr, cc] = 1 + if acute_angle: + return maskc * maskp + else: + return maskc * ~maskp + + +def create_cross_mask( + image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, center_disk=True, center_radius=10 +): + """ + Give image and the beam center to create a cross-shaped mask + wy_left: the width of left h-line + wy_right: the width of rigth h-line + wx_up: the width of up v-line + wx_down: the width of down v-line + center_disk: if True, create a disk with center and center_radius + + Return: + the cross mask + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + cx, cy = center + bst_mask = np.zeros_like(image, dtype=bool) + ### + # for right part + wy = wy_right + x = np.array([cx, imx, imx, cx]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for left part + wy = wy_left + x = np.array([0, cx, cx, 0]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for up part + wx = wx_up + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([cy, cy, imy, imy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for low part + wx = wx_down + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([0, 0, cy, cy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + if center_radius != 0: + rr, cc = disk((cy, cx), center_radius, shape=bst_mask.shape) + bst_mask[rr, cc] = 1 + + full_mask = ~bst_mask + + return full_mask + + +def generate_edge(centers, width): + """YG. 10/14/2016 + give centers and width (number or list) to get edges""" + edges = np.zeros([len(centers), 2]) + edges[:, 0] = centers - width + edges[:, 1] = centers + width + return edges + + +def export_scan_scalar( + uid, x="dcm_b", y=["xray_eye1_stats1_total"], path="/XF11ID/analysis/2016_3/commissioning/Results/" +): + """YG. 
10/17/2016 + export uid data to a txt file + uid: unique scan id + x: the x-col + y: the y-cols + path: save path + Example: + data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' ) + A plot for the data: + d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') + + """ + from databroker import DataBroker as db + + from pyCHX.chx_generic_functions import trans_data_to_pd + + hdr = db[uid] + print(hdr.fields()) + data = db[uid].table() + xp = data[x] + datap = np.zeros([len(xp), len(y) + 1]) + datap[:, 0] = xp + for i, yi in enumerate(y): + datap[:, i + 1] = data[yi] + + datap = trans_data_to_pd(datap, label=[x] + [yi for yi in y]) + datap.to_csv(path + "uid=%s.csv" % uid) + return datap + + +##### +# load data by databroker + + +def get_flatfield(uid, reverse=False): + import h5py + + detector = get_detector(db[uid]) + sud = get_sid_filenames(db[uid]) + master_path = "%s_master.h5" % (sud[2][0]) + print(master_path) + f = h5py.File(master_path, "r") + k = "entry/instrument/detector/detectorSpecific/" # data_collection_date' + d = np.array(f[k]["flatfield"]) + f.close() + if reverse: + d = reverse_updown(d) + + return d + + +def get_detector(header): + """Get the first detector image string by giving header""" + keys = get_detectors(header) + for k in keys: + if "eiger" in k: + return k + + +def get_detectors(header): + """Get all the detector image strings by giving header""" + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + keys = [k for k, v in descriptor["data_keys"].items() if "external" in v] + return sorted(set(keys)) + return [] + + +def get_full_data_path(uid): + """A dirty way to get full data path""" + header = db[uid] + d = header.db + s = list(d.get_documents(db[uid])) + # print(s[2]) + p = s[2][1]["resource_path"] + p2 = s[3][1]["datum_kwargs"]["seq_id"] + # print(p,p2) + return p + "_" + str(p2) + "_master.h5" + + +def get_sid_filenames(hdr, verbose=False): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(hdr,verbose=False) + hdr = db[uid] + returns (scan_id, uid, filepath) + LW 04/30/2024 + """ + import glob + from time import localtime, strftime + + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5"), + ) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2]) == 0: + if verbose: + print('could not find detector filename from "data_path" in metadata: %s' % start_doc["data path"]) + else: + if verbose: + print('Found detector filename from "data_path" in metadata!') + success = True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path = start_doc["data path"][:-11] + strftime("%Y/%m/%d/", localtime(start_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("could not find detector filename in %s" % data_path) + else: + if verbose: + print("Found detector filename in %s" % data_path) + success = True + + if ( + not success + ): # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path = start_doc["data path"][:-11] + 
strftime("%Y/%m/%d/", localtime(stop_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("Sorry, could not find detector filename....") + else: + if verbose: + print("Found detector filename in %s" % data_path) + success = True + return ret + + +# def get_sid_filenames(header): +# """YG. Dev Jan, 2016 +# Get a bluesky scan_id, unique_id, filename by giveing uid + +# Parameters +# ---------- +# header: a header of a bluesky scan, e.g. db[-1] + +# Returns +# ------- +# scan_id: integer +# unique_id: string, a full string of a uid +# filename: sring + +# Usuage: +# sid,uid, filenames = get_sid_filenames(db[uid]) + +# """ +# from collections import defaultdict +# from glob import glob +# from pathlib import Path + +# filepaths = [] +# resources = {} # uid: document +# datums = defaultdict(list) # uid: List(document) +# for name, doc in header.documents(): +# if name == "resource": +# resources[doc["uid"]] = doc +# elif name == "datum": +# datums[doc["resource"]].append(doc) +# elif name == "datum_page": +# for datum in event_model.unpack_datum_page(doc): +# datums[datum["resource"]].append(datum) +# for resource_uid, resource in resources.items(): +# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) +# if 'eiger' not in resource['spec'].lower(): +# continue +# for datum in datums[resource_uid]: +# dm_kw = datum["datum_kwargs"] +# seq_id = dm_kw['seq_id'] +# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') +# filepaths.extend(new_filepaths) +# return header.start['scan_id'], header.start['uid'], filepaths + + +def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): + """ + load data as dask-array + get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) + get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...) 
+ load_dask_data(uid,detector,reverse=False,rot90=False) + uid: uid (str) + detector: md['detector'] + mask_path_full: current standard would be _mask_path_+'pixel_masks/' + returns detector_images(dask-array), image_md + LW 04/26/2024 + """ + import json + + import dask + + hdr = db[uid] + det = detector.split("_image")[0] + # collect image metadata from loading single image + img_md_dict = { + "detector_distance": "det_distance", + "incident_wavelength": "wavelength", + "frame_time": "cam_acquire_period", + "count_time": "cam_acquire_time", + "num_images": "cam_num_images", + "beam_center_x": "beam_center_x", + "beam_center_y": "beam_center_y", + } + img_md = {} + for k in list(img_md_dict.keys()): + img_md[k] = hdr.config_data(det)["primary"][0]["%s_%s" % (det, img_md_dict[k])] + if detector in ["eiger4m_single_image", "eiger1m_single_image", "eiger500K_single_image"]: + img_md.update({"y_pixel_size": 7.5e-05, "x_pixel_size": 7.5e-05}) + got_pixel_mask = True + else: + img_md.update({"y_pixel_size": None, "x_pixel_size": None}) + got_pixel_mask = False + # load pixel mask from static location + if got_pixel_mask: + # json_open = open(_mask_path_ + "pixel_masks/pixel_mask_compression_%s.json" % detector.split("_")[0]) + json_open = open(mask_path_full + "pixel_mask_compression_%s.json" % detector.split("_")[0]) + mask_dict = json.load(json_open) + img_md["pixel_mask"] = np.array(mask_dict["pixel_mask"]) + img_md["binary_mask"] = np.array(mask_dict["binary_mask"]) + del mask_dict + + # load image data as dask-arry: + dimg = hdr.xarray_dask()[detector][0] + if reverse: + dimg = dask.array.flip(dimg, axis=(1, 1)) + if rot90: + dimg = dask.array.rot90(dimg, axes=(1, 2)) + return dimg, img_md + + +def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, rot90=False): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + fill: True to fill data + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + + if False: + ATTEMPTS = 0 + for attempt in range(ATTEMPTS): + try: + (ev,) = hdr.events(fields=[detector], fill=fill) + break + + except Exception: + print("Trying again ...!") + if attempt == ATTEMPTS - 1: + # We're out of attempts. Raise the exception to help with debugging. + raise + else: + # We didn't succeed + raise Exception("Failed after {} repeated attempts".format(ATTEMPTS)) + + # TODO(mrakitin): replace with the lazy loader (when it's implemented): + imgs = list(hdr.data(detector)) + + if len(imgs[0]) >= 1: + md = imgs[0].md + imgs = pims.pipeline(lambda img: img)(imgs[0]) + imgs.md = md + + if reverse: + md = imgs.md + imgs = reverse_updown(imgs) # Why not np.flipud? + imgs.md = md + + if rot90: + md = imgs.md + imgs = rot90_clockwise(imgs) # Why not np.flipud? 
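+         # Editor note: reverse_updown/rot90_clockwise wrap the series in a
+         # pims.pipeline, so frames are only transformed when they are indexed,
+         # e.g. (illustrative) imgs[0] triggers the read and the rotation.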
+ imgs.md = md + + return imgs + + +def mask_badpixels(mask, detector): + """ + Mask known bad pixel from the giveing mask + + """ + if detector == "eiger1m_single_image": + # to be determined + mask = mask + elif detector == "eiger4m_single_image" or detector == "image": + mask[513:552, :] = 0 + mask[1064:1103, :] = 0 + mask[1615:1654, :] = 0 + mask[:, 1029:1041] = 0 + mask[:, 0] = 0 + mask[0:, 2069] = 0 + mask[0] = 0 + mask[2166] = 0 + + elif detector == "eiger500K_single_image": + # to be determined + mask = mask + else: + mask = mask + return mask + + +def load_data2(uid, detector="eiger4m_single_image"): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + flag = 1 + while flag < 4 and flag != 0: + try: + (ev,) = hdr.events(fields=[detector]) + flag = 0 + except: + flag += 1 + print("Trying again ...!") + + if flag: + print("Can't Load Data!") + uid = "00000" # in case of failling load data + imgs = 0 + else: + imgs = ev["data"][detector] + + # print (imgs) + return imgs + + +def psave_obj(obj, filename): + """save an object with filename by pickle.dump method + This function automatically add '.pkl' as filename extension + Input: + obj: the object to be saved + filename: filename (with full path) to be saved + Return: + None + """ + with open(filename + ".pkl", "wb") as f: + pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) + + +def pload_obj(filename): + """load a pickled filename + This function automatically add '.pkl' to filename extension + Input: + filename: filename (with full path) to be saved + Return: + load the object by pickle.load method + """ + with open(filename + ".pkl", "rb") as f: + return pickle.load(f) + + +def load_mask(path, mask_name, plot_=False, reverse=False, rot90=False, *argv, **kwargs): + """load a mask file + the mask is a numpy binary file (.npy) + + Parameters + ---------- + path: the path of the mask file + mask_name: the name of the mask file + plot_: a boolen type + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + Returns + ------- + mask: array + if plot_ =True, will show the mask + + Usuage: + mask = load_mask( path, mask_name, plot_ = True ) + """ + + mask = np.load(path + mask_name) + mask = np.array(mask, dtype=np.int32) + if reverse: + mask = mask[::-1, :] + if rot90: + mask = np.rot90(mask) + if plot_: + show_img(mask, *argv, **kwargs) + return mask + + +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0): + """create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded + + """ + bst_mask = np.ones_like(img, dtype=bool) + if center != None: + from skimage.draw import disk + + imy, imx = img.shape + cy, cx = center + rr, cc = disk((cy, cx), center_radius, shape=img.shape) + bst_mask[rr, cc] = 0 + if outer_radius: + bst_mask = 
np.zeros_like(img, dtype=bool) + rr2, cc2 = disk((cy, cx), outer_radius, shape=img.shape) + bst_mask[rr2, cc2] = 1 + bst_mask[rr, cc] = 0 + hmask = np.ones_like(img) + hmask[np.where(img * bst_mask > threshold)] = 0 + return hmask + + +def apply_mask(imgs, mask): + """apply mask to imgs to produce a generator + + Usuages: + imgsa = apply_mask( imgs, mask ) + good_series = apply_mask( imgs[good_start:], mask ) + + """ + return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask + + +def reverse_updown(imgs): + """reverse imgs upside down to produce a generator + + Usuages: + imgsr = reverse_updown( imgs) + + + """ + return pims.pipeline(lambda img: img[::-1, :])(imgs) # lazily apply mask + + +def rot90_clockwise(imgs): + """reverse imgs upside down to produce a generator + + Usuages: + imgsr = rot90_clockwise( imgs) + + """ + return pims.pipeline(lambda img: np.rot90(img))(imgs) # lazily apply mask + + +def RemoveHot(img, threshold=1e7, plot_=True): + """Remove hot pixel from img""" + + mask = np.ones_like(np.array(img)) + badp = np.where(np.array(img) >= threshold) + if len(badp[0]) != 0: + mask[badp] = 0 + if plot_: + show_img(mask) + return mask + + +############ +###plot data + + +def show_img( + image, + ax=None, + label_array=None, + alpha=0.5, + interpolation="nearest", + xlim=None, + ylim=None, + save=False, + image_name=None, + path=None, + aspect=None, + logs=False, + vmin=None, + vmax=None, + return_fig=False, + cmap="viridis", + show_time=False, + file_name=None, + ylabel=None, + xlabel=None, + extent=None, + show_colorbar=True, + tight=True, + show_ticks=True, + save_format="png", + dpi=None, + center=None, + origin="lower", + lab_fontsize=16, + tick_size=12, + colorbar_fontsize=8, + use_mat_imshow=False, + *argv, + **kwargs, +): + """YG. 
Sep26, 2017 Add label_array/alpha option to show a mask on top of image + + a simple function to show image by using matplotlib.plt imshow + pass *argv,**kwargs to imshow + + Parameters + ---------- + image : array + Image to show + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax = ax + + if center != None: + plot1D(center[1], center[0], ax=ax, c="b", m="o", legend="") + if not logs: + if not use_mat_imshow: + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + vmin=vmin, + vmax=vmax, + extent=extent, + ) # vmin=0,vmax=1, + else: + im = ax.imshow( + image, origin=origin, cmap=cmap, interpolation=interpolation, vmin=vmin, vmax=vmax, extent=extent + ) # vmin=0,vmax=1, + else: + if not use_mat_imshow: + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) + else: + im = ax.imshow( + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) + if label_array != None: + im2 = show_label_array(ax, label_array, alpha=alpha, cmap=cmap, interpolation=interpolation) + + ax.set_title(image_name) + if xlim != None: + ax.set_xlim(xlim) + if ylim != None: + ax.set_ylim(ylim) + + if not show_ticks: + ax.set_yticks([]) + ax.set_xticks([]) + else: + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + # mpl.rcParams['xtick.labelsize'] = tick_size + # mpl.rcParams['ytick.labelsize'] = tick_size + # print(tick_size) + + if ylabel != None: + # ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel(ylabel, fontsize=lab_fontsize) + if xlabel != None: + ax.set_xlabel(xlabel, fontsize=lab_fontsize) + + if aspect != None: + # aspect = image.shape[1]/float( image.shape[0] ) + ax.set_aspect(aspect) + else: + ax.set_aspect(aspect="auto") + + if show_colorbar: + cbar = fig.colorbar(im, extend="neither", spacing="proportional", orientation="vertical") + cbar.ax.tick_params(labelsize=colorbar_fontsize) + fig.set_tight_layout(tight) + if save: + if show_time: + dt = datetime.now() + CurTime = "_%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + fp = path + "%s" % (file_name) + CurTime + "." + save_format + else: + fp = path + "%s" % (image_name) + "." 
+ save_format + if dpi == None: + dpi = fig.dpi + plt.savefig(fp, dpi=dpi) + # fig.set_tight_layout(tight) + if return_fig: + return im # fig + + +def plot1D( + y, + x=None, + yerr=None, + ax=None, + return_fig=False, + ls="-", + figsize=None, + legend=None, + legend_size=None, + lw=None, + markersize=None, + tick_size=8, + *argv, + **kwargs, +): + """a simple function to plot two-column data by using matplotlib.plot + pass *argv,**kwargs to plot + + Parameters + ---------- + y: column-y + x: column-x, by default x=None, the plot will use index of y as x-axis + the other paramaters are defined same as plt.plot + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + if figsize != None: + fig, ax = plt.subplots(figsize=figsize) + else: + fig, ax = plt.subplots() + + if legend == None: + legend = " " + try: + logx = kwargs["logx"] + except: + logx = False + try: + logy = kwargs["logy"] + except: + logy = False + + try: + logxy = kwargs["logxy"] + except: + logxy = False + + if logx == True and logy == True: + logxy = True + + try: + marker = kwargs["marker"] + except: + try: + marker = kwargs["m"] + except: + marker = next(markers_) + try: + color = kwargs["color"] + except: + try: + color = kwargs["c"] + except: + color = next(colors_) + + if x == None: + x = range(len(y)) + if yerr == None: + ax.plot( + x, + y, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) + else: + ax.errorbar( + x, + y, + yerr, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) + if logx: + ax.set_xscale("log") + if logy: + ax.set_yscale("log") + if logxy: + ax.set_xscale("log") + ax.set_yscale("log") + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + if "xlabel" in kwargs.keys(): + ax.set_xlabel(kwargs["xlabel"]) + if "ylabel" in kwargs.keys(): + ax.set_ylabel(kwargs["ylabel"]) + + if "title" in kwargs.keys(): + title = kwargs["title"] + else: + title = "plot" + ax.set_title(title) + # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend != "") and (legend != None): + ax.legend(loc="best", fontsize=legend_size) + if "save" in kwargs.keys(): + if kwargs["save"]: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + # fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs["path"] + "%s" % (title) + ".png" + plt.savefig(fp, dpi=fig.dpi) + if return_fig: + return fig + + +### + + +def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): + """Check the first frame with shutter open + + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined as shtter close + time_edge: the searching frame number range + + return: + shutter_open_frame: a integer, the first frame number with open shutter + + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + + """ + imgsum = np.array([np.sum(img) for img in data_series[time_edge[0] : time_edge[1] : 1]]) + if plot_: + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid=%s--imgsum" % uid) + ax.set_xlabel("Frame") + ax.set_ylabel("Total_Intensity") + # plt.show() + 
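+     # The line below picks the first sampled frame whose total intensity exceeds
+     # min_inten; if no frame is above the threshold, np.where(...)[0][0] raises
+     # an IndexError, so choose min_inten below the open-shutter level (editor note).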
shutter_open_frame = np.where(np.array(imgsum) > min_inten)[0][0] + print("The first frame with open shutter is : %s" % shutter_open_frame) + return shutter_open_frame + + +def get_each_frame_intensity( + data_series, sampling=50, bad_pixel_threshold=1e10, plot_=False, save=False, *argv, **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + imgsum = np.array([np.sum(img) for img in tqdm(data_series[::sampling], leave=True)]) + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + bad_frame_list = np.where(np.array(imgsum) > bad_pixel_threshold)[0] + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list + + +def create_time_slice(N, slice_num, slice_width, edges=None): + """create a ROI time regions""" + if edges != None: + time_edge = edges + else: + if slice_num == 1: + time_edge = [[0, N]] + else: + tstep = N // slice_num + te = np.arange(0, slice_num + 1) * tstep + tc = np.int_((te[:-1] + te[1:]) / 2)[1:-1] + if slice_width % 2: + sw = slice_width // 2 + 1 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw + 1, s + sw] for s in tc] + + [[N - slice_width, N]] + ) + else: + sw = slice_width // 2 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw, s + sw] for s in tc] + + [[N - slice_width, N]] + ) + + return np.array(time_edge) + + +def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nearest", **kwargs): + """ + YG. Sep 26, 2017 + Modified show_label_array(ax, label_array, cmap=None, **kwargs) + from https://github.com/Nikea/xray-vision/blob/master/xray_vision/mpl_plotting/roi.py + Display a labeled array nicely + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + label_array: ndarray + Expected to be an unsigned integer array. 
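+         (for example, a ring/box mask built with the ROI helpers above, or the
+         output of skimage.measure.label -- editor note)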
0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use, defaults to 'Paired' + Returns + ------- + img : AxesImage + The artist added to the axes + """ + if cmap == None: + cmap = "viridis" + # print(cmap) + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under("w", 0) + vmin = max(0.5, kwargs.pop("vmin", 0.5)) + im = ax.imshow(label_array, cmap=cmap, interpolation=interpolation, vmin=vmin, **kwargs) + if aspect == None: + ax.set_aspect(aspect="auto") + # ax.set_aspect('equal') + return im + + +def show_label_array_on_image( + ax, + image, + label_array, + cmap=None, + norm=None, + log_img=True, + alpha=0.3, + vmin=0.1, + vmax=5, + imshow_cmap="gray", + **kwargs, +): # norm=LogNorm(), + """ + This will plot the required ROI's(labeled array) on the image + + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + image : array + The image array + label_array : array + Expected to be an unsigned integer array. 0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use for plotting the label_array, defaults to 'None' + imshow_cmap : str or colormap, optional + Color map to use for plotting the image, defaults to 'gray' + norm : str, optional + Normalize scale data, defaults to 'Lognorm()' + Returns + ------- + im : AxesImage + The artist added to the axes + im_label : AxesImage + The artist added to the axes + """ + ax.set_aspect("equal") + + # print (vmin, vmax ) + if log_img: + im = ax.imshow( + image, cmap=imshow_cmap, interpolation="none", norm=LogNorm(vmin, vmax), **kwargs + ) # norm=norm, + else: + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", vmin=vmin, vmax=vmax, **kwargs) # norm=norm, + + im_label = mpl_plot.show_label_array( + ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, **kwargs + ) # norm=norm, + + return im, im_label + + +def show_ROI_on_image( + image, + ROI, + center=None, + rwidth=400, + alpha=0.3, + label_on=True, + save=False, + return_fig=False, + rect_reqion=None, + log_img=True, + vmin=0.01, + vmax=5, + show_ang_cor=False, + cmap=cmap_albula, + fig_ax=None, + uid="uid", + path="", + aspect=1, + show_colorbar=True, + show_roi_edge=False, + *argv, + **kwargs, +): + """show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center + + """ + + if RUN_GUI: + fig = Figure(figsize=(8, 8)) + axes = fig.add_subplot(111) + elif fig_ax != None: + fig, axes = fig_ax + else: + fig, axes = plt.subplots() # plt.subplots(figsize=(8,8)) + + # print( vmin, vmax) + # norm=LogNorm(vmin, vmax) + + axes.set_title("%s_ROI_on_Image" % uid) + if log_img: + if vmin == 0: + vmin += 1e-10 + + vmax = max(1, vmax) + if not show_roi_edge: + # print('here') + im, im_label = show_label_array_on_image( + axes, + image, + ROI, + imshow_cmap="viridis", + cmap=cmap, + alpha=alpha, + log_img=log_img, + vmin=vmin, + vmax=vmax, + origin="lower", + ) + else: + edg = get_image_edge(ROI) + image_ = get_image_with_roi(image, ROI, scale_factor=2) + # fig, axes = plt.subplots( ) + show_img( + image_, + ax=[fig, axes], + vmin=vmin, + vmax=vmax, + logs=log_img, + image_name="%s_ROI_on_Image" % uid, + cmap=cmap, + ) + + if rect_reqion == None: + if center != None: + x1, x2 = [center[1] - rwidth, center[1] + rwidth] + y1, y2 = [center[0] - rwidth, 
center[0] + rwidth] + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) + else: + x1, x2, y1, y2 = rect_reqion + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) + + if label_on: + num_qzr = len(np.unique(ROI)) - 1 + for i in range(1, num_qzr + 1): + ind = np.where(ROI == i)[1] + indz = np.where(ROI == i)[0] + c = "%i" % i + y_val = int(indz.mean()) + x_val = int(ind.mean()) + # print (xval, y) + axes.text(x_val, y_val, c, color="b", va="center", ha="center") + if show_ang_cor: + axes.text(-0.0, 0.5, "-/+180" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(1.0, 0.5, "0" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, -0.0, "-90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, 1.0, "90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + + axes.set_aspect(aspect) + # fig.colorbar(im_label) + if show_colorbar: + if not show_roi_edge: + fig.colorbar(im) + if save: + fp = path + "%s_ROI_on_Image" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() + if return_fig: + return fig, axes, im + + +def crop_image(image, crop_mask): + """Crop the non_zeros pixels of an image to a new image""" + from skimage.util import crop, pad + + pxlst = np.where(crop_mask.ravel())[0] + dims = crop_mask.shape + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + # x and y are flipped??? + # matrix notation!!! + pixely = pxlst % imgwidthy + pixelx = pxlst // imgwidthy + + minpixelx = np.min(pixelx) + minpixely = np.min(pixely) + maxpixelx = np.max(pixelx) + maxpixely = np.max(pixely) + crops = crop_mask * image + img_crop = crop(crops, ((minpixelx, imgwidthx - maxpixelx - 1), (minpixely, imgwidthy - maxpixely - 1))) + return img_crop + + +def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs): + """Get average imagef from a data_series by every sampling number to save time""" + if img_samp_index == None: + avg_img = np.average(data_series[::sampling], axis=0) + else: + avg_img = np.zeros_like(data_series[0]) + n = 0 + for i in img_samp_index: + avg_img += data_series[i] + n += 1 + avg_img = np.array(avg_img) / n + + if plot_: + fig, ax = plt.subplots() + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked Averaged Image" % uid) + fig.colorbar(im) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + return avg_img + + +def check_ROI_intensity(avg_img, ring_mask, ring_number=3, save=False, plot=True, *argv, **kwargs): + """plot intensity versus pixel of a ring + Parameters + ---------- + avg_img: 2D-array, the image + ring_mask: 2D-array + ring_number: which ring to plot + + Returns + ------- + + + """ + # print('here') + + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number]) + + if plot: + fig, ax = plt.subplots() + ax.set_title("%s--check-RIO-%s-intensity" % (uid, ring_number)) + 
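+         # pixel[0][0] below is the array of intensities of every pixel carrying
+         # the label `ring_number` (roi.roi_pixel_values appears to return a
+         # (values-per-label, labels) pair -- editor note), plotted vs. pixel index.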
ax.plot(pixel[0][0], "bo", ls="-") + ax.set_ylabel("Intensity") + ax.set_xlabel("pixel") + if save: + path = kwargs["path"] + fp = path + "%s_Mean_intensity_of_one_ROI" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + if save: + path = kwargs["path"] + save_lists( + [range(len(pixel[0][0])), pixel[0][0]], + label=["pixel_list", "roi_intensity"], + filename="%s_Mean_intensity_of_one_ROI" % uid, + path=path, + ) + # plt.show() + return pixel[0][0] + + +# from tqdm import tqdm + + +def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None): + """calculation g2 by using a multi-tau algorithm""" + + noframes = len(image_series) # number of frames, not "no frames" + # num_buf = 8 # number of buffers + + if bad_image_process: + import skbeam.core.mask as mask_image + + bad_img_list = np.array(bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen(image_series, bad_img_list) + + if num_lev == None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + print("Bad Frames involved!") + + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(new_imgs)) + print("G2 calculation DONE!") + + else: + + if num_lev == None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series)) + print("G2 calculation DONE!") + + return g2, lag_steps + + +def run_time(t0): + """Calculate running time of a program + Parameters + ---------- + t0: time_string, t0=time.time() + The start time + Returns + ------- + Print the running time + + One usage + --------- + t0=time.time() + .....(the running code) + run_time(t0) + """ + + elapsed_time = time.time() - t0 + if elapsed_time < 60: + print("Total time: %.3f sec" % (elapsed_time)) + else: + print("Total time: %.3f min" % (elapsed_time / 60.0)) + + +def trans_data_to_pd(data, label=None, dtype="array"): + """ + convert data into pandas.DataFrame + Input: + data: list or np.array + label: the coloum label of the data + dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] + Output: + a pandas.DataFrame + """ + # lists a [ list1, list2...] all the list have the same length + import sys + + import pandas as pd + from numpy import arange, array + + if dtype == "list": + data = array(data).T + N, M = data.shape + elif dtype == "array": + data = array(data) + N, M = data.shape + else: + print("Wrong data type! 
Now only support 'list' and 'array' tpye") + + index = arange(N) + if label == None: + label = ["data%s" % i for i in range(M)] + # print label + df = pd.DataFrame(data, index=index, columns=label) + return df + + +def save_lists(data, label=None, filename=None, path=None, return_res=False, verbose=False): + """ + save_lists( data, label=None, filename=None, path=None) + + save lists to a CSV file with filename in path + Parameters + ---------- + data: list + label: the column name, the length should be equal to the column number of list + filename: the filename to be saved + path: the filepath to be saved + + Example: + save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) + """ + + M, N = len(data[0]), len(data) + d = np.zeros([N, M]) + for i in range(N): + d[i] = data[i] + + df = trans_data_to_pd(d.T, label, "array") + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = "data" + filename = os.path.join(path, filename) # +'.csv') + df.to_csv(filename) + if verbose: + print("The data was saved in: %s." % filename) + if return_res: + return df + + +def get_pos_val_overlap(p1, v1, p2, v2, Nl): + """get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + + get_pos_val_overlap( p1, v1, p2,v2, Nl) + + """ + ind = np.zeros(Nl, dtype=np.int32) + ind[p1] = np.arange(len(p1)) + 1 + w2 = np.where(ind[p2])[0] + w1 = ind[p2[w2]] - 1 + return v1[w1], v2[w2] + + +def save_arrays(data, label=None, dtype="array", filename=None, path=None, return_res=False, verbose=False): + """ + July 10, 2016, Y.G.@CHX + save_arrays( data, label=None, dtype='array', filename=None, path=None): + save data to a CSV file with filename in path + Parameters + ---------- + data: arrays + label: the column name, the length should be equal to the column number of data + dtype: array or list + filename: the filename to be saved + path: the filepath to be saved + + Example: + + save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir ) + + + """ + df = trans_data_to_pd(data, label, dtype) + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = "data" + filename_ = os.path.join(path, filename) # +'.csv') + df.to_csv(filename_) + if verbose: + print("The file: %s is saved in %s" % (filename, path)) + # print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) + if return_res: + return df + + +def cal_particle_g2(radius, viscosity, qr, taus, beta=0.2, T=298): + """YG Dev Nov 20, 2017@CHX + calculate particle g2 fucntion by giving particle radius, Q , and solution viscosity using a simple + exponetional model + Input: + radius: m + qr, list, in A-1 + visocity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + T: temperture, in K + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(-123)*298 / ( 6*np.pi * 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10**5 A2/s + 
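+     The same estimate written out as a short check (editor sketch, numpy as np):
+         kB, T, eta, R = 1.38064852e-23, 298, 0.20871, 250e-9
+         D0 = kB * T / (6 * np.pi * eta * R) * 1e20   # m^2/s -> A^2/s, ~4.18e5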
taus: time + beta: contrast + + cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4) + + """ + D0 = get_diffusion_coefficient(viscosity, radius, T=T) + g2_q1 = np.zeros(len(qr), dtype=object) + for i, q1 in enumerate(qr): + relaxation_rate = D0 * q1**2 + g2_q1[i] = simple_exponential(taus, beta=beta, relaxation_rate=relaxation_rate, baseline=1) + return g2_q1 + + +def get_Reynolds_number(flow_rate, flow_radius, fluid_density, fluid_viscosity): + """May 10, 2019, Y.G.@CHX + get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta + Reynolds_number << 1000 gives a laminar flow + flow_rate: ul/s + flow_radius: mm + fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) + fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) + + return Reynolds_number + """ + return flow_rate * 1e-6 * flow_radius * 1e-3 * 2 * fluid_density / fluid_viscosity + + +def get_Deborah_number(flow_rate, beam_size, q_vector, diffusion_coefficient): + """May 10, 2019, Y.G.@CHX + get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) + flow_rate: ul/s + beam_size: ul + q_vector: A-1 + diffusion_coefficient: A^2/s + + return Deborah_number + """ + return (flow_rate / beam_size) / (diffusion_coefficient * q_vector**2) + + +def get_viscosity(diffusion_coefficient, radius, T=298): + """May 10, 2019, Y.G.@CHX + get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient + diffusion_coefficient in unit of A^2/s + radius: m + T: K + k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant + + return visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * diffusion_coefficient * radius) * 10**20 + + +def get_diffusion_coefficient(viscosity, radius, T=298): + """July 10, 2016, Y.G.@CHX + get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity + viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + radius: m + T: K + k: 1.38064852(79)×10−23 J/T, Boltzmann constant + + return diffusion_coefficient in unit of A^2/s + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s + + get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) + + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * viscosity * radius) * 10**20 + + +def ring_edges(inner_radius, width, spacing=0, num_rings=None): + """ + Aug 02, 2016, Y.G.@CHX + ring_edges(inner_radius, width, spacing=0, num_rings=None) + + Calculate the inner and outer radius of a set of rings. + + The number of rings, their widths, and any spacing between rings can be + specified. They can be uniform or varied. + + LW 04/02/2024: fixed checking whether width and spacing are iterable + + Parameters + ---------- + inner_radius : float + inner radius of the inner-most ring + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + spacing : float or list of floats, optional + margin between rings, 0 by default + If a float, all rings will have the same spacing. If a list, + the length of the list must be one less than the number of + rings. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
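+
+     The resulting edges are typically handed on to a label-array builder, e.g.
+     (editor sketch, using the roi module already imported in this file):
+         edges = ring_edges(inner_radius=40, width=10, spacing=5, num_rings=6)
+         ring_mask = roi.rings(edges, center=center, shape=avg_img.shape)
+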
+ + Returns + ------- + edges : array + inner and outer radius for each ring + + Example + ------- + # Make two rings starting at r=1px, each 5px wide + >>> ring_edges(inner_radius=1, width=5, num_rings=2) + [(1, 6), (6, 11)] + # Make three rings of different widths and spacings. + # Since the width and spacings are given individually, the number of + # rings here is simply inferred. + >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2)) + [(1, 6), (7, 11), (13, 16)] + + """ + # All of this input validation merely checks that width, spacing, and + # num_rings are self-consistent and complete. + try: + iter(width) + width_is_list = True + except: + width_is_list = False + try: + iter(spacing) + spacing_is_list = True + except: + spacing_is_list = False + + # width_is_list = isinstance(width, collections.Iterable) + # spacing_is_list = isinstance(spacing, collections.Iterable) + if width_is_list and spacing_is_list: + if len(width) != len(spacing) + 1: + raise ValueError("List of spacings must be one less than list " "of widths.") + if num_rings == None: + try: + num_rings = len(width) + except TypeError: + try: + num_rings = len(spacing) + 1 + except TypeError: + raise ValueError( + "Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified." + ) + else: + if width_is_list: + if num_rings != len(width): + raise ValueError("num_rings does not match width list") + if spacing_is_list: + if num_rings - 1 != len(spacing): + raise ValueError("num_rings does not match spacing list") + # Now regularlize the input. + if not width_is_list: + width = np.ones(num_rings) * width + + if spacing == None: + spacing = [] + else: + if not spacing_is_list: + spacing = np.ones(num_rings - 1) * spacing + # The inner radius is the first "spacing." + all_spacings = np.insert(spacing, 0, inner_radius) + steps = np.array([all_spacings, width]).T.ravel() + edges = np.cumsum(steps).reshape(-1, 2) + return edges + + +def get_non_uniform_edges( + centers, + width=4, + number_rings=1, + spacing=0, +): + """ + YG CHX Spe 6 + get_non_uniform_edges( centers, width = 4, number_rings=3 ) + + Calculate the inner and outer radius of a set of non uniform distributed + rings by giving ring centers + For each center, there are number_rings with each of width + + LW 04/02/2024: fixed checking whether 'width' is iterable + + Parameters + ---------- + centers : float + the center of the rings + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
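+
+     Example (editor sketch): one ring of width 4 around each listed center
+         >>> get_non_uniform_edges(centers=[10, 30], width=4, number_rings=1)
+         array([[ 8., 12.],
+                [28., 32.]])
+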
+ + Returns + ------- + edges : array + inner and outer radius for each ring + """ + + if number_rings == None: + number_rings = 1 + edges = np.zeros([len(centers) * number_rings, 2]) + + try: + iter(width) + except: + width = np.ones_like(centers) * width + for i, c in enumerate(centers): + edges[i * number_rings : (i + 1) * number_rings, :] = ring_edges( + inner_radius=c - width[i] * number_rings / 2, width=width[i], spacing=spacing, num_rings=number_rings + ) + return edges + + +def trans_tf_to_td(tf, dtype="dframe"): + """July 02, 2015, Y.G.@CHX + Translate epoch time to string + """ + from datetime import datetime + + import numpy as np + import pandas as pd + + """translate time.float to time.date, + td.type dframe: a dataframe + td.type list, a list + """ + if dtype == "dframe": + ind = tf.index + else: + ind = range(len(tf)) + td = np.array([datetime.fromtimestamp(tf[i]) for i in ind]) + return td + + +def trans_td_to_tf(td, dtype="dframe"): + """July 02, 2015, Y.G.@CHX + Translate string to epoch time + + """ + import time + + import numpy as np + + """translate time.date to time.float, + td.type dframe: a dataframe + td.type list, a list + """ + if dtype == "dframe": + ind = td.index + else: + ind = range(len(td)) + # tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([time.mktime(td[i].timetuple()) for i in ind]) + return tf + + +def get_averaged_data_from_multi_res( + multi_res, keystr="g2", different_length=True, verbose=False, cal_errorbar=False +): + """Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. + keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results + + """ + maxM = 0 + mkeys = multi_res.keys() + if not different_length: + n = 0 + for i, key in enumerate(list(mkeys)): + keystri = multi_res[key][keystr] + if i == 0: + keystr_average = keystri + else: + keystr_average += keystri + n += 1 + keystr_average /= n + + else: + length_dict = {} + D = 1 + for i, key in enumerate(list(mkeys)): + if verbose: + print(i, key) + shapes = multi_res[key][keystr].shape + M = shapes[0] + if i == 0: + if len(shapes) == 2: + D = 2 + maxN = shapes[1] + elif len(shapes) == 3: + D = 3 + maxN = shapes[2] # in case of two-time correlation + if (M) not in length_dict: + length_dict[(M)] = 1 + else: + length_dict[(M)] += 1 + maxM = max(maxM, M) + # print( length_dict ) + avg_count = {} + sk = np.array(sorted(length_dict)) + for i, k in enumerate(sk): + avg_count[k] = np.sum(np.array([length_dict[k] for k in sk[i:]])) + # print(length_dict, avg_count) + if D == 2: + # print('here') + keystr_average = np.zeros([maxM, maxN]) + elif D == 3: + keystr_average = np.zeros([maxM, maxM, maxN]) + else: + keystr_average = np.zeros([maxM]) + for i, key in enumerate(list(mkeys)): + keystri = multi_res[key][keystr] + Mi = keystri.shape[0] + if D != 3: + keystr_average[:Mi] += keystri + else: + keystr_average[:Mi, :Mi, :] += keystri + if D != 3: + keystr_average[: sk[0]] /= avg_count[sk[0]] + else: + keystr_average[: sk[0], : sk[0], :] /= avg_count[sk[0]] + for i in range(0, len(sk) - 1): + if D != 3: + keystr_average[sk[i] : sk[i + 1]] /= avg_count[sk[i + 1]] + else: + keystr_average[sk[i] : sk[i + 1], sk[i] : sk[i + 1], :] /= avg_count[sk[i + 1]] + + return keystr_average + + +def save_g2_general(g2, 
taus, qr=None, qz=None, uid="uid", path=None, return_res=False): + """Y.G. Dec 29, 2016 + + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: + + """ + + df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) + t, qs = g2.shape + if qr is None: + qr = range(qs) + if qz is None: + df.columns = ["tau"] + [str(qr_) for qr_ in qr] + else: + df.columns = ["tau"] + [str(qr_) + "_" + str(qz_) for (qr_, qz_) in zip(qr, qz)] + + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + + # if filename is None: + + filename = uid + # filename = 'uid=%s--g2.csv' % (uid) + # filename += '-uid=%s-%s.csv' % (uid,CurTime) + # filename += '-uid=%s.csv' % (uid) + filename1 = os.path.join(path, filename) + df.to_csv(filename1) + print("The correlation function is saved in %s with filename as %s" % (path, filename)) + if return_res: + return df + + +########### +# *for g2 fit and plot + + +def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): + return beta * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + + +def simple_exponential(x, beta, relaxation_rate, baseline=1): + """relation_rate: unit 1/s""" + return beta * np.exp(-2 * relaxation_rate * x) + baseline + + +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * relaxation_rate * x) + baseline + + +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + + +def flow_para_function_with_vibration(x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = 1 + amp * np.cos(2 * np.pi * freq * x) + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * vibration_part * Diff_part * Flow_part + baseline + + +def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): + """flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )""" + + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): + """Nov 9, 2017 Basically, make q vector to (qr, angle), + ###relaxation_rate is actually a diffusion rate + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + Diffusion part: np.exp( -2*D q^2 *tau ) + q_ang: would be np.radians( ang - 90 ) + + """ + + Diff_part = np.exp(-2 * (diffusion * qr**2 * x) ** alpha) + if flow_velocity != 0: + if np.cos(q_ang) >= 1e-8: + Flow_part = ( + np.pi**2 + / (16 * x * flow_velocity * qr * abs(np.cos(q_ang))) + * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity * qr * abs(np.cos(q_ang))))) ** 2 + ) + else: + Flow_part = 1 + else: + Flow_part = 1 + return beta * Diff_part * Flow_part + baseline + + +def get_flow_velocity(average_velocity, shape_factor): + + return average_velocity * (1 - shape_factor) / (1 + shape_factor) + + +def stretched_flow_para_function(x, beta, relaxation_rate, alpha, flow_velocity, 
baseline=1): + """ + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + """ + Diff_part = np.exp(-2 * (relaxation_rate * x) ** alpha) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def get_g2_fit_general_two_steps( + g2, taus, function="simple_exponential", second_fit_range=[0, 20], sequential_fit=False, *argv, **kwargs +): + """ + Fit g2 in two steps, + i) Using the "function" to fit whole g2 to get baseline and beta (contrast) + ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function + """ + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(g2, taus, function, sequential_fit, *argv, **kwargs) + guess_values = {} + for k in list(g2_fit_result[0].params.keys()): + guess_values[k] = np.array([g2_fit_result[i].params[k].value for i in range(g2.shape[1])]) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + else: + guess_limits = dict(baseline=[1, 1.8], alpha=[0, 2], beta=[0.0, 1], relaxation_rate=[0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function="simple_exponential", + sequential_fit=sequential_fit, + fit_range=second_fit_range, + fit_variables={"baseline": False, "beta": False, "alpha": False, "relaxation_rate": True}, + guess_values=guess_values, + guess_limits=guess_limits, + ) + + return g2_fit_result, taus_fit, g2_fit + + +def get_g2_fit_general( + g2, taus, function="simple_exponential", sequential_fit=False, qval_dict=None, ang_init=90, *argv, **kwargs +): + """ + Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq + qval_dict: a dict with qr and ang (in unit of degrees).") + + + Dec 29,2016, Y.G.@CHX + + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + taus: the time delay + sequential_fit: if True, will use the low-q fit result as initial value to fit the higher Qs + function: + supported function include: + 'simple_exponential' (or 'simple'): fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential'(or 'streched'): fit by a streched exponential function, defined as + beta * ( np.exp( -2 * ( relaxation_rate * tau )**alpha ) + baseline + 'stretched_vibration': fit by a streched exponential function with vibration, defined as + beta * (1 + amp*np.cos( 2*np.pi*60* x) )* np.exp(-2 * (relaxation_rate * x)**alpha) + baseline + 'flow_para_function' (or flow): fit by a flow function + + + kwargs: + could contains: + 'fit_variables': a dict, for vary or not, + keys are fitting para, including + beta, relaxation_rate , alpha ,baseline + values: a False or True, False for not vary + 'guess_values': a dict, for initial value of the fitting para, + the defalut values are + dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + + 'guess_limits': a dict, for the limits of the fittting para, for example: + dict( beta=[0, 10],, alpha=[0,100] ) + the default is: + dict( baseline =[0.5, 2.5], alpha=[0, inf] ,beta = [0, 1], relaxation_rate= [0.0,1000] ) + Returns + ------- + fit resutls: a instance in limfit + tau_fit + fit_data by the model, it has the q number of g2 + + an example: + fit_g2_func = 'stretched' + g2_fit_result, taus_fit, g2_fit = 
get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':True,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + + """ + + if "fit_range" in kwargs.keys(): + fit_range = kwargs["fit_range"] + else: + fit_range = None + + num_rings = g2.shape[1] + if "fit_variables" in kwargs: + additional_var = kwargs["fit_variables"] + _vars = [k for k in list(additional_var.keys()) if additional_var[k] == False] + else: + _vars = [] + if function == "simple_exponential" or function == "simple": + _vars = np.unique(_vars + ["alpha"]) + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) + elif function == "stretched_exponential" or function == "stretched": + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) + elif function == "stretched_vibration": + mod = Model(stretched_auto_corr_scat_factor_with_vibration) # , independent_vars= _vars) + elif function == "flow_para_function" or function == "flow_para": + mod = Model(flow_para_function) # , independent_vars= _vars) + elif function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod = Model(flow_para_function_explicitq) # , independent_vars= _vars) + elif function == "flow_para_function_with_vibration" or function == "flow_vibration": + mod = Model(flow_para_function_with_vibration) + + else: + print( + "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" + % function + ) + + mod.set_param_hint("baseline", min=0.5, max=2.5) + mod.set_param_hint("beta", min=0.0, max=1.0) + mod.set_param_hint("alpha", min=0.0) + mod.set_param_hint("relaxation_rate", min=0.0, max=1000) + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + for k in list(guess_limits.keys()): + mod.set_param_hint(k, min=guess_limits[k][0], max=guess_limits[k][1]) + + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + mod.set_param_hint("flow_velocity", min=0) + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + if function == "stretched_vibration" or function == "flow_vibration": + mod.set_param_hint("freq", min=0) + mod.set_param_hint("amp", min=0) + + _guess_val = dict(beta=0.1, alpha=1.0, relaxation_rate=0.005, baseline=1.0) + if "guess_values" in kwargs: + guess_values = kwargs["guess_values"] + _guess_val.update(guess_values) + + _beta = _guess_val["beta"] + _alpha = _guess_val["alpha"] + _relaxation_rate = _guess_val["relaxation_rate"] + _baseline = _guess_val["baseline"] + if isinstance(_beta, (np.ndarray, list)): + _beta_ = _beta[0] + else: + _beta_ = _beta + if isinstance(_baseline, (np.ndarray, list)): + _baseline_ = _baseline[0] + else: + _baseline_ = _baseline + if isinstance(_relaxation_rate, (np.ndarray, list)): + _relaxation_rate_ = _relaxation_rate[0] + else: + _relaxation_rate_ = _relaxation_rate + if isinstance(_alpha, (np.ndarray, list)): + _alpha_ = _alpha[0] + else: + _alpha_ = _alpha + pars = mod.make_params(beta=_beta_, alpha=_alpha_, relaxation_rate=_relaxation_rate_, baseline=_baseline_) + + if function == 
"flow_para_function" or function == "flow_para": + _flow_velocity = _guess_val["flow_velocity"] + if isinstance(_flow_velocity, (np.ndarray, list)): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + relaxation_rate=_relaxation_rate_, + baseline=_baseline_, + ) + + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + _flow_velocity = _guess_val["flow_velocity"] + _diffusion = _guess_val["diffusion"] + _guess_val["qr"] = 1 + _guess_val["q_ang"] = 0 + if isinstance(_flow_velocity, (np.ndarray, list)): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + if isinstance(_diffusion, (np.ndarray, list)): + _diffusion_ = _diffusion[0] + else: + _diffusion_ = _diffusion + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=1, + q_ang=0, + ) + + if function == "stretched_vibration": + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, alpha=_alpha, freq=_freq, amp=_amp, relaxation_rate=_relaxation_rate, baseline=_baseline + ) + + if function == "flow_vibration": + _flow_velocity = _guess_val["flow_velocity"] + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, + freq=_freq, + amp=_amp, + flow_velocity=_flow_velocity, + relaxation_rate=_relaxation_rate, + baseline=_baseline, + ) + for v in _vars: + pars["%s" % v].vary = False + # print( pars ) + fit_res = [] + model_data = [] + for i in range(num_rings): + if fit_range != None: + y_ = g2[1:, i][fit_range[0] : fit_range[1]] + lags_ = taus[1:][fit_range[0] : fit_range[1]] + else: + y_ = g2[1:, i] + lags_ = taus[1:] + + mm = ~np.isnan(y_) + y = y_[mm] + lags = lags_[mm] + # print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + # y=y_ + # lags=lags_ + # print( _relaxation_rate ) + for k in list(pars.keys()): + # print(k, _guess_val[k] ) + try: + if isinstance(_guess_val[k], (np.ndarray, list)): + pars[k].value = _guess_val[k][i] + except: + pass + + if True: + if isinstance(_beta, (np.ndarray, list)): + # pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val["beta"][i] + if isinstance(_baseline, (np.ndarray, list)): + # pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val["baseline"][i] + if isinstance(_relaxation_rate, (np.ndarray, list)): + # pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val["relaxation_rate"][i] + if isinstance(_alpha, (np.ndarray, list)): + # pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val["alpha"][i] + # for k in list(pars.keys()): + # print(k, _guess_val[k] ) + # pars[k].value = _guess_val[k][i] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + if qval_dict == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=qval_dict[i][0], + q_ang=abs(np.radians(qval_dict[i][1] - ang_init)), + ) + + pars["qr"].vary = False + pars["q_ang"].vary = False + for v in _vars: + pars["%s" % v].vary = False + + # if i==20: + # print(pars) + # print( pars ) + result1 = mod.fit(y, pars, x=lags) + # print(qval_dict[i][0], qval_dict[i][1], y) + if sequential_fit: + for k in 
list(pars.keys()): + # print( pars ) + if k in list(result1.best_values.keys()): + pars[k].value = result1.best_values[k] + fit_res.append(result1) + # model_data.append( result1.best_fit ) + yf = result1.model.eval(params=result1.params, x=lags_) + model_data.append(yf) + return fit_res, lags_, np.array(model_data).T + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): + """Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + """ + + Nqs = len(qval_dict.keys()) + len_qrz = len(list(qval_dict.values())[0]) + # qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array(list(qval_dict.values()))[:, 0] + if geometry == "gi_saxs" or geometry == "ang_saxs": # or geometry=='gi_waxs': + if len_qrz < 2: + print("please give qz or qang for the q-label") + else: + # qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array(list(qval_dict.values()))[:, 1] + else: + qz_label = np.array([0]) + + uqz_label = np.unique(qz_label) + num_qz = len(uqz_label) + + uqr_label = np.unique(qr_label) + num_qr = len(uqr_label) + + # print( uqr_label, uqz_label ) + if len(uqr_label) >= len(uqz_label): + master_plot = "qz" # one qz for many sub plots of each qr + else: + master_plot = "qr" + + mastp = master_plot + if geometry == "ang_saxs": + mastp = "ang" + num_short = min(num_qz, num_qr) + num_long = max(num_qz, num_qr) + + # print( mastp, num_short, num_long) + if num_qz != num_qr: + short_label = [qz_label, qr_label][np.argmin([num_qz, num_qr])] + long_label = [qz_label, qr_label][np.argmax([num_qz, num_qr])] + short_ulabel = [uqz_label, uqr_label][np.argmin([num_qz, num_qr])] + long_ulabel = [uqz_label, uqr_label][np.argmax([num_qz, num_qr])] + else: + short_label = qz_label + long_label = qr_label + short_ulabel = uqz_label + long_ulabel = uqr_label + # print( long_ulabel ) + # print( qz_label,qr_label ) + # print( short_label, long_label ) + + if geometry == "saxs" or geometry == "gi_waxs": + ind_long = [range(num_long)] + else: + ind_long = [np.where(short_label == i)[0] for i in short_ulabel] + + if Nqs == 1: + long_ulabel = list(qval_dict.values())[0] + long_label = list(qval_dict.values())[0] + return ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) + + +############################################ +# #a good func to plot g2 for all types of geogmetries +# ########################################### + + +def plot_g2_general( + g2_dict, + taus_dict, + qval_dict, + g2_err_dict=None, + fit_res=None, + geometry="saxs", + filename="g2", + path=None, + function="simple_exponential", + g2_labels=None, + fig_ysize=12, + qth_interest=None, + ylabel="g2", + return_fig=False, + append_name="", + outsize=(2000, 2400), + max_plotnum_fig=16, + figsize=(10, 12), + show_average_ang_saxs=True, + qphi_analysis=False, + fontsize_sublabel=12, + *argv, + **kwargs, +): + """ + Jan 10, 2018 add g2_err_dict option to plot g2 with error bar + Oct31, 2017 add qth_interest option + + Dec 26,2016, Y.G.@CHX + + Plot one/four-time correlation function (with fit) for 
different geometry + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2_dict: dict, format as {1: g2_1, 2: g2_2, 3: g2_3...} one-time correlation function, g1,g2, g3,...must have the same shape + taus_dict, dict, format {1: tau_1, 2: tau_2, 3: tau_3...}, tau1,tau2, tau3,...must have the same shape + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + + fit_res: give all the fitting parameters for showing in the plot + qth_interest: if not None: should be a list, and will only plot the qth_interest qs + filename: for the title of plot + append_name: if not None, will save as filename + append_name as filename + path: the path to save data + outsize: for gi/ang_saxs, will combine all the different qz images together with outsize + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + + one_plot: if True, plot all images in one pannel + kwargs: + + Returns + ------- + None + + ToDoList: plot an average g2 for ang_saxs for each q + + """ + + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" + + if geometry == "saxs": + if qphi_analysis: + geometry = "ang_saxs" + if qth_interest != None: + if not isinstance(qth_interest, list): + print("Please give a list for qth_interest") + else: + # g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array(qth_interest) - 1 + g2_dict_ = {} + # taus_dict_ = {} + for k in list(g2_dict.keys()): + g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] + # for k in list(taus_dict.keys()): + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + taus_dict_ = taus_dict + qval_dict_ = {k: qval_dict[k] for k in qth_interest} + if fit_res != None: + fit_res_ = [fit_res[k] for k in qth_interest] + else: + fit_res_ = None + else: + g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + # $print( num_short, num_long ) + + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + # if show_average_ang_saxs: + # if geometry=='ang_saxs': + # num_long_i += 1 + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": + fig = plt.figure(figsize=(8, 6)) + else: + if num_short > 1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + # print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) + else: + # print('Here') + if master_plot != "qz": + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == "qz": + if geometry == 
"ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (filename, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + # plt.title( til,fontsize=20, y =1.06) + # print('here') + else: + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + # fig.set_size_inches(10, 12) + # fig.set_size_inches(10, fig_ysize ) + else: + sy = 1 + # fig.set_size_inches(8,6) + # plt.axis('off') + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + + temp = sy + sy = sx + sx = temp + + # print( num_long_i, sx, sy ) + # print( master_plot ) + # print(ind_long_i, len(ind_long_i) ) + + for i, l_ind in enumerate(ind_long_i): + if num_long_i <= max_plotnum_fig: + # if s_ind ==2: + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + ax = fig.add_subplot(sx, sy, i + 1) + if sx == 1: + if sy == 1: + plt.axis("on") + else: + # fig_subnum = l_ind//max_plotnum_fig + # ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i // max_plotnum_fig + # print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx, sy, i + 1 - fig_subnum * max_plotnum_fig) + + ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_plot == "qz" or master_plot == "angle": + if geometry != "gi_waxs": + title_long = r"$Q_r= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + else: + title_long = r"$Q_r= $" + "%i " % (long_label[l_ind]) + # print( title_long,long_label,l_ind ) + else: + if geometry == "ang_saxs": + # title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = "Ang= " + "%.2f" % (long_label[l_ind]) # + r'$^\circ$' + '( %d )'%(l_ind) + elif geometry == "gi_saxs": + title_long = r"$Q_z= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + else: + title_long = "" + # print( master_plot ) + if master_plot != "qz": + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.1, fontsize=12) + else: + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.05, fontsize=fontsize_sublabel) + # print( geometry ) + # print( title_long ) + if qth_interest != None: # it might have a bug here, todolist!!! 
+ lab = sorted(list(qval_dict_.keys())) + # print( lab, l_ind) + ax.set_title(title_long + " (%s )" % (lab[l_ind] + 1), y=1.05, fontsize=12) + for ki, k in enumerate(list(g2_dict_.keys())): + if ki == 0: + c = "b" + if fit_res == None: + m = "-o" + else: + m = "o" + elif ki == 1: + c = "r" + if fit_res == None: + m = "s" + else: + m = "-" + elif ki == 2: + c = "g" + m = "-D" + else: + c = colors[ki + 2] + m = "-%s" % markers[ki + 2] + try: + dumy = g2_dict_[k].shape + # print( 'here is the shape' ) + islist = False + except: + islist_n = len(g2_dict_[k]) + islist = True + # print( 'here is the list' ) + if islist: + for nlst in range(islist_n): + m = "-%s" % markers[nlst] + # print(m) + y = g2_dict_[k][nlst][:, l_ind] + x = taus_dict_[k][nlst] + if ki == 0: + ymin, ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + # print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst == 0: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + else: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + yerr = g2_err_dict[k][nlst][:, l_ind] + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + else: + if nlst == 0: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + ax.set_xscale("log", nonposx="clip") + if nlst == 0: + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + + else: + y = g2_dict_[k][:, l_ind] + x = taus_dict_[k] + if ki == 0: + ymin, ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + else: + yerr = g2_err_dict[k][:, l_ind] + # print(x.shape, y.shape, yerr.shape) + # print(yerr) + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + ax.set_xscale("log", nonposx="clip") + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + + if fit_res_ != None: + result1 = fit_res_[l_ind] + # print (result1.best_values) + + beta = result1.best_values["beta"] + baseline = result1.best_values["baseline"] + if function == "simple_exponential" or function == "simple": + rate = result1.best_values["relaxation_rate"] + alpha = 1.0 + elif function == "stretched_exponential" or function == "stretched": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + elif function == "stretched_vibration": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + freq = result1.best_values["freq"] + elif function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + freq = result1.best_values["freq"] + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + flow = result1.best_values["flow_velocity"] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + diff = result1.best_values["diffusion"] + qrr = short_ulabel[s_ind] + # print(qrr) + rate = diff * qrr**2 + flow = result1.best_values["flow_velocity"] + if qval_dict_ == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + pass + + if rate != 0: + txts = r"$\tau_0$" + r"$ = 
%.3f$" % (1 / rate) + r"$ s$" + else: + txts = r"$\tau_0$" + r"$ = inf$" + r"$ s$" + x = 0.25 + y0 = 0.9 + fontsize = 12 + ax.text(x=x, y=y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + # print(function) + dt = 0 + if ( + function != "flow_para_function" + and function != "flow_para" + and function != "flow_vibration" + and function != "flow_para_qang" + ): + txts = r"$\alpha$" + r"$ = %.3f$" % (alpha) + dt += 0.1 + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$baseline$" + r"$ = %.3f$" % (baseline) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if ( + function == "flow_para_function" + or function == "flow_para" + or function == "flow_vibration" + or function == "flow_para_qang" + ): + txts = r"$flow_v$" + r"$ = %.3f$" % (flow) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function == "stretched_vibration" or function == "flow_vibration": + txts = r"$vibration$" + r"$ = %.1f Hz$" % (freq) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$\beta$" + r"$ = %.3f$" % (beta) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + try: + ax.set_ylim([ymin * vmin, ymax * vmax]) + except: + pass + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if num_short == 1: + fp = path + filename + else: + fp = path + filename + "_%s_%s" % (mastp, s_ind) + + if append_name != "": + fp = fp + append_name + fps.append(fp + ".png") + # if num_long_i <= 16: + if num_long_i <= max_plotnum_fig: + fig.set_tight_layout(True) + # fig.tight_layout() + # print(fig) + try: + plt.savefig(fp + ".png", dpi=fig.dpi) + except: + print("Can not save figure here.") + + else: + fps = [] + for fn, f in enumerate(fig): + f.set_tight_layout(True) + fp = path + filename + "_q_%s_%s" % (fn * 16, (fn + 1) * 16) + if append_name != "": + fp = fp + append_name + fps.append(fp + ".png") + f.savefig(fp + ".png", dpi=f.dpi) + # plt.savefig( fp + '.png', dpi=fig.dpi) + # combine each saved images together + + if (num_short != 1) or (num_long_i > 16): + outputfile = path + filename + ".png" + if append_name != "": + outputfile = path + filename + append_name + "__joint.png" + else: + outputfile = path + filename + "__joint.png" + combine_images(fps, outputfile, outsize=outsize) + if return_fig: + return fig + + +def power_func(x, D0, power=2): + return D0 * x**power + + +def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv, **kwargs): + """ + Dec 26,2016, Y.G.@CHX + + Fit q~rate by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
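+ A minimal, hypothetical call (assuming qval_dict and the fitted relaxation
+ rates from the g2 analysis are already in hand):
+ D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, rate, geometry='saxs' )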
+ Return: + D0 + qrate_fit_res + """ + + power_variable = False + + if "fit_range" in kwargs.keys(): + fit_range = kwargs["fit_range"] + else: + fit_range = None + + mod = Model(power_func) + # mod.set_param_hint( 'power', min=0.5, max= 10 ) + # mod.set_param_hint( 'D0', min=0 ) + pars = mod.make_params(power=2, D0=1 * 10 ^ (-5)) + if power_variable: + pars["power"].vary = True + else: + pars["power"].vary = False + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + Nqr = num_long + Nqz = num_short + D0 = np.zeros(Nqz) + power = 2 # np.zeros( Nqz ) + qrate_fit_res = [] + # print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + # print(y,x) + if fit_range != None: + y = y[fit_range[0] : fit_range[1]] + x = x[fit_range[0] : fit_range[1]] + # print (i, y,x) + _result = mod.fit(y, pars, x=x, weights=weights) + qrate_fit_res.append(_result) + D0[i] = _result.best_values["D0"] + # power[i] = _result.best_values['power'] + print("The fitted diffusion coefficient D0 is: %.3e A^2S-1" % D0[i]) + return D0, qrate_fit_res + + +def plot_q_rate_fit_general( + qval_dict, + rate, + qrate_fit_res, + geometry="saxs", + ylim=None, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + *argv, + **kwargs, +): + """ + Dec 26,2016, Y.G.@CHX + + plot q~rate fitted by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + power = 2 + fig, ax = plt.subplots() + plt.title(r"$Q^%s$" % (power) + "-Rate-%s_Fit" % (uid), fontsize=20, y=1.06) + Nqz = num_short + if Nqz != 1: + ls = "--" + else: + ls = "" + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values["D0"] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] + else: + label = "" + ax.plot(x**power, y, marker="o", ls=ls, label=label) + yfit = qrate_fit_res[i].best_fit + + if show_fit: + if plot_all_range: + ax.plot(x**power, x**power * D0, "-r") + else: + ax.plot((x**power)[: len(yfit)], yfit, "-r") + + if show_text: + txts = r"$D0: %.3e$" % D0 + r" $A^2$" + r"$s^{-1}$" + dy = 0.1 + ax.text(x=0.15, y=0.65 - dy * i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz != 1: + legend = ax.legend(loc="best") + + if plot_index_range != None: + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) + if ylim != None: + ax.set_ylim(ylim) + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") + ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) + fp = path + "%s_Q_Rate" % (uid) + "_fit.png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig, ax + + +def save_g2_fit_para_tocsv(fit_res, filename, path): + """Y.G. Dec 29, 2016, + save g2 fitted parameter to csv file + """ + col = list(fit_res[0].best_values.keys()) + m, n = len(fit_res), len(col) + data = np.zeros([m, n]) + for i in range(m): + data[i] = list(fit_res[i].best_values.values()) + df = DataFrame(data) + df.columns = col + filename1 = os.path.join(path, filename) # + '.csv') + df.to_csv(filename1) + print("The g2 fitting parameters are saved in %s" % filename1) + return df + + +def R_2(ydata, fit_data): + """Calculates R squared for a particular fit - by L.W. + usage R_2(ydata,fit_data) + returns R2 + by L.W. Feb. 
2019 + """ + y_ave = np.average(ydata) + SS_tot = np.sum((np.array(ydata) - y_ave) ** 2) + # print('SS_tot: %s'%SS_tot) + SS_res = np.sum((np.array(ydata) - np.array(fit_data)) ** 2) + # print('SS_res: %s'%SS_res) + return 1 - SS_res / SS_tot + + +def is_outlier(points, thresh=3.5, verbose=False): + """MAD test""" + points.tolist() + if len(points) == 1: + points = points[:, None] + if verbose: + print("input to is_outlier is a single point...") + median = np.median(points) * np.ones(np.shape(points)) # , axis=0) + + diff = (points - median) ** 2 + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation + return modified_z_score > thresh + + +def outlier_mask( + avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, plot=False +): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. + verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc = 1 + + for rn in np.arange(1, np.max(roi_mask) + 1, 1): + rm = np.zeros(np.shape(roi_mask)) + rm = rm - 1 + rm[np.where(roi_mask == rn)] = 1 + pixel = roi.roi_pixel_values(avg_img * rm, roi_mask, [rn]) + out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) + if np.nanmax(out_l) > 0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) + if verbose: + print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) + if verbose: + print("upper outlier threshold: %s" % upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: + print("no upper outlier threshold found") + ind1 = (out_l * pixel[0][0]) > 0 + ind2 = (out_l * pixel[0][0]) < ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) + except: + lower_outlier_threshold = False + if verbose: + print("no lower outlier threshold found") + else: + if verbose: + print("ROI #%s: no outliers detected" % rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l) / len(pixel[0][0]) + if verbose: + print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: + print( + "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" + % maximum_outlier_fraction + ) + upper_outlier_threshold = False + 
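+ # Too large a fraction of the ROI was flagged by the MAD test: disable both
+ # thresholds so that no pixels in this ROI are masked as outliers.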
lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img * rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img * rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: + fig, ax = plt.subplots(1, 5, figsize=(24, 4)) + plt.subplot(1, 5, pc) + pc += 1 + if pc > 5: + pc = 1 + pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) + plt.plot(pixel[0][0], "bo", markersize=1.5) + if upper_outlier_threshold or lower_outlier_threshold: + x = np.arange(len(out_l)) + plt.plot( + [x[0], x[-1]], + [ave_roi_int, ave_roi_int], + "g--", + label="ROI average: %s" % np.round(ave_roi_int, 4), + ) + if upper_outlier_threshold: + ind = (out_l * pixel[0][0]) > upper_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [upper_outlier_threshold, upper_outlier_threshold], + "r--", + label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + if lower_outlier_threshold: + ind = (out_l * pixel[0][0]) < lower_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [lower_outlier_threshold, lower_outlier_threshold], + "r--", + label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + plt.ylabel("Intensity") + plt.xlabel("pixel") + plt.title("ROI #: %s" % rn) + plt.legend(loc="best", fontsize=8) + + if plot: + fig, ax = plt.subplots() + plt.imshow(hhmask) + hot_dark = np.nonzero(hhmask < 1) + cmap = plt.cm.get_cmap("viridis") + plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) + + return hhmask diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_05012024.py b/pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_05012024.py new file mode 100644 index 0000000..2b780c3 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_generic_functions_05012024.py @@ -0,0 +1,5809 @@ +from pyCHX.chx_libs import * +#from tqdm import * +from pyCHX.chx_libs import ( colors, markers ) +from scipy.special import erf + +from skimage.filters import prewitt +from skimage.draw import line_aa, line, polygon, ellipse, disk + +from modest_image import imshow +import matplotlib.cm as mcm +from matplotlib import cm +import copy, scipy +import PIL +from shutil import copyfile +import pytz +from datetime import datetime +from skbeam.core.utils import radial_grid, angle_grid, radius_to_twotheta, twotheta_to_q +from os import listdir +import numpy as np + + +markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H', + 'h', '*', 'd', + '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] +markers = np.array( markers *100 ) + + + + +flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +"""a function to flatten a nest list +e.g., flatten( [ ['sg','tt'],'ll' ] ) +gives ['sg', 'tt', 'l', 'l'] +""" + + +def get_frames_from_dscan( uid, detector = 'eiger4m_single_image' ): + '''Get frames from a dscan by giving uid and detector ''' + hdr = db[uid] + return db.get_images(hdr, detector ) + + +def get_roi_intensity( img, roi_mask): + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + avgs = np.zeros(noqs) + for i in tqdm( range(1,1+noqs)): + avgs[i-1] = ( np.average( img[roi_mask==i] ) ) + return avgs + + +def generate_h5_list(inDir, filename): + '''YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + Input: + inDir: the input direction + filename: 
the filename for output (have to lst as extension) + Output: + Save the all h5 filenames in a lst file + ''' + fp_list = listdir( inDir ) + if filename[-4:] !='.lst': + filename += '.lst' + for FP in fp_list: + FP_ = inDir+FP + if os.path.isdir(FP_): + fp = listdir( FP_ ) + for fp_ in fp: + if '.h5' in fp_: + append_txtfile( filename = filename, + data = np.array( [ FP_+'/'+fp_ ])) + print('The full path of all the .h5 in %s has been saved in %s.'%(inDir, filename)) + print( 'You can use ./analysis/run_gui to visualize all the h5 file.') + + +def fit_one_peak_curve( x,y, fit_range=None ): + '''YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape + Parameters: + x: one-d array, x-axis data + y: one-d array, y-axis data + fit_range: [x1, x2], a list of index, to define the x-range for fit + Return: + center: float, center of the peak + center_std: float, error bar of center in the fitting + fwhm: float, full width at half max intensity of the peak, 2*sigma + fwhm_std:float, error bar of the full width at half max intensity of the peak + xf: the x in the fit + out: the fitting class resutled from lmfit + + ''' + from lmfit.models import LinearModel, LorentzianModel + peak = LorentzianModel() + background = LinearModel() + model = peak + background + if fit_range != None: + x1,x2=fit_range + xf= x[x1:x2] + yf = y[x1:x2] + else: + xf = x + yf = y + model.set_param_hint('slope', value=5 ) + model.set_param_hint('intercept', value=0 ) + model.set_param_hint('center', value=0.005 ) + model.set_param_hint('amplitude', value= 0.1 ) + model.set_param_hint('sigma', value=0.003 ) + #out=model.fit(yf, x=xf)#, method='nelder') + out=model.fit(yf, x=xf, method= 'leastsq' ) + cen = out.params['center'].value + cen_std = out.params['center'].stderr + wid = out.params['sigma'].value *2 + wid_std = out.params['sigma'].stderr *2 + return cen, cen_std, wid, wid_std , xf, out + + +def plot_xy_with_fit( x, y, xf, out, + cen, cen_std,wid, wid_std, + xlim=[1e-3,0.01],xlabel= 'q ('r'$\AA^{-1}$)', + ylabel='I(q)', filename=None): + '''YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid ''' + + yf2=out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots( ) + plot1D(x=x,y=y,ax=ax,m='o', ls='',c='k', legend='data') + plot1D(x=xf,y=yf2,ax=ax,m='', ls='-',c='r', legend='fit',logy=True) + ax.set_xlim( xlim ) + #ax.set_ylim( 0.1, 4) + #ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel( xlabel ) + ax.set_ylabel(ylabel ) + txts = r'peak' + r' = %.5f +/- %.5f '%( cen, cen_std ) + ax.text(x =0.02, y=.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r'wid' + r' = %.4f +/- %.4f'%( wid, wid_std) + #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x =0.02, y=.1, s=txts, fontsize=14, transform=ax.transAxes) + plt.tight_layout() + if filename != None: + plt.savefig( filename ) + return ax + + + + + +def get_touched_qwidth( qcenters ): + '''YG Dev@CHX April 2019, get touched qwidth by giving qcenters + ''' + qwX = np.zeros_like(qcenters) + qW= qcenters[1:] - qcenters[:-1] + qwX[0] = qW[0] + for i in range(1,len(qcenters)-1): + #print(i) + qwX[i] = min( qW[i-1], qW[i] ) + qwX[-1] = qW[-1] + qwX *=0.9999 + return qwX + + + +def append_txtfile( filename, data, fmt='%s', *argv,**kwargs ): + '''YG. 
Dev May 10, 2109 append data to a file + Create an empty file if the file dose not exist, otherwise, will append the data to it + Input: + fp: filename + data: the data to be append + fmt: the parameter defined in np.savetxt + + ''' + from numpy import savetxt + exists = os.path.isfile( filename) + if not exists: + np.savetxt( filename, [ ] , fmt='%s', ) + print('create new file') + + f=open( filename, 'a') + savetxt( f, data, fmt = fmt , *argv,**kwargs ) + f.close() + +def get_roi_mask_qval_qwid_by_shift( new_cen, new_mask, old_cen,old_roi_mask, + setup_pargs, geometry, + limit_qnum= None): + '''YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask''' + center=setup_pargs['center'] + roi_mask1 = shift_mask( new_cen=center, new_mask=new_mask, old_cen=old_cen, + old_roi_mask=old_roi_mask, limit_qnum= limit_qnum) + qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( + new_mask=new_mask, setup_pargs=setup_pargs, + old_roi_mask=old_roi_mask, old_cen=old_cen, geometry = geometry ) + w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1,new_mask) + #print(w,w1) + qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k in w1 } + qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k in w1 } + qval_dict={} + qwid_dict={} + for i, k in enumerate( list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return roi_mask1, qval_dict, qwid_dict + + +def get_zero_nozero_qind_from_roi_mask(roi_mask,mask): + '''YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number''' + qind, pixelist = roi.extract_label_indices(roi_mask*mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + w=np.where(nopr==0)[0] + w1=np.where(nopr!=0)[0] + return w, w1 + + + +def get_masked_qval_qwid_dict_using_Rmax( new_mask, setup_pargs, old_roi_mask, old_cen, geometry ): + '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method ''' + cy,cx= setup_pargs['center'] + my,mx=new_mask.shape + Rmax = int(np.ceil(max( np.hypot(cx,cy),np.hypot(cx-mx,cy-my),np.hypot(cx,cy-my),np.hypot(cx-mx,cy) ))) + Fmask = np.zeros([Rmax*2,Rmax*2],dtype=int) + Fmask[ Rmax-cy : Rmax-cy+my, Rmax-cx: Rmax-cx + mx]=new_mask + roi_mask1 = shift_mask( new_cen=[Rmax,Rmax], new_mask=np.ones_like(Fmask), old_cen=old_cen, + old_roi_mask=old_roi_mask, limit_qnum= None) + setup_pargs_={ 'center':[Rmax,Rmax], 'dpix': setup_pargs['dpix'], 'Ldet': setup_pargs['Ldet'], + 'lambda_': setup_pargs['lambda_'], } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict( roi_mask1, Fmask, setup_pargs_, geometry ) + #w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1#,w + + + +def get_masked_qval_qwid_dict( roi_mask, mask, setup_pargs, geometry ): + '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask ''' + + qval_dict_, qwid_dict_ = get_qval_qwid_dict( roi_mask, setup_pargs, geometry= geometry) + w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask,mask) + qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k not in w } + qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k not in w } + qval_dict={} + qwid_dict={} + for i, k in enumerate( list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return qval_dict, qwid_dict + + +def get_qval_qwid_dict( roi_mask, setup_pargs, geometry='saxs'): + '''YG Dev April 6, 2019 + Get qval_dict and qwid_dict by giving roi_mask, setup_pargs + 
Input: + roi_mask: integer type 2D array + setup_pargs: dict, should at least contains, center (direct beam center), dpix (in mm), + lamda_: in A-1, Ldet: in mm + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + geometry: support saxs for isotropic transmission SAXS + ang_saxs for anisotropic transmission SAXS + flow_saxs for anisotropic transmission SAXS under flow (center symetric) + + Return: + qval_dict: dict, key as q-number, val: q val + qwid_dict: dict, key as q-number, val: q width (qmax - qmin) + + TODOLIST: to make GiSAXS work + + ''' + + origin = setup_pargs['center']#[::-1] + shape = roi_mask.shape + qp_map = radial_grid(origin, shape) + phi_map = np.degrees( angle_grid(origin, shape) ) + two_theta = radius_to_twotheta( setup_pargs['Ldet'], setup_pargs['dpix'] * qp_map ) + q_map = utils.twotheta_to_q(two_theta, setup_pargs['lambda_']) + qind, pixelist = roi.extract_label_indices(roi_mask) + Qval = np.unique(qind) + qval_dict_ = {} + qwid_dict_ = {} + for j, i in enumerate( Qval): + qval = q_map[ roi_mask == i ] + #print( qval ) + if geometry=='saxs': + qval_dict_[j] = [( qval.max() + qval.min() )/2] # np.mean(qval) + qwid_dict_[j] = [( qval.max() - qval.min() ) ] + + elif geometry=='ang_saxs': + aval = phi_map[ roi_mask == i ] + #print(j,i,qval, aval) + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + + qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) + qwid_dict_[j][0] = ( qval.max() - qval.min() ) + + if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) + #print('here -- %s'%j) + else: + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() ) + + elif geometry=='flow_saxs': + sx,sy = roi_mask.shape + cx,cy = origin + aval = (phi_map[cx:])[ roi_mask[cx:] == i ] + if len(aval)==0: + aval = (phi_map[:cx])[ roi_mask[:cx] == i ] + 180 + + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval) + qwid_dict_[j][0] = ( qval.max() - qval.min() ) + #print(aval) + if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ): + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) + #print('here -- %s'%j) + else: + qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval) + qwid_dict_[j][1] = abs( aval.max() - aval.min() ) + + return qval_dict_, qwid_dict_ + + + +def get_SG_norm( FD, pixelist, bins=1, mask=None, window_size= 11, order= 5 ): + '''Get normalization of a time series by SavitzkyGolay filter + Input: + FD: file handler for a compressed data + pixelist: pixel list for a roi_mask + bins: the bin number for the time series, if number = total number of the time frame, + it means SG of the time averaged image + mask: the additional mask + window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details + Return: + norm: shape as ( length of FD, length of pixelist ) + ''' + if mask == None: + mask = 1 + beg = FD.beg + end = FD.end + N = end-beg + BEG = beg + if bins==1: + END = end + NB = N + MOD=0 + else: + END = N//bins + MOD = N%bins + NB = 
END + norm = np.zeros( [ end, len(pixelist) ] ) + for i in tqdm( range( NB ) ): + if bins == 1: + img = FD.rdframe(i + BEG) + else: + for j in range( bins): + ct = i * bins + j + BEG + #print(ct) + if j==0: + img = FD.rdframe( ct ) + n = 1.0 + else: + (p,v) = FD.rdrawframe(ct) + np.ravel( img )[p] += v + #img += FD.rdframe( ct ) + n += 1 + img /= n + avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask + normi = np.ravel(avg_imgf)[pixelist] + if bins==1: + norm[i+beg] = normi + else: + norm[ i*bins+beg: (i+1)*bins+beg ] = normi + if MOD: + for j in range(MOD): + ct = (1+i) * bins + j + BEG + if j==0: + img = FD.rdframe( ct ) + n = 1.0 + else: + (p,v) = FD.rdrawframe(ct) + np.ravel( img )[p] += v + n += 1 + img /= n + #print(ct,n) + img = FD.rdframe( ct ) + avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask + normi = np.ravel(avg_imgf)[pixelist] + norm[ (i+1)*bins + beg: (i+2)*bins + beg ] = normi + return norm + +def shift_mask( new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None ): + '''Y.G. Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask + Input: + new_cen: [x,y] in uint of pixel + new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask + old_cen: [x,y] in uint of pixel + old_roi_mask: the roi_mask to be shifted + limit_qnum: integer, if not None, defines the max number of unique values of nroi_mask + + Output: + the shifted/croped roi_mask + ''' + nsx,nsy = new_mask.shape + down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1] + x1,x2,y1,y2 = [ old_cen[0] - down, old_cen[0] + up , old_cen[1] - left, old_cen[1] + right ] + nroi_mask_ = old_roi_mask[ x1:x2, y1:y2 ] * new_mask + nroi_mask = np.zeros_like( nroi_mask_ ) + qind, pixelist = roi.extract_label_indices(nroi_mask_) + qu = np.unique(qind) + #noqs = len( qu ) + #nopr = np.bincount(qind, minlength=(noqs+1))[1:] + #qm = nopr>0 + for j, qv in enumerate(qu): + nroi_mask[nroi_mask_ == qv] = j +1 + if limit_qnum != None: + nroi_mask[ nroi_mask > limit_qnum ]=0 + return nroi_mask + + +def plot_q_g2fitpara_general( g2_dict, g2_fitpara, geometry ='saxs', ylim = None, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, ylabel='g2', qth_interest = None, max_plotnum_fig=1600,qphi_analysis=False, + *argv,**kwargs): + ''' + Mar 29,2019, Y.G.@CHX + + plot q~fit parameters + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+ show_fit:, bool, if False, not show the fit + + ''' + + if 'uid' in kwargs.keys(): + uid_ = kwargs['uid'] + else: + uid_ = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] + else: + path = '' + data_dir = path + if ylabel=='g2': + ylabel='g_2' + if ylabel=='g4': + ylabel='g_4' + + if geometry =='saxs': + if qphi_analysis: + geometry = 'ang_saxs' + + + qval_dict_, fit_res_ = g2_dict, g2_fitpara + + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + #print(qr_label, qz_label, short_ulabel, long_ulabel) + #$print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( g2_fitpara['beta'], + g2_fitpara['relaxation_rate'], + g2_fitpara['baseline'], + g2_fitpara['alpha'] ) + + fps=[] + for s_ind in range( num_short ): + ind_long_i = ind_long[ s_ind ] + num_long_i = len( ind_long_i ) + betai, relaxation_ratei, baselinei, alphai = (beta[ind_long_i], relaxation_rate[ind_long_i], + baseline[ind_long_i], alpha[ind_long_i] ) + qi = long_ulabel + #print(s_ind, qi, np.array( betai) ) + + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + #fig = plt.figure( ) + if num_long_i <=4: + if master_plot != 'qz': + fig = plt.figure(figsize=(8, 6)) + else: + if num_short>1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + #print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 + fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] + #print( figsize ) + else: + #print('Here') + if master_plot != 'qz': + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == 'qz': + if geometry=='ang_saxs': + title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' + elif geometry=='gi_saxs': + title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short = '' + else: #qr + if geometry=='ang_saxs' or geometry=='gi_saxs': + title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short='' + #print(geometry) + #filename ='' + til = '%s:--->%s'%(uid_, title_short ) + if num_long_i <=4: + plt.title( til,fontsize= 14, y =1.15) + else: + plt.title( til,fontsize=20, y =1.06) + #print( num_long ) + if num_long!=1: + #print( 'here') + plt.axis('off') + #sy = min(num_long_i,4) + sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) + + else: + sy =1 + sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) + temp = sy + sy = sx + sx = temp + if sx==1: + if sy==1: + plt.axis('on') + ax1 = fig.add_subplot( 4,1,1 ) + ax2 = fig.add_subplot( 4,1,2 ) + ax3 = fig.add_subplot( 4,1,3 ) + ax4 = fig.add_subplot( 4,1,4 ) + plot1D(x=qi, y=betai, m='o', ls='--', c='k', ax=ax1, legend=r'$\beta$', title='') + plot1D(x=qi, y=alphai, m='o', ls='--',c='r', ax=ax2, legend=r'$\alpha$', title='') + plot1D(x=qi, y=baselinei, m='o', ls='--', c='g', ax=ax3, legend=r'$baseline$', title='') + plot1D(x=qi, y=relaxation_ratei, m='o', c='b', ls='--', ax=ax4, legend= r'$\gamma$ $(s^{-1})$' , title='') + + ax4.set_ylabel( r'$\gamma$ $(s^{-1})$' ) + ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) + ax3.set_ylabel( r'$baseline' ) + ax2.set_ylabel( r'$\alpha$' ) + ax1.set_ylabel( r'$\beta$' ) + fig.tight_layout() + fp = data_dir + uid_ + 'g2_q_fit_para_%s.png'%short_ulabel[s_ind] + fig.savefig( fp , dpi=fig.dpi) + 
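+ # each per-qz/angle panel figure is saved to its own PNG; the paths collected in
+ # fps are stitched into a single overview image by combine_images() below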
fps.append(fp) + outputfile = data_dir + '%s_g2_q_fitpara_plot'%uid_ + '.png' + #print(uid) + combine_images( fps, outputfile, outsize= [ 2000,2400 ] ) + + + + + +def plot_q_rate_general( qval_dict, rate, geometry ='saxs', ylim = None, logq=True, lograte=True, + plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False, + show_fit=True, + *argv,**kwargs): + ''' + Mar 29,2019, Y.G.@CHX + + plot q~rate in log-log scale + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. + show_fit:, bool, if False, not show the fit + + ''' + + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + else: + uid = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] + else: + path = '' + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig,ax = plt.subplots() + plt.title(r'$Q$''-Rate-%s'%(uid),fontsize=20, y =1.06) + Nqz = num_short + if Nqz!=1: + ls = '--' + else: + ls='' + #print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + #print(i, x, y, D0 ) + if Nqz!=1: + label=r'$q_z=%.5f$'%short_ulabel[i] + else: + label='' + ax.loglog(x, y, marker = 'o', ls =ls, label=label) + if Nqz!=1:legend = ax.legend(loc='best') + + if plot_index_range != None: + d1,d2 = plot_index_range + d2 = min( len(x)-1, d2 ) + ax.set_xlim( (x**power)[d1], (x**power)[d2] ) + ax.set_ylim( y[d1],y[d2]) + + if ylim != None: + ax.set_ylim( ylim ) + + ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$) (log)") + ax.set_xlabel("$q$"r'($\AA$) (log)') + fp = path + '%s_Q_Rate_loglog'%(uid) + '.png' + fig.savefig( fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig,ax + + + +def plot_xy_x2( x, y, x2=None, pargs=None, loglog=False, logy=True, fig_ax=None, + xlabel= 'q ('r'$\AA^{-1}$)', xlabel2='q (pixel)', title= '_q_Iq', + ylabel = 'I(q)',save=True, *argv,**kwargs): + '''YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + This funciton is primary for plot q-Iq + + Input: + x: one-d array, x in one unit + y: one-d array, + x2:one-d array, x in anoter unit + pargs: dict, could include 'uid', 'path' + loglog: if True, if plot x and y in log, by default plot in y-log + save: if True, save the plot in the path defined in pargs + kwargs: could include xlim (in unit of index), ylim (in unit of real value) + + ''' + if fig_ax == None: + fig, ax1 = plt.subplots() + else: + fig,ax1=fig_ax + if pargs != None: + uid = pargs['uid'] + path = pargs['path'] + else: + uid='XXX' + path='' + if loglog: + ax1.loglog( x,y, '-o') + elif logy: + ax1.semilogy( x,y, '-o') + else: + ax1.plot( x,y, '-o') + ax1.set_xlabel( xlabel ) + ax1.set_ylabel( ylabel ) + title = ax1.set_title( '%s--'%uid + title) + Nx= len(x) + if 'xlim' in kwargs.keys(): + xlim = kwargs['xlim'] + if xlim[1]>Nx: + xlim[1]=Nx-1 + else: + xlim=[ 0, Nx] + if 'ylim' in kwargs.keys(): + ylim = kwargs['ylim'] + else: + ylim=[y.min(), y.max()] + lx1,lx2=xlim + ax1.set_xlim( [ x[lx1], x[lx2] ] ) + ax1.set_ylim( ylim ) + if x2 != None: + ax2 = ax1.twiny() + ax2.set_xlabel( xlabel2 ) 
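+ # the twin axis shares the intensity axis but shows x2 (by default q in pixel) on top;
+ # its limits are matched below to the same index range [lx1, lx2] used for x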
+ ax2.set_ylabel( ylabel ) + ax2.set_xlim( [ x2[lx1], x2[lx2] ] ) + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + if save: + path = pargs['path'] + fp = path + '%s_q_Iq'%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + + + + +def save_oavs_tifs( uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1,threshold = 0 ): + '''save oavs as png''' + tifs = list( db[uid].data( 'OAV_image') )[0] + try: + pixel_scalebar=np.ceil(scalebar_size/md['OAV resolution um_pixel']) + except: + pixel_scalebar=None + print('No OAVS resolution is available.') + + text_string='%s $\mu$m'%scalebar_size + h = db[uid] + oavs=tifs + + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list + detectors = sorted(get_detectors(h)) + for d in range(len(detectors)): + try: + oav_period=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_period'] + oav_expt=h['descriptors'][d]['configuration']['OAV']['data']['OAV_cam_acquire_time'] + except: + pass + oav_times=[] + for i in range(len(oavs)): + oav_times.append(oav_expt+i*oav_period) + fig=plt.subplots(int(np.ceil(len(oavs)/3)),3,figsize=(3*5.08,int(np.ceil(len(oavs)/3))*4)) + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs)/3)),3,m+1) + #plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img*scale)[:,:,2] < threshold + except: + ind = np.flipud(img*scale) < threshold + rgb_cont_img=np.copy(np.flipud(img)) + #rgb_cont_img[ind,0]=1000 + if brightness_scale !=1: + rgb_cont_img=scale_rgb(rgb_cont_img,scale=brightness_scale) + + plt.imshow(rgb_cont_img,interpolation='none',resample=True, cmap = 'gray') + plt.axis('equal') + cross=[685,440,50] # definintion of direct beam: x, y, size + plt.plot([cross[0]-cross[2]/2,cross[0]+cross[2]/2],[cross[1],cross[1]],'r-') + plt.plot([cross[0],cross[0]],[cross[1]-cross[2]/2,cross[1]+cross[2]/2],'r-') + if pixel_scalebar != None: + plt.plot([1100,1100+pixel_scalebar],[150,150],'r-',Linewidth=5) # scale bar. + plt.text(1000,50,text_string,fontsize=14,color='r') + plt.text(600,50,str(oav_times[m])[:5]+' [s]',fontsize=14,color='r') + plt.axis('off') + plt.savefig( data_dir + 'uid=%s_OVA_images.png'%uid) + + + + + +def shift_mask_old( mask, shiftx, shifty): + '''YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel + Input: + mask: int-type array, + shiftx: int scalar, shift value in x direction with unit in pixel + shifty: int scalar, shift value in y direction with unit in pixel + Output: + maskn: int-type array, shifted mask + + ''' + qind, pixelist = roi.extract_label_indices( mask ) + dims = mask.shape + imgwidthy = dims[1] #dimension in y, but in plot being x + imgwidthx = dims[0] #dimension in x, but in plot being y + pixely = pixelist%imgwidthy + pixelx = pixelist//imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy ) & (pixelyn >= 0 ) & (pixelxn < imgwidthx ) & (pixelxn >= 0 ) + pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] + maskn = np.zeros_like( mask ) + maskn.ravel()[pixelist_new] = qind[w] + return maskn + + +def get_current_time(): + '''get current time in a fomart of year/month/date/hour(24)/min/sec/, + e.g. 
2009-01-05 22:14:39 + ''' + loc_dt = datetime.now(pytz.timezone('US/Eastern')) + fmt = "%Y-%m-%d %H:%M:%S" + return loc_dt.strftime(fmt) + + + +def evalue_array( array, verbose = True ): + '''Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array ''' + _min, _max, avg, std = np.min( array), np.max( array), np.average( array ), np.std( array ) + if verbose: + print( 'The min, max, avg, std of this array are: %s %s %s %s, respectively.'%(_min, _max, avg, std ) ) + return _min, _max, avg, std + + + +def find_good_xpcs_uids( fuids, Nlim=100, det = [ '4m', '1m', '500'] ): + '''Y.G., Dev Nov 1, 2018 Find the good xpcs series + Input: + fuids: list, a list of full uids + Nlim: integer, the smallest number of images to be considered as XCPS sereis + det: list, a list of detector (can be short string of the full name of the detector) + Return: + the xpcs uids list + + ''' + guids = [] + for i, uid in enumerate(fuids): + if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': + head = db[uid]['start'] + for dec in head['detectors']: + for dt in det: + if dt in dec: + if 'number of images' in head: + if float(head['number of images'] ) >= Nlim: + #print(i, uid) + guids.append(uid) + G = np.unique( guids ) + print('Found %s uids for XPCS series.'%len(G) ) + return G + + +def create_fullImg_with_box( shape, box_nx = 9 , box_ny = 8, ): + '''Y.G. 2018/10/26 Divide image with multi touched boxes + Input + shape: the shape of image + box_nx: the number of box in x + box_ny: the number width of box in y + Return: + roi_mask, (* mask ) + ''' + + #shape = mask.shape + Wrow, Wcol = int( np.ceil( shape[0]/box_nx )), int(np.ceil(shape[1]/box_ny) ) + #print(Wrow, Wcol) + roi_mask = np.zeros( shape, dtype=np.int32 ) + for i in range( box_nx ): + for j in range(box_ny): + roi_mask[ i*Wrow: (i+1)*Wrow , j*Wcol: (j+1)*Wcol ] = i * box_ny + j + 1 + #roi_mask *= mask + return roi_mask + + + +def get_refl_y0( inc_ang, inc_y0, Ldet, pixel_size, ): + ''' Get reflection beam center y + Input: + inc_ang: incident angle in degree + inc_y0: incident beam y center in pixel + Ldet: sample to detector distance in meter + pixel_size: pixel size in meter + Return: reflection beam center y in pixel + ''' + return Ldet * np.tan( np.radians(inc_ang)) * 2 / pixel_size + inc_y0 + + +def lin2log_g2(lin_tau,lin_g2,num_points=False): + """ + Lutz developed at Aug,2018 + function to resample g2 with linear time steps into logarithmics + g2 values between consecutive logarthmic time steps are averaged to increase statistics + calling sequence: lin2log_g2(lin_tau,lin_g2,num_points=False) + num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade) + num_points=18 -> use 18 logarithmically spaced time points + """ + #prep taus and g2s: remove nan and first data point at tau=0 + rem = lin_tau==0 + #print('lin_tau: '+str(lin_tau.size)) + #print('lin_g2: '+str(lin_g2.size)) + lin_tau[rem]=np.nan + #lin_tau[0]=np.nan;#lin_g2[0]=np.nan + lin_g2 = lin_g2[np.isfinite(lin_tau)] + lin_tau = lin_tau[np.isfinite(lin_tau)] + #print('from lin-to-log-g2_sampling: ',lin_tau) + if num_points == False: + # automatically decide how many log-points (8/decade) + dec=int(np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8)) + else: + dec=int(num_points) + log_tau=np.logspace(np.log10(lin_tau[0]),np.log10(lin_tau.max()),dec) + # re-sample correlation function: + log_g2=[] + for i in range(log_tau.size-1): + 
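+ # each resampled point averages lin_g2 over a window reaching half-way to the
+ # neighboring log_tau points, as described in the docstring above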
y=[i,log_tau[i]-(log_tau[i+1]-log_tau[i])/2,log_tau[i]+(log_tau[i+1]-log_tau[i])/2] + #x=lin_tau[lin_tau>y[1]] + x1=lin_tau>y[1]; x2=lin_tauy[1]; x2=lin_tau peak has to be taller factor 2 above background) + replot: if True, will plot data (if error func) with the fit and peak/cen/com position + logplot: if on, will plot in log scale + x: if not None, give x-data + + + ''' + if x == None: + x = np.arange( len(y) ) + x=np.array(x) + y=np.array(y) + + PEAK=x[np.argmax(y)] + PEAK_y=np.max(y) + COM=np.sum(x * y) / np.sum(y) + ### from Maksim: assume this is a peak profile: + def is_positive(num): + return True if num > 0 else False + # Normalize values first: + ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 + positive = is_positive(ym[0]) + list_of_roots = [] + for i in range(len(y)): + current_positive = is_positive(ym[i]) + if current_positive != positive: + list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) + positive = not positive + if len(list_of_roots) >= 2: + FWHM=abs(list_of_roots[-1] - list_of_roots[0]) + CEN=list_of_roots[0]+0.5*(list_of_roots[1]-list_of_roots[0]) + ps.fwhm=FWHM + ps.cen=CEN + yf=ym + #return { + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, + #} + else: # ok, maybe it's a step function.. + #print('no peak...trying step function...') + ym = ym + shift + def err_func(x, x0, k=2, A=1, base=0 ): #### erf fit from Yugang + return base - A * erf(k*(x-x0)) + mod = Model( err_func ) + ### estimate starting values: + x0=np.mean(x) + #k=0.1*(np.max(x)-np.min(x)) + pars = mod.make_params( x0=x0, k=2, A = 1., base = 0. ) + result = mod.fit(ym, pars, x = x ) + CEN=result.best_values['x0'] + FWHM = result.best_values['k'] + A = result.best_values['A'] + b = result.best_values['base'] + yf_ = err_func(x, CEN, k=FWHM, A=A, base=b ) #result.best_fit + yf = (yf_ ) * (np.max(y) - np.min(y)) + np.min(y) + + #(y - np.min(y)) / (np.max(y) - np.min(y)) - shift + + + ps.cen = CEN + ps.fwhm = FWHM + + if replot: + ### re-plot results: + if logplot=='on': + fig, ax = plt.subplots() #plt.figure() + ax.semilogy([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK') + ax.hold(True) + ax.semilogy([CEN,CEN],[np.min(y),np.max(y)],'r-.',label='CEN') + ax.semilogy([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM') + ax.semilogy(x,y,'bo-') + #plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + #plt.show() + else: + #plt.close(999) + fig, ax = plt.subplots() #plt.figure() + ax.plot([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK') + + #ax.hold(True) + ax.plot([CEN,CEN],[np.min(y),np.max(y)],'m-.',label='CEN') + ax.plot([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM') + ax.plot(x,y,'bo--') + ax.plot(x,yf,'r-', label='Fit') + + #plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + #plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + #plt.show() + + ### assign values of interest as function attributes: + ps.peak=PEAK + ps.com=COM + return ps.cen + + + + + + + + + +def create_seg_ring( ring_edges, ang_edges, mask, setup_pargs ): + '''YG Dev April 6, 2018 + Create segment ring mask + Input: + ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ] + ang_edges: edges 
of angles, e.g., [ [20,40], [50, 60], ] + mask: bool type 2D array + set_pargs: dict, should at least contains, center + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + Return: + roi_mask: segmented ring mask: two-D array + qval_dict: dict, key as q-number, val: q val + + ''' + + roi_mask_qr, qr, qr_edge = get_ring_mask(mask, inner_radius= None, outer_radius = None, + width = None, num_rings = None, edges= np.array( ring_edges), unit='pixel', + pargs= setup_pargs) + + roi_mask_ang, ang_center, ang_edge = get_angular_mask( mask, inner_angle= None, + outer_angle = None, width = None, edges = np.array( ang_edges ), + num_angles = None, center = center, flow_geometry= False ) + + + roi_mask, good_ind = combine_two_roi_mask( roi_mask_qr, roi_mask_ang,pixel_num_thres=100) + qval_dict_ = get_qval_dict( qr_center = qr, qz_center = ang_center,one_qz_multi_qr=False) + qval_dict = { i:qval_dict_[k] for (i,k) in enumerate( good_ind) } + return roi_mask, qval_dict + + + + +def find_bad_pixels_FD( bad_frame_list, FD, img_shape = [514, 1030], + threshold= 15, show_progress=True): + '''Designed to find bad pixel list in 500K + threshold: the max intensity in 5K + ''' + bad = np.zeros( img_shape, dtype=bool ) + if show_progress: + for i in tqdm(bad_frame_list[ bad_frame_list>=FD.beg]): + p,v = FD.rdrawframe(i) + w = np.where( v > threshold)[0] + bad.ravel()[ p[w] ] = 1 + # x,y = np.where( imgsa[i] > threshold) + # bad[x[0],y[0]] = 1 + else: + for i in bad_frame_list[ bad_frame_list>=FD.beg]: + p,v = FD.rdrawframe(i) + w = np.where( v > threshold)[0] + bad.ravel()[ p[w] ] = 1 + + return ~bad + + +def get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold=15 ): + '''DEV by Yugang@CHX, June 6, 2019 + Get circular average of a time series using a dynamics mask, which pixel values are defined as + zeors if above a threshold. 
+ Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number + Input: + FD: the multifile handler for the time series + mask: a two-d bool type array + setup_pargs: dict, parameters of setup for calculate q-Iq + should have keys as + 'dpix', 'Ldet','lambda_', 'center' + bin_number: bin number of the frame + threshold: define the dynamics mask, which pixel values are defined as + zeors if above this threshold + Output: + qp_saxs: q in pixel + iq_saxs: intenstity + q_saxs: q in A-1 + ''' + beg = FD.beg + end = FD.end + shape = FD.rdframe(beg).shape + Nimg_ = FD.end-FD.beg + #Nimg_ = 100 + Nimg = Nimg_//bin_number + time_edge = np.array(create_time_slice( N= Nimg_, + slice_num= Nimg, slice_width= bin_number )) + beg + for n in tqdm( range(Nimg) ): + t1,t2 = time_edge[n] + #print(t1,t2) + if bin_number==1: + avg_imgi = FD.rdframe(t1) + else: + avg_imgi = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, + plot_ = False,show_progress= False) + badpi = find_bad_pixels_FD( np.arange(t1,t2) , FD, + img_shape = avg_imgi.shape, threshold= threshold, show_progress=False ) + img = avg_imgi* mask * badpi + qp_saxsi, iq_saxsi, q_saxsi = get_circular_average( img, + mask * badpi, save= False, + pargs=setup_pargs ) + #print( img.max()) + if t1==FD.beg: + qp_saxs, iq_saxs, q_saxs = np.zeros_like( qp_saxsi ), np.zeros_like( iq_saxsi ), np.zeros_like( q_saxsi ) + qp_saxs += qp_saxsi + iq_saxs += iq_saxsi + q_saxs += q_saxsi + qp_saxs /= Nimg + iq_saxs /= Nimg + q_saxs /= Nimg + + return qp_saxs, iq_saxs, q_saxs + +def get_waxs_beam_center( gamma, origin = [432, 363], Ldet = 1495, pixel_size = 75 * 1e-3 ): + '''YG Feb 10, 2018 + Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma + Input: + gamma: angle in degree + Ldet: sample to detector distance, 1495 mm for CHX WAXS + origin: beam center for gamma = 0, (python x,y coordinate in pixel) + pxiel size: 75 * 1e-3 mm for Eiger 1M + output: + beam center: for the target gamma, in pixel + ''' + return [ int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ] + + + +def get_img_from_iq( qp, iq, img_shape, center): + '''YG Jan 24, 2018 + Get image from circular average + Input: + qp: q in pixel unit + iq: circular average + image_shape, e.g., [256,256] + center: [center_y, center_x] e.g., [120, 200] + Output: + img: recovered image + ''' + pixelist = np.arange( img_shape[0] * img_shape[1] ) + pixely = pixelist%img_shape[1] -center[1] + pixelx = pixelist//img_shape[1] - center[0] + r= np.hypot(pixelx, pixely) #leave as float. + #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return (np.interp( r, qp, iq )).reshape( img_shape ) + + +def average_array_withNan( array, axis=0, mask=None): + '''YG. Jan 23, 2018 + Average array invovling np.nan along axis + + Input: + array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + avg: averaged array along axis + ''' + shape = array.shape + if mask == None: + mask = np.isnan(array) + #mask = np.ma.masked_invalid(array).mask + array_ = np.ma.masked_array(array, mask=mask) + try: + sums = np.array( np.ma.sum( array_[:,:], axis= axis ) ) + except: + sums = np.array( np.ma.sum( array_[:], axis= axis ) ) + + cts = np.sum(~mask,axis=axis) + #print(cts) + return sums/cts + +def deviation_array_withNan( array, axis=0, mask=None): + '''YG. 
Jan 23, 2018 + Get the deviation of array invovling np.nan along axis + + Input: + array: ND array + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + dev: the deviation of array along axis + ''' + avg2 = average_array_withNan( array**2, axis = axis, mask = mask ) + avg = average_array_withNan( array, axis = axis, mask = mask ) + return np.sqrt( avg2 - avg**2 ) + + + +def refine_roi_mask( roi_mask, pixel_num_thres=10): + '''YG Dev Jan20,2018 + remove bad roi which pixel numbe is lower pixel_num_thres + roi_mask: array, + pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, + i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres, + that roi will be considered as bad one and be removed. + ''' + new_mask = np.zeros_like( roi_mask ) + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + good_ind = np.where( nopr >= pixel_num_thres)[0] +1 + l = len(good_ind) + new_ind = np.arange( 1, l+1 ) + for i, gi in enumerate( good_ind ): + new_mask.ravel()[ + np.where( roi_mask.ravel() == gi)[0] ] = new_ind[i] + return new_mask, good_ind -1 + +def shrink_image_stack( imgs, bins): + '''shrink imgs by bins + imgs: shape as [Nimg, imx, imy] ''' + Nimg, imx, imy = imgs.shape + bx, by = bins + imgsk = np.zeros( [Nimg, imx//bx, imy//by] ) + N = len(imgs) + for i in range(N): + imgsk[i] = shrink_image(imgs[i], bins ) + return imgsk + +def shrink_image(img, bins ): + '''YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y + input: + img: 2d array, + bins: integer list, eg. [2,2] + output: + imgb: binned img + ''' + m,n = img.shape + bx, by = bins + Nx, Ny = m//bx, n//by + #print(Nx*bx, Ny*by) + return img[:Nx*bx, :Ny*by].reshape( Nx,bx, Ny, by).mean(axis=(1,3) ) + + +def get_diff_fv( g2_fit_paras, qval_dict, ang_init=137.2): + '''YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras ''' + g2_fit_para_ = g2_fit_paras.copy() + qr = np.array( [qval_dict[k][0] for k in sorted( qval_dict.keys())] ) + qang = np.array( [qval_dict[k][1] for k in sorted( qval_dict.keys())] ) + #x=g2_fit_para_.pop( 'relaxation_rate' ) + #x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_['diff'] = g2_fit_paras[ 'relaxation_rate' ]/qr**2 + cos_part = np.abs( np.cos( np.radians( qang - ang_init)) ) + g2_fit_para_['fv'] = g2_fit_paras[ 'flow_velocity' ]/cos_part/qr + return g2_fit_para_ + + + + +# function to get indices of local extrema (=indices of speckle echo maximum amplitudes): +def get_echos(dat_arr,min_distance=10): + """ + getting local maxima and minima from 1D data -> e.g. speckle echos + strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima + using np.argmin to find absolute minima between relative maxima + returns [max_ind,min_ind] -> lists of indices corresponding to local maxima/minima + by LW 10/23/2018 + """ + from skimage.feature import peak_local_max + max_ind=peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) 
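+ # minima: np.argmin of dat_arr between consecutive maxima found above; the reversed
+ # ordering returned by peak_local_max is undone in the reversed() lists returned below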
+ min_ind=[] + for i in range(len(max_ind[:-1])): + min_ind.append(max_ind[i+1][0]+np.argmin(dat_arr[max_ind[i+1][0]:max_ind[i][0]])) + #unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind=[] + for l in max_ind: + mmax_ind.append(l[0]) + #return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)),list(reversed(min_ind))] + + +def pad_length(arr,pad_val=np.nan): + """ + arr: 2D matrix + pad_val: values being padded + adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix + -> used to convert python generic data object to HDF5 native format + function fixes python bug in padding (np.pad) integer array with np.nan + update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogenous size + by LW 12/30/2017 + """ + max_len=[] + for i in range(len(arr)): + max_len.append([len(arr[i])]) + max_len=np.max(max_len) + for l in range(len(arr)): + arr[l]=np.pad(arr[l]*1.,(0,max_len-np.size(arr[l])),mode='constant',constant_values=pad_val) + return arr + + + +def save_array_to_tiff(array, output, verbose=True): + '''Y.G. Nov 1, 2017 + Save array to a tif file + ''' + img = PIL.Image.fromarray(array) + img.save( output ) + if verbose: + print( 'The data is save to: %s.'%( output )) + + + +def load_pilatus(filename): + '''Y.G. Nov 1, 2017 + Load a pilatus 2D image + ''' + return np.array( PIL.Image.open(filename).convert('I') ) + +def ls_dir(inDir, have_list=[], exclude_list=[] ): + '''Y.G. Aug 1, 2019 + List all filenames in a filefolder + inDir: fullpath of the inDir + have_string: only retrun filename containing the string + exclude_string: only retrun filename not containing the string + + ''' + from os import listdir + from os.path import isfile, join + + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + tifs_ = [] + for tif in tifs: + flag=1 + for string in have_list: + if string not in tif: + flag *=0 + for string in exclude_list: + if string in tif: + flag *=0 + if flag: + tifs_.append( tif ) + + return np.array( tifs_ ) + + +def ls_dir2(inDir, string=None): + '''Y.G. Nov 1, 2017 + List all filenames in a filefolder (not include hidden files and subfolders) + inDir: fullpath of the inDir + string: if not None, only retrun filename containing the string + ''' + from os import listdir + from os.path import isfile, join + if string == None: + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + else: + tifs = np.array( [f for f in listdir(inDir) if (isfile(join(inDir, f)))&(string in f) ] ) + return tifs + +def re_filename( old_filename, new_filename, inDir=None, verbose=True ): + '''Y.G. Nov 28, 2017 + Rename old_filename with new_filename in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_filename/ new_filename: string + an example: + re_filename( 'uid=run20_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' + ) + ''' + if inDir != None: + os.rename(inDir + old_filename, inDir+new_filename) + else: + os.rename( old_filename, new_filename) + print('The file: %s is changed to: %s.'%(old_filename, new_filename)) + + +def re_filename_dir( old_pattern, new_pattern, inDir,verbose=True ): + '''Y.G. 
Nov 28, 2017 + Rename all filenames with old_pattern with new_pattern in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_pattern, new_pattern + an example, + re_filename_dir('20_', '17_', inDir ) + ''' + fps = ls_dir(inDir) + for fp in fps: + if old_pattern in fp: + old_filename = fp + new_filename = fp.replace(old_pattern, new_pattern) + re_filename( old_filename, new_filename, inDir,verbose= verbose ) + +def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent=True, qprecision=5): + """ + function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis + [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) + calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True) + qdict: qval_dict from analysis pipeline/hdf5 result file + q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True) + q_thresh: threshold for comparing Q-values, set to 0 for exact comparison + phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True) + p_thresh: threshold for comparing phi values, set to 0 for exact comparison + silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest + by LW 10/21/2017 + update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) + update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environments >= 2019-3.0.1 + """ + import collections + from collections import OrderedDict + qdict = collections.OrderedDict(sorted(qdict.items())) + qs=[] + phis=[] + for i in qdict.keys(): + qs.append(qdict[i][0]) + phis.append(qdict[i][1]) + qslist=list(OrderedDict.fromkeys(qs)) + qslist = np.unique( np.round(qslist, qprecision ) ) + phislist=list(OrderedDict.fromkeys(phis)) + qslist=list(np.sort(qslist)) + phislist=list(np.sort(phislist)) + if q_nr: + qinterest=qslist[q] + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] + else: + qinterest=q + qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new + if phi_nr: + phiinterest=phislist[phi] + phiindices = [i for i,x in enumerate(phis) if x == phiinterest] + else: + phiinterest=phi + phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new + ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] #-> this is the original + if silent == False: + print('list of available Qs:') + print(qslist) + print('list of available phis:') + print(phislist) + print('Roi number for Q= '+str(ret_list[1])+' and phi= '+str(ret_list[2])+': '+str(ret_list[0])) + return ret_list + +def get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): + '''YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, + namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) + Input: + x: 1D np.array + y: 1D np.array + mid_xpoint: float, the middle point of x + xrange: [x1,x2] + Return: + D1, gmfit1, D2, gmfit2 : + fit parameter (slope, background) of linear fit1 + convinent fit class, gmfit1(x) gives yvale + fit parameter (slope, background) of linear fit2 + convinent fit class, gmfit2(x) gives yvale + + ''' + if xrange == None: + x1,x2 = min(x), max(x) + x1,x2=xrange + if mid_xpoint2 == None: + mid_xpoint2= mid_xpoint1 + D1, gmfit1 = linear_fit( x,y, xrange= [ x1,mid_xpoint1 
]) + D2, gmfit2 = linear_fit( x,y, xrange= [mid_xpoint2, x2 ]) + return D1, gmfit1, D2, gmfit2 + +def get_cross_point( x, gmfit1, gmfit2 ): + '''YG Octo 16,2017 + Get croess point of two curve + ''' + y1 = gmfit1(x) + y2 = gmfit2(x) + return x[np.argmin( np.abs(y1-y2) )] + +def get_curve_turning_points( x, y, mid_xpoint1, mid_xpoint2=None, xrange=None, ): + '''YG Octo 16,2017 + Get a turning point of a curve by doing a two-linear fit + ''' + D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2, xrange ) + return get_cross_point( x, gmfit1, gmfit2 ) + + +def plot_fit_two_linear_fit(x,y, gmfit1, gmfit2, ax=None ): + '''YG Octo 16,2017 Plot data with two fitted linear func + ''' + if ax == None: + fig, ax =plt.subplots() + plot1D( x = x, y = y, ax =ax, c='k', legend='data', m='o', ls='')#logx=True, logy=True ) + plot1D( x = x, y = gmfit1(x), ax =ax, c='r', m='', ls='-',legend='fit1' ) + plot1D( x = x, y = gmfit2(x), ax =ax, c='b', m='', ls='-',legend='fit2' ) + return ax + + +def linear_fit( x,y, xrange=None): + '''YG Octo 16,2017 copied from XPCS_SAXS + a linear fit + ''' + if xrange != None: + xmin, xmax = xrange + x1,x2 = find_index( x,xmin,tolerance= None),find_index( x,xmax,tolerance= None) + x_ = x[x1:x2] + y_ = y[x1:x2] + else: + x_=x + y_=y + D0 = np.polyfit(x_, y_, 1) + gmfit = np.poly1d(D0) + return D0, gmfit + + +def find_index( x,x0,tolerance= None): + '''YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + ''' + + N=len(x) + i=0 + if x0 > max(x): + position= len(x) -1 + elif x0 max(x): + position= len(x) -1 + elif x0 di: + try: + els = line.split() + if good_cols == None: + temp = np.array( els, dtype=float ) + else: + temp= np.array( [els[j] for j in good_cols], dtype=float ) + data=np.vstack( (data,temp)) + except: + pass + if labels == None: + labels = np.arange(data.shape[1]) + df = pds.DataFrame( data, index= np.arange(data.shape[0]), columns= labels ) + return df + + + +def get_print_uids( start_time, stop_time, return_all_info=False): + '''Update Feb 20, 2018 also return full uids + YG. 
Octo 3, 2017@CHX + Get full uids and print uid plus Measurement contents by giving start_time, stop_time + + ''' + hdrs = list( db(start_time= start_time, stop_time = stop_time) ) + fuids = np.zeros( len(hdrs),dtype=object) + uids = np.zeros( len(hdrs),dtype=object) + sids = np.zeros( len(hdrs), dtype=object) + n=0 + all_info = np.zeros( len(hdrs), dtype=object) + for i in range(len(hdrs)): + fuid = hdrs[-i-1]['start']['uid'] #reverse order + uid = fuid[:6] #reverse order + sid = hdrs[-i-1]['start']['scan_id'] + fuids[n]=fuid + uids[n]=uid + sids[n]=sid + date = time.ctime(hdrs[-i-1]['start']['time']) + try: + m = hdrs[-i-1]['start']['Measurement'] + except: + m='' + info = "%3d: uid = '%s' ##%s #%s: %s-- %s "%(i,uid,date,sid,m, fuid) + print( info ) + if return_all_info: + all_info[n]=info + n +=1 + if not return_all_info: + return fuids, uids, sids + else: + return fuids, uids, sids, all_info + + + +def get_last_uids( n=-1 ): + '''YG Sep 26, 2017 + A Convinient function to copy uid to jupyter for analysis''' + uid = db[n]['start']['uid'][:8] + sid = db[n]['start']['scan_id'] + m = db[n]['start']['Measurement'] + return " uid = '%s' #(scan num: %s (Measurement: %s "%(uid,sid,m) + + + +def get_base_all_filenames( inDir, base_filename_cut_length = -7 ): + '''YG Sep 26, 2017 + Get base filenames and their related all filenames + Input: + inDir, str, input data dir + base_filename_cut_length: to which length the base name is unique + Output: + dict: keys, base filename + vales, all realted filename + ''' + from os import listdir + from os.path import isfile, join + tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] ) + tifsc = list(tifs.copy()) + utifs = np.sort( np.unique( np.array([ f[:base_filename_cut_length] for f in tifs] ) ) )[::-1] + files = {} + for uf in utifs: + files[uf] = [] + i = 0 + reName = [] + for i in range(len(tifsc)): + if uf in tifsc[i]: + files[uf].append( tifsc[i] ) + reName.append(tifsc[i]) + for fn in reName: + tifsc.remove(fn) + return files + + +def create_ring_mask( shape, r1, r2, center, mask=None): + '''YG. Sep 20, 2017 Develop@CHX + Create 2D ring mask + input: + shape: two integer number list, mask shape, e.g., [100,100] + r1: the inner radius + r2: the outer radius + center: two integer number list, [cx,cy], ring center, e.g., [30,50] + output: + 2D numpy array, 0,1 type + ''' + + m = np.zeros( shape, dtype= bool) + rr,cc = disk((center[1], center[0]), r2, shape=shape ) + m[rr,cc] = 1 + rr,cc = disk((center[1], center[0]), r1,shape=shape ) + m[rr,cc] = 0 + if mask != None: + m += mask + return m + +def get_image_edge(img): + ''' + Y.G. Developed at Sep 8, 2017 @CHX + Get sharp edges of an image + img: two-D array, e.g., a roi mask + ''' + edg_ = prewitt(img/1.0) + edg = np.zeros_like(edg_) + w = np.where(edg_ > 1e-10) + edg[w] = img[w] + edg[np.where(edg==0)] = 1 + return edg + +def get_image_with_roi( img, roi_mask, scale_factor = 2): + ''' + Y.G. 
Developed at Sep 8, 2017 @CHX + Get image with edges of roi_mask by doing + i) get edges of roi_mask by function get_image_edge + ii) scale img at region of interest (ROI) by scale_factor + img: two-D array for image + roi_mask: two-D array for ROI + scale_factor: scaling factor of ROI in image + ''' + edg = get_image_edge( roi_mask ) + img_ = img.copy() + w = np.where(roi_mask) + img_[w] = img[w] * scale_factor + return img_ * edg + + + + + +def get_today_date( ): + from time import gmtime, strftime + return strftime("%m-%d-%Y", gmtime() ) + + +def move_beamstop( mask, xshift, yshift ): + '''Y.G. Developed at July 18, 2017 @CHX + Create new mask by shift the old one with xshift, yshift + Input + --- + mask: 2D numpy array, 0 for bad pixels, 1 for good pixels + xshift, integer, shift value along x direction + yshift, integer, shift value along y direction + + Output + --- + mask, 2D numpy array, + ''' + m = np.ones_like(mask) + W,H = mask.shape + w = np.where(mask==0) + nx, ny = w[0]+ int(yshift), w[1]+ int(xshift ) + gw = np.where( (nx >= 0) & (nx= 0) & (ny= xmax) | ( pixel <= xmin) )[0] + else: + badp = filter_dict[k] + if len(badp)!=0: + pls = np.where([rf==k])[1] + rf[ pls[badp] ] = 0 + return rm + + +## +#Dev at March 31 for create Eiger chip mask +def create_chip_edges_mask( det='1M' ): + ''' Create a chip edge mask for Eiger detector + + ''' + if det == '1M': + shape = [1065, 1030] + w = 4 + mask = np.ones( shape , dtype = np.int32) + cx = [ 1030//4 *i for i in range(1,4) ] + #cy = [ 1065//4 *i for i in range(1,4) ] + cy = [808, 257 ] + #print (cx, cy ) + for c in cx: + mask[:, c-w//2:c+w//2 ] = 0 + for c in cy: + mask[ c-w//2:c+w//2, : ] = 0 + + return mask + +def create_ellipse_donut( cx, cy , wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): + Nmax = np.max( np.unique( roi_mask ) ) + rr1, cc1 = ellipse( cy,cx, wy_inner, wx_inner ) + rr2, cc2 = ellipse( cy, cx, wy_inner + gap, wx_inner +gap ) + rr3, cc3 = ellipse( cy, cx, wy_outer,wx_outer ) + roi_mask[rr3,cc3] = 2 + Nmax + roi_mask[rr2,cc2] = 0 + roi_mask[rr1,cc1] = 1 + Nmax + return roi_mask + +def create_box( cx, cy, wx, wy, roi_mask): + Nmax = np.max( np.unique( roi_mask ) ) + for i, [cx_,cy_] in enumerate(list( zip( cx,cy ))): #create boxes + x = np.array( [ cx_-wx, cx_+wx, cx_+wx, cx_-wx]) + y = np.array( [ cy_-wy, cy_-wy, cy_+wy, cy_+wy]) + rr, cc = polygon( y,x) + roi_mask[rr,cc] = i +1 + Nmax + return roi_mask + + + + +def create_folder( base_folder, sub_folder ): + ''' + Crate a subfolder under base folder + Input: + base_folder: full path of the base folder + sub_folder: sub folder name to be created + Return: + Created full path of the created folder + ''' + + data_dir0 = os.path.join( base_folder, sub_folder ) + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + return data_dir0 + + + + + +def create_user_folder( CYCLE, username=None, default_dir= '/XF11ID/analysis/' ): + ''' + Crate a folder for saving user data analysis result + Input: + CYCLE: run cycle + username: if None, get username from the jupyter username + Return: + Created folder name + ''' + if username !='Default': + if username == None: + username = getpass.getuser() + data_dir0 = os.path.join(default_dir, CYCLE, username, 'Results/') + else: + data_dir0 = os.path.join(default_dir, CYCLE +'/') + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + 
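+ # e.g. (illustrative): create_user_folder('2018_1', username='jianheng')
+ # resolves data_dir0 to '/XF11ID/analysis/2018_1/jianheng/Results/'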
os.makedirs(data_dir0, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + return data_dir0 + + + + + + +################################## +#########For dose analysis ####### +################################## +def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): + ''' + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + ''' + return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att ) + + +def get_multi_tau_lag_steps( fra_max, num_bufs = 8 ): + ''' + Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max + Parameters: + fra_max: integer, the maximun frame number + buf_num (default=8), + Return: + taus_in_log, a list + + e.g., + get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) + + ''' + num_levels = int(np.log( fra_max/(num_bufs-1))/np.log(2) +1) +1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + return lag_steps[lag_steps < fra_max] + + + +def get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = True, + num_bufs = 8): + ''' + Get taus for dose dependent analysis + Parameters: + fra_max_list: a list, a lsit of largest available frame number + acq_time: acquistion time for each frame + log_taus: if true, will use the multi-tau defined taus bu using buf_num (default=8), + otherwise, use deltau =1 + Return: + tausd, a dict, with keys as taus_max_list items + e.g., + get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True, num_bufs = 8) + --> + {20: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]), + 30: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28]), + 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) + } + + ''' + tausd = {} + for n in fra_max_list: + if max_fra_num != None: + L = max_fra_num + else: + L = np.infty + if n>L: + warnings.warn("Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." + "We will use the maxium dose of the data.") + n = L + if log_taus: + lag_steps = get_multi_tau_lag_steps(n, num_bufs) + else: + lag_steps = np.arange( n ) + tausd[n] = lag_steps * acq_time + return tausd + + + + +def check_lost_metadata(md, Nimg=None, inc_x0 =None, inc_y0= None, pixelsize=7.5*10*(-5) ): + '''Y.G. 
Dec 31, 2016, check lost metadata + + Parameter: + md: dict, meta data dictionay + Nimg: number of frames for this uid metadata + inc_x0/y0: incident beam center x0/y0, if None, will over-write the md['beam_center_x/y'] + pixelsize: if md don't have ['x_pixel_size'], the pixelsize will add it + Return: + dpix: pixelsize, in mm + lambda_: wavelegth of the X-rays in Angstroms + exposuretime: exposure time in sec + timeperframe: acquisition time is sec + center: list, [x,y], incident beam center in pixel + Will also update md + ''' + mdn = md.copy() + if 'number of images' not in list(md.keys()): + md['number of images'] = Nimg + if 'x_pixel_size' not in list(md.keys()): + md['x_pixel_size'] = 7.5000004e-05 + dpix = md['x_pixel_size'] * 1000. #in mm, eiger 4m is 0.075 mm + try: + lambda_ =md['wavelength'] + except: + lambda_ =md['incident_wavelength'] # wavelegth of the X-rays in Angstroms + try: + Ldet = md['det_distance'] + if Ldet<=1000: + Ldet *=1000 + md['det_distance'] = Ldet + except: + Ldet = md['detector_distance'] + if Ldet<=1000: + Ldet *=1000 + md['detector_distance'] = Ldet + + + try:#try exp time from detector + exposuretime= md['count_time'] #exposure time in sec + except: + exposuretime= md['cam_acquire_time'] #exposure time in sec + try:#try acq time from detector + acquisition_period = md['frame_time'] + except: + try: + acquisition_period = md['acquire period'] + except: + uid = md['uid'] + acquisition_period = float( db[uid]['start']['acquire period'] ) + timeperframe = acquisition_period + if inc_x0 != None: + mdn['beam_center_x']= inc_y0 + print( 'Beam_center_x has been changed to %s. (no change in raw metadata): '%inc_y0) + if inc_y0 != None: + mdn['beam_center_y']= inc_x0 + print( 'Beam_center_y has been changed to %s. (no change in raw metadata): '%inc_x0) + center = [ int(mdn['beam_center_x']),int( mdn['beam_center_y'] ) ] #beam center [y,x] for python image + center=[center[1], center[0]] + + return dpix, lambda_, Ldet, exposuretime, timeperframe, center + + +def combine_images( filenames, outputfile, outsize=(2000, 2400)): + '''Y.G. 
Dec 31, 2016 + Combine images together to one image using PIL.Image + Input: + filenames: list, the images names to be combined + outputfile: str, the filename to generate + outsize: the combined image size + Output: + save a combined image file + ''' + N = len( filenames) + #nx = np.int( np.ceil( np.sqrt(N)) ) + #ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int( np.ceil( np.sqrt(N)) ) + nx = int( np.ceil( N / float(ny) ) ) + + #print(nx,ny) + result = Image.new("RGB", outsize, color=(255,255,255,0)) + basewidth = int( outsize[0]/nx ) + hsize = int( outsize[1]/ny ) + for index, file in enumerate(filenames): + path = os.path.expanduser(file) + img = Image.open(path) + bands = img.split() + ratio = img.size[1]/ img.size[0] #h/w + if hsize > basewidth * ratio: + basewidth_ = basewidth + hsize_ = int( basewidth * ratio ) + else: + basewidth_ = int( hsize/ratio ) + hsize_ = hsize + #print( index, file, basewidth, hsize ) + size = (basewidth_,hsize_) + bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] + img = Image.merge('RGBA', bands) + x = index % nx * basewidth + y = index // nx * hsize + w, h = img.size + #print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h )) + result.save( outputfile,quality=100, optimize=True ) + print( 'The combined image is saved as: %s'%outputfile) + + +def get_qval_dict( qr_center, qz_center=None, qval_dict = None, multi_qr_for_one_qz= True, + one_qz_multi_qr = True): + '''Y.G. Dec 27, 2016 + Map the roi label array with qr or (qr,qz) or (q//, q|-) values + Parameters: + qr_center: list, a list of qr + qz_center: list, a list of qz, + multi_qr_for_one_qz: by default=True, + if one_qz_multi_qr: + one qz_center corresponds to all qr_center, in other words, there are totally, len(qr_center)* len(qz) qs + else: + one qr_center corresponds to all qz_center, + else: one qr with one qz + qval_dict: if not None, will append the new dict to the qval_dict + Return: + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + + ''' + + if qval_dict == None: + qval_dict = {} + maxN = 0 + else: + maxN = np.max( list( qval_dict.keys() ) ) +1 + + if qz_center != None: + if multi_qr_for_one_qz: + if one_qz_multi_qr: + for qzind in range( len( qz_center)): + for qrind in range( len( qr_center)): + qval_dict[ maxN + qzind* len( qr_center) + qrind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) + else: + for qrind in range( len( qr_center)): + for qzind in range( len( qz_center)): + qval_dict[ maxN + qrind* len( qz_center) + qzind ] = np.array( [qr_center[qrind], qz_center[qzind] ] ) + + + else: + for i, [qr, qz] in enumerate(zip( qr_center, qz_center)): + qval_dict[ maxN + i ] = np.array( [ qr, qz ] ) + else: + for qrind in range( len( qr_center)): + qval_dict[ maxN + qrind ] = np.array( [ qr_center[qrind] ] ) + return qval_dict + + +def update_qval_dict( qval_dict1, qval_dict2 ): + ''' Y.G. Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + Output: + qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) + ''' + maxN = np.max( list( qval_dict1.keys() ) ) +1 + qval_dict = {} + qval_dict.update( qval_dict1 ) + for k in list( qval_dict2.keys() ): + qval_dict[k + maxN ] = qval_dict2[k] + return qval_dict + +def update_roi_mask( roi_mask1, roi_mask2 ): + ''' Y.G. 
Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + roi_mask1, 2d-array, label array, same shape as xpcs frame, + roi_mask2, 2d-array, label array, same shape as xpcs frame, + Output: + roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 + ''' + roi_mask = roi_mask1.copy() + w= np.where( roi_mask2 ) + roi_mask[w] = roi_mask2[w] + np.max( roi_mask ) + return roi_mask + + +def check_bad_uids(uids, mask, img_choice_N = 10, bad_uids_index = None ): + '''Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + ''' + import random + buids = [] + guids = list( uids ) + #print( guids ) + if bad_uids_index == None: + bad_uids_index = [] + for i, uid in enumerate(uids): + #print( i, uid ) + if i not in bad_uids_index: + detector = get_detector( db[uid ] ) + imgs = load_data( uid, detector ) + img_samp_index = random.sample( range(len(imgs)), img_choice_N) + imgsa = apply_mask( imgs, mask ) + avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uid) + if avg_img.max() == 0: + buids.append( uid ) + guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) + print( 'The bad uid is: %s'%uid ) + else: + guids.pop( list( np.where( np.array(guids) == uid)[0] )[0] ) + buids.append( uid ) + print( 'The bad uid is: %s'%uid ) + print( 'The total and bad uids number are %s and %s, repsectively.'%( len(uids), len(buids) ) ) + return guids, buids + + + +def find_uids(start_time, stop_time ): + '''Y.G. 
Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = db(start_time= start_time, stop_time = stop_time) + try: + print ('Totally %s uids are found.'%(len(list(hdrs)))) + except: + pass + sids=[] + uids=[] + fuids=[] + for hdr in hdrs: + s= get_sid_filenames( hdr) + #print (s[1][:8]) + sids.append( s[0] ) + uids.append( s[1][:8] ) + fuids.append( s[1] ) + sids=sids[::-1] + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(sids), np.array(uids), np.array(fuids) + + +def ployfit( y, x=None, order = 20 ): + ''' + fit data (one-d array) by a ploynominal function + return the fitted one-d array + ''' + if x == None: + x = range(len(y)) + pol = np.polyfit(x, y, order) + return np.polyval(pol, x) + +def check_bad_data_points( data, fit=True, polyfit_order = 30, legend_size = 12, + plot=True, scale=1.0, good_start=None, good_end=None, path=None, return_ylim=False ): + ''' + data: 1D array + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + ''' + if good_start == None: + good_start=0 + if good_end == None: + good_end = len( data ) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end,len( data ) )] + + d_ = data[good_start:good_end] + + if fit: + pfit = ployfit( d_, order = polyfit_order) + d = d_ - pfit + else: + d = d_ + pfit = np.ones_like(d) * data.mean() + + ymin = d.mean()-scale *d.std() + ymax = d.mean()+scale *d.std() + + if plot: + fig = plt.figure( ) + ax = fig.add_subplot(2,1,1 ) + plot1D( d_, ax = ax, color='k', legend='data',legend_size=legend_size ) + plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title='Find Bad Points',legend_size=legend_size ) + + ax2 = fig.add_subplot(2,1,2 ) + plot1D( d, ax = ax2,legend='difference',marker='s', color='b', ) + + #print('here') + plot1D(x=[0,len(d_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) + + plot1D(x=[0,len(d_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='',legend_size=legend_size ) + + if path != None: + fp = path + '%s'%( uid ) + '_find_bad_points' + '.png' + plt.savefig( fp, dpi=fig.dpi) + bd2= list( np.where( np.abs(d -d.mean()) > scale *d.std() )[0] + good_start ) + + if return_ylim: + return np.array( bd1 + bd2 + bd3 ), ymin, ymax,pfit + else: + return np.array( bd1 + bd2 + bd3 ), pfit + + + + +def get_bad_frame_list( imgsum, fit=True, polyfit_order = 30,legend_size = 12, + plot=True, scale=1.0, good_start=None, good_end=None, uid='uid',path=None, + + return_ylim=False): + ''' + imgsum: the sum intensity of a time series + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + ''' + if good_start == None: + good_start=0 + if good_end == None: + good_end = len( imgsum ) + bd1 = [i for i in range(0, 
good_start)] + bd3 = [i for i in range(good_end,len( imgsum ) )] + + imgsum_ = imgsum[good_start:good_end] + + if fit: + pfit = ployfit( imgsum_, order = polyfit_order) + data = imgsum_ - pfit + else: + data = imgsum_ + pfit = np.ones_like(data) * data.mean() + + ymin = data.mean()-scale *data.std() + ymax = data.mean()+scale *data.std() + + if plot: + fig = plt.figure( ) + ax = fig.add_subplot(2,1,1 ) + plot1D( imgsum_, ax = ax, color='k', legend='data',legend_size=legend_size ) + plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title=uid + '_imgsum',legend_size=legend_size ) + + ax2 = fig.add_subplot(2,1,2 ) + plot1D( data, ax = ax2,legend='difference',marker='s', color='b', ) + + #print('here') + plot1D(x=[0,len(imgsum_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size ) + + plot1D(x=[0,len(imgsum_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='imgsum_to_find_bad_frame',legend_size=legend_size ) + + if path != None: + fp = path + '%s'%( uid ) + '_imgsum_analysis' + '.png' + plt.savefig( fp, dpi=fig.dpi) + + + + bd2= list( np.where( np.abs(data -data.mean()) > scale *data.std() )[0] + good_start ) + + if return_ylim: + return np.array( bd1 + bd2 + bd3 ), ymin, ymax + else: + return np.array( bd1 + bd2 + bd3 ) + +def save_dict_csv( mydict, filename, mode='w'): + import csv + with open(filename, mode) as csv_file: + spamwriter = csv.writer(csv_file) + for key, value in mydict.items(): + spamwriter.writerow([key, value]) + + + +def read_dict_csv( filename ): + import csv + with open(filename, 'r') as csv_file: + reader = csv.reader(csv_file) + mydict = dict(reader) + return mydict + + +def find_bad_pixels( FD, bad_frame_list, uid='uid'): + bpx = [] + bpy=[] + for n in bad_frame_list: + if n>= FD.beg and n<=FD.end: + f = FD.rdframe(n) + w = np.where( f == f.max()) + if len(w[0])==1: + bpx.append( w[0][0] ) + bpy.append( w[1][0] ) + + + return trans_data_to_pd( [bpx,bpy], label=[ uid+'_x', uid +'_y' ], dtype='list') + + + + + +def mask_exclude_badpixel( bp, mask, uid ): + + for i in range( len(bp)): + mask[ int( bp[bp.columns[0]][i] ), int( bp[bp.columns[1]][i] )]=0 + return mask + + + +def print_dict( dicts, keys=None): + ''' + print keys: values in a dicts + if keys is None: print all the keys + ''' + if keys == None: + keys = list( dicts.keys()) + for k in keys: + try: + print('%s--> %s'%(k, dicts[k]) ) + except: + pass + +def get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ): + ''' + Jan 25, 2018 add default_dec opt + + Y.G. Dev Dec 8, 2016 + + Get metadata from a uid + + - Adds detector key with detector name + + Parameters: + uid: the unique data acquisition id + kwargs: overwrite the meta data, for example + get_meta_data( uid = uid, sample = 'test') --> will overwrtie the meta's sample to test + return: + meta data of the uid: a dictionay + with keys: + detector + suid: the simple given uid + uid: full uid + filename: the full path of the data + start_time: the data acquisition starting time in a human readable manner + And all the input metadata + ''' + + if 'verbose' in kwargs.keys(): # added: option to suppress output + verbose= kwargs['verbose'] + else: + verbose=True + + import time + header = db[uid] + md ={} + + md['suid'] = uid #short uid + try: + md['filename'] = get_sid_filenames(header)[2][0] + except: + md['filename'] = 'N.A.' 
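+ # next: choose which configuration device to read detector settings from; when several
+ # devices are present, the one whose name contains default_dec (default 'eiger') is used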
+ + devices = sorted( list(header.devices()) ) + if len(devices) > 1: + if verbose: # added: mute output + print( "More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'."%default_dec) + #raise ValueError("More than one device. This would have unintented consequences.") + dec = devices[0] + for dec_ in devices: + if default_dec in dec_: + dec = dec_ + + #print(dec) + #detector_names = sorted( header.start['detectors'] ) + detector_names = sorted( get_detectors(db[uid]) ) + #if len(detector_names) > 1: + # raise ValueError("More than one det. This would have unintented consequences.") + detector_name = detector_names[0] + #md['detector'] = detector_name + md['detector'] = get_detector( header ) + #print( md['detector'] ) + new_dict = header.config_data(dec)['primary'][0] + for key, val in new_dict.items(): + newkey = key.replace(detector_name+"_", "") + md[newkey] = val + + # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): + # md[ k[len(dec)+1:] ]= v + + try: + md.update(header.start['plan_args'].items()) + md.pop('plan_args') + except: + pass + md.update(header.start.items()) + + + # print(header.start.time) + md['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(header.start['time'])) + md['stop_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime( header.stop['time'])) + try: # added: try to handle runs that don't contain image data + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + md['img_shape'] = descriptor['data_keys'][md['detector']]['shape'][:2][::-1] + except: + if verbose: + print("couldn't find image shape...skip!") + else: + pass + md.update(kwargs) + + #for k, v in sorted(md.items()): + # ... + # print(f'{k}: {v}') + + return md + + + +def get_max_countc(FD, labeled_array ): + """YG. 2016, Nov 18 + Compute the max intensity of ROIs in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. 
If None, this function will extract averages for all + ROIs + + Returns + ------- + max_intensity : a float + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices( labeled_array ) + timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) + timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) + + if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) ) + + max_inten =0 + for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get max intensity of ROIs in all frames' ): + try: + (p,v) = FD.rdrawframe(i) + w = np.where( timg[p] )[0] + max_inten = max( max_inten, np.max(v[w]) ) + except: + pass + return max_inten + + +def create_polygon_mask( image, xcorners, ycorners ): + ''' + Give image and x/y coners to create a polygon mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + ''' + from skimage.draw import line_aa, line, polygon, disk + imy, imx = image.shape + bst_mask = np.zeros_like( image , dtype = bool) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) + bst_mask[rr,cc] =1 + #full_mask= ~bst_mask + return bst_mask + + +def create_rectangle_mask( image, xcorners, ycorners ): + ''' + Give image and x/y coners to create a rectangle mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + ''' + from skimage.draw import line_aa, line, polygon, disk + imy, imx = image.shape + bst_mask = np.zeros_like( image , dtype = bool) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) + bst_mask[rr,cc] =1 + #full_mask= ~bst_mask + return bst_mask + + +def create_multi_rotated_rectangle_mask( image, center=None, length=100, width=50, angles=[0] ): + ''' Developed at July 10, 2017 by Y.G.@CHX, NSLS2 + Create multi rectangle-shaped mask by rotating a rectangle with a list of angles + The original rectangle is defined by four corners, i.e., + [ (center[1] - width//2, center[0]), + (center[1] + width//2, center[0]), + (center[1] + width//2, center[0] + length), + (center[1] - width//2, center[0] + length) + ] + + Parameters: + image: 2D numpy array, to give mask shape + center: integer list, if None, will be the center of the image + length: integer, the length of the non-ratoted rectangle + width: integer, the width of the non-ratoted rectangle + angles: integer list, a list of rotated angles + + Return: + mask: 2D bool-type numpy array + ''' + + from skimage.draw import polygon + from skimage.transform import rotate + cx,cy = center + imy, imx = image.shape + mask = np.zeros( image.shape, dtype = bool) + wy = length + wx = width + x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) + y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ]) + rr, cc = polygon( y,x, shape = image.shape) + mask[rr,cc] =1 + mask_rot= np.zeros( image.shape, dtype = bool) + for angle in angles: + mask_rot += np.array( rotate( mask, angle, center= center ), dtype=bool) #, preserve_range=True) + return ~mask_rot + +def create_wedge( image, center, radius, wcors, acute_angle=True) : + '''YG develop at June 
18, 2017, @CHX + Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] + + ''' + from skimage.draw import line_aa, line, polygon, disk + imy, imx = image.shape + cy,cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like( image , dtype = bool) + rr, cc = disk((cy, cx), radius, shape = image.shape) + maskc[rr,cc] =1 + + maskp = np.zeros_like( image , dtype = bool) + x = np.array( x ) + y = np.array( y ) + print(x,y) + rr, cc = polygon( y,x, shape = image.shape) + maskp[rr,cc] =1 + if acute_angle: + return maskc*maskp + else: + return maskc*~maskp + + + +def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, + center_disk = True, center_radius=10 + ): + ''' + Give image and the beam center to create a cross-shaped mask + wy_left: the width of left h-line + wy_right: the width of rigth h-line + wx_up: the width of up v-line + wx_down: the width of down v-line + center_disk: if True, create a disk with center and center_radius + + Return: + the cross mask + ''' + from skimage.draw import line_aa, line, polygon, disk + + imy, imx = image.shape + cx,cy = center + bst_mask = np.zeros_like( image , dtype = bool) + ### + #for right part + wy = wy_right + x = np.array( [ cx, imx, imx, cx ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + ### + #for left part + wy = wy_left + x = np.array( [0, cx, cx,0 ]) + y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + ### + #for up part + wx = wx_up + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ cy, cy, imy, imy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + ### + #for low part + wx = wx_down + x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) + y = np.array( [ 0,0, cy, cy]) + rr, cc = polygon( y,x, shape = image.shape) + bst_mask[rr,cc] =1 + + if center_radius!=0: + rr, cc = disk((cy, cx), center_radius, shape = bst_mask.shape) + bst_mask[rr,cc] =1 + + + full_mask= ~bst_mask + + return full_mask + + + + + +def generate_edge( centers, width): + '''YG. 10/14/2016 + give centers and width (number or list) to get edges''' + edges = np.zeros( [ len(centers),2]) + edges[:,0] = centers - width + edges[:,1] = centers + width + return edges + + +def export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/' ): + '''YG. 
10/17/2016 + export uid data to a txt file + uid: unique scan id + x: the x-col + y: the y-cols + path: save path + Example: + data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' ) + A plot for the data: + d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') + + ''' + from databroker import DataBroker as db + from pyCHX.chx_generic_functions import trans_data_to_pd + + hdr = db[uid] + print(hdr.fields()) + data = db[uid].table() + xp = data[x] + datap = np.zeros( [len(xp), len(y)+1]) + datap[:,0] = xp + for i, yi in enumerate(y): + datap[:,i+1] = data[yi] + + datap = trans_data_to_pd( datap, label=[x] + [yi for yi in y]) + datap.to_csv( path + 'uid=%s.csv'%uid) + return datap + + + + +##### +#load data by databroker + +def get_flatfield( uid, reverse=False ): + import h5py + detector = get_detector( db[uid ] ) + sud = get_sid_filenames(db[uid]) + master_path = '%s_master.h5'%(sud[2][0]) + print( master_path) + f= h5py.File(master_path, 'r') + k= 'entry/instrument/detector/detectorSpecific/' #data_collection_date' + d= np.array( f[ k]['flatfield'] ) + f.close() + if reverse: + d = reverse_updown( d ) + + return d + + + +def get_detector( header ): + '''Get the first detector image string by giving header ''' + keys = get_detectors(header) + for k in keys: + if 'eiger' in k: + return k + +def get_detectors( header ): + '''Get all the detector image strings by giving header ''' + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + keys = [k for k, v in descriptor['data_keys'].items() if 'external' in v] + return sorted(set(keys)) + return [] + +def get_full_data_path( uid ): + '''A dirty way to get full data path''' + header = db[uid] + d = header.db + s = list(d.get_documents( db[uid ])) + #print(s[2]) + p = s[2][1]['resource_path'] + p2 = s[3][1]['datum_kwargs']['seq_id'] + #print(p,p2) + return p + '_' + str(p2) + '_master.h5' + +def get_sid_filenames(hdr,verbose=False): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(hdr,verbose=False) + hdr = db[uid] + returns (scan_id, uid, filepath) + LW 04/30/2024 + """ + import glob + from time import strftime, localtime + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5")) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2])==0: + if verbose: print('could not find detector filename from "data_path" in metadata: %s'%start_doc['data path']) + else: + if verbose: + print('Found detector filename from "data_path" in metadata!') + success=True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(start_doc['time'])) + ret = (start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('could not find detector filename in %s'%data_path) + else: + if verbose: + print('Found detector filename in %s'%data_path) + success=True + + if not success: # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path=start_doc['data path'][:-11]+strftime("%Y/%m/%d/",localtime(stop_doc['time'])) + ret = 
(start_doc["scan_id"], start_doc["uid"], glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5")) + if len(ret[2])==0: + if verbose: print('Sorry, could not find detector filename....') + else: + if verbose: + print('Found detector filename in %s'%data_path) + success=True + return ret + + +# def get_sid_filenames(header): +# """YG. Dev Jan, 2016 +# Get a bluesky scan_id, unique_id, filename by giveing uid + +# Parameters +# ---------- +# header: a header of a bluesky scan, e.g. db[-1] + +# Returns +# ------- +# scan_id: integer +# unique_id: string, a full string of a uid +# filename: sring + +# Usuage: +# sid,uid, filenames = get_sid_filenames(db[uid]) + +# """ +# from collections import defaultdict +# from glob import glob +# from pathlib import Path + +# filepaths = [] +# resources = {} # uid: document +# datums = defaultdict(list) # uid: List(document) +# for name, doc in header.documents(): +# if name == "resource": +# resources[doc["uid"]] = doc +# elif name == "datum": +# datums[doc["resource"]].append(doc) +# elif name == "datum_page": +# for datum in event_model.unpack_datum_page(doc): +# datums[datum["resource"]].append(datum) +# for resource_uid, resource in resources.items(): +# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) +# if 'eiger' not in resource['spec'].lower(): +# continue +# for datum in datums[resource_uid]: +# dm_kw = datum["datum_kwargs"] +# seq_id = dm_kw['seq_id'] +# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') +# filepaths.extend(new_filepaths) +# return header.start['scan_id'], header.start['uid'], filepaths + +def load_dask_data(uid,detector,mask_path_full,reverse=False,rot90=False): + """ + load data as dask-array + get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) + get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...) 
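+    Example (a minimal usage sketch, not part of the original docstring; assumes md was
+    obtained from get_meta_data and _mask_path_ points to the beamline mask directory):
+        dimg, img_md = load_dask_data(uid, md['detector'], _mask_path_ + 'pixel_masks/')
+        frame0 = np.asarray(dimg[0])   # materialize a single frame from the lazy array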
+    load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False)
+    uid: uid (str)
+    detector: detector name, e.g. md['detector']
+    mask_path_full: current standard would be _mask_path_+'pixel_masks/'
+    returns detector_images (dask-array), image_md
+    LW 04/26/2024
+    """
+    import dask
+    import json
+    hdr=db[uid]
+    det=detector.split('_image')[0]
+    # collect image metadata from the databroker documents (no need to read an actual image)
+    img_md_dict={'detector_distance':'det_distance','incident_wavelength':'wavelength','frame_time':'cam_acquire_period','count_time':'cam_acquire_time','num_images':'cam_num_images','beam_center_x':'beam_center_x','beam_center_y':'beam_center_y'}
+    img_md={}
+    for k in list(img_md_dict.keys()):
+        img_md[k]=hdr.config_data(det)['primary'][0]['%s_%s'%(det,img_md_dict[k])]
+    if detector in ['eiger4m_single_image','eiger1m_single_image','eiger500K_single_image']:
+        img_md.update({'y_pixel_size': 7.5e-05, 'x_pixel_size': 7.5e-05})
+        got_pixel_mask=True
+    else:
+        img_md.update({'y_pixel_size': None, 'x_pixel_size': None})
+        got_pixel_mask=False
+    # load pixel mask from static location
+    if got_pixel_mask:
+        json_open=open(_mask_path_+'pixel_masks/pixel_mask_compression_%s.json'%detector.split('_')[0])
+        mask_dict=json.load(json_open)
+        img_md['pixel_mask']=np.array(mask_dict['pixel_mask'])
+        img_md['binary_mask']=np.array(mask_dict['binary_mask'])
+        del mask_dict
+
+    # load image data as dask-array:
+    dimg=hdr.xarray_dask()[detector][0]
+    if reverse:
+        dimg=dask.array.flip(dimg,axis=1)
+    if rot90:
+        dimg=dask.array.rot90(dimg,axes=(1,2))
+    return dimg,img_md
+
+def load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, rot90=False):
+    """load bluesky scan data by giving uid and detector
+
+    Parameters
+    ----------
+    uid: unique ID of a bluesky scan
+    detector: the used area detector
+    fill: True to fill data
+    reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future)
+
+    Returns
+    -------
+    image data: a pims frames series
+    if reading the uid fails, image data is returned as 0
+
+    Usage:
+    imgs = load_data( uid, detector )
+    md = imgs.md
+    """
+    hdr = db[uid]
+
+    if False:
+        ATTEMPTS = 0
+        for attempt in range(ATTEMPTS):
+            try:
+                ev, = hdr.events(fields=[detector], fill=fill)
+                break
+
+            except Exception:
+                print('Trying again ...!')
+                if attempt == ATTEMPTS - 1:
+                    # We're out of attempts. Raise the exception to help with debugging.
+                    raise
+        else:
+            # We didn't succeed
+            raise Exception("Failed after {} repeated attempts".format(ATTEMPTS))
+
+    # TODO(mrakitin): replace with the lazy loader (when it's implemented):
+    imgs = list(hdr.data(detector))
+
+    if len(imgs[0])>=1:
+        md = imgs[0].md
+        imgs = pims.pipeline(lambda img: img)(imgs[0])
+        imgs.md = md
+
+    if reverse:
+        md = imgs.md
+        imgs = reverse_updown( imgs ) # Why not np.flipud?
+        imgs.md = md
+
+    if rot90:
+        md = imgs.md
+        imgs = rot90_clockwise( imgs )
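+        # re-attach the metadata: the pims pipeline output does not carry the .md attribute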
+ imgs.md = md + + return imgs + + +def mask_badpixels( mask, detector ): + ''' + Mask known bad pixel from the giveing mask + + ''' + if detector =='eiger1m_single_image': + #to be determined + mask = mask + elif detector =='eiger4m_single_image' or detector == 'image': + mask[513:552,:] =0 + mask[1064:1103,:] =0 + mask[1615:1654,:] =0 + mask[:,1029:1041] = 0 + mask[:, 0] =0 + mask[0:, 2069] =0 + mask[0] =0 + mask[2166] =0 + + elif detector =='eiger500K_single_image': + #to be determined + mask = mask + else: + mask = mask + return mask + + + + + +def load_data2( uid , detector = 'eiger4m_single_image' ): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + flag =1 + while flag<4 and flag !=0: + try: + ev, = hdr.events(fields=[detector]) + flag =0 + except: + flag += 1 + print ('Trying again ...!') + + if flag: + print ("Can't Load Data!") + uid = '00000' #in case of failling load data + imgs = 0 + else: + imgs = ev['data'][detector] + + #print (imgs) + return imgs + + + +def psave_obj(obj, filename ): + '''save an object with filename by pickle.dump method + This function automatically add '.pkl' as filename extension + Input: + obj: the object to be saved + filename: filename (with full path) to be saved + Return: + None + ''' + with open( filename + '.pkl', 'wb') as f: + pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) + +def pload_obj(filename ): + '''load a pickled filename + This function automatically add '.pkl' to filename extension + Input: + filename: filename (with full path) to be saved + Return: + load the object by pickle.load method + ''' + with open( filename + '.pkl', 'rb') as f: + return pickle.load(f) + + + +def load_mask( path, mask_name, plot_ = False, reverse=False, rot90=False, *argv,**kwargs): + + """load a mask file + the mask is a numpy binary file (.npy) + + Parameters + ---------- + path: the path of the mask file + mask_name: the name of the mask file + plot_: a boolen type + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + Returns + ------- + mask: array + if plot_ =True, will show the mask + + Usuage: + mask = load_mask( path, mask_name, plot_ = True ) + """ + + mask = np.load( path + mask_name ) + mask = np.array(mask, dtype = np.int32) + if reverse: + mask = mask[::-1,:] + if rot90: + mask = np.rot90( mask ) + if plot_: + show_img( mask, *argv,**kwargs) + return mask + + + +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0 ): + '''create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded + + ''' + bst_mask = np.ones_like( img , dtype = bool) + if center != None: + from skimage.draw import disk + imy, imx = img.shape + cy,cx = center + rr, cc = disk((cy, cx), center_radius,shape=img.shape ) + bst_mask[rr,cc] =0 + if outer_radius: + 
bst_mask = np.zeros_like( img , dtype = bool) + rr2, cc2 = disk((cy, cx), outer_radius,shape=img.shape ) + bst_mask[rr2,cc2] =1 + bst_mask[rr,cc] =0 + hmask = np.ones_like( img ) + hmask[np.where( img * bst_mask > threshold)]=0 + return hmask + + + + +def apply_mask( imgs, mask): + '''apply mask to imgs to produce a generator + + Usuages: + imgsa = apply_mask( imgs, mask ) + good_series = apply_mask( imgs[good_start:], mask ) + + ''' + return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask + + +def reverse_updown( imgs): + '''reverse imgs upside down to produce a generator + + Usuages: + imgsr = reverse_updown( imgs) + + + ''' + return pims.pipeline(lambda img: img[::-1,:])(imgs) # lazily apply mask + +def rot90_clockwise( imgs): + '''reverse imgs upside down to produce a generator + + Usuages: + imgsr = rot90_clockwise( imgs) + + ''' + return pims.pipeline(lambda img: np.rot90(img) )(imgs) # lazily apply mask + +def RemoveHot( img,threshold= 1E7, plot_=True ): + '''Remove hot pixel from img''' + + mask = np.ones_like( np.array( img ) ) + badp = np.where( np.array(img) >= threshold ) + if len(badp[0])!=0: + mask[badp] = 0 + if plot_: + show_img( mask ) + return mask + + +############ +###plot data + +def show_img( image, ax=None,label_array=None, alpha=0.5, interpolation='nearest', + xlim=None, ylim=None, save=False,image_name=None,path=None, + aspect=None, logs=False,vmin=None,vmax=None,return_fig=False,cmap='viridis', + show_time= False, file_name =None, ylabel=None, xlabel=None, extent=None, + show_colorbar=True, tight=True, show_ticks=True, save_format = 'png', dpi= None, + center=None,origin='lower', lab_fontsize = 16, tick_size = 12, colorbar_fontsize = 8, + use_mat_imshow=False, + *argv,**kwargs ): + """YG. Sep26, 2017 Add label_array/alpha option to show a mask on top of image + + a simple function to show image by using matplotlib.plt imshow + pass *argv,**kwargs to imshow + + Parameters + ---------- + image : array + Image to show + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax=ax + + + if center != None: + plot1D(center[1],center[0],ax=ax, c='b', m='o', legend='') + if not logs: + if not use_mat_imshow: + im=imshow(ax, image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, + extent=extent) #vmin=0,vmax=1, + else: + im=ax.imshow( image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax, + extent=extent) #vmin=0,vmax=1, + else: + if not use_mat_imshow: + im=imshow(ax, image, origin=origin,cmap=cmap, + interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + else: + im=ax.imshow(image, origin=origin,cmap=cmap, + interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) + if label_array != None: + im2=show_label_array(ax, label_array, alpha= alpha, cmap=cmap, interpolation=interpolation ) + + ax.set_title( image_name ) + if xlim != None: + ax.set_xlim( xlim ) + if ylim != None: + ax.set_ylim( ylim ) + + if not show_ticks: + ax.set_yticks([]) + ax.set_xticks([]) + else: + + ax.tick_params(axis='both', which='major', labelsize=tick_size ) + ax.tick_params(axis='both', which='minor', labelsize=tick_size ) + #mpl.rcParams['xtick.labelsize'] = tick_size + #mpl.rcParams['ytick.labelsize'] = tick_size + #print(tick_size) + + if ylabel != None: + #ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel( ylabel , fontsize = lab_fontsize ) + if xlabel != None: + ax.set_xlabel(xlabel , 
fontsize = lab_fontsize ) + + if aspect != None: + #aspect = image.shape[1]/float( image.shape[0] ) + ax.set_aspect(aspect) + else: + ax.set_aspect(aspect='auto') + + if show_colorbar: + cbar = fig.colorbar(im, extend='neither', spacing='proportional', + orientation='vertical' ) + cbar.ax.tick_params(labelsize=colorbar_fontsize) + fig.set_tight_layout(tight) + if save: + if show_time: + dt =datetime.now() + CurTime = '_%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + fp = path + '%s'%( file_name ) + CurTime + '.' + save_format + else: + fp = path + '%s'%( image_name ) + '.' + save_format + if dpi == None: + dpi = fig.dpi + plt.savefig( fp, dpi= dpi) + #fig.set_tight_layout(tight) + if return_fig: + return im #fig + + + + +def plot1D( y,x=None, yerr=None, ax=None,return_fig=False, ls='-', figsize=None,legend=None, + legend_size=None, lw=None, markersize=None, tick_size=8, *argv,**kwargs): + """a simple function to plot two-column data by using matplotlib.plot + pass *argv,**kwargs to plot + + Parameters + ---------- + y: column-y + x: column-x, by default x=None, the plot will use index of y as x-axis + the other paramaters are defined same as plt.plot + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + if figsize != None: + fig, ax = plt.subplots(figsize=figsize) + else: + fig, ax = plt.subplots() + + if legend == None: + legend = ' ' + try: + logx = kwargs['logx'] + except: + logx=False + try: + logy = kwargs['logy'] + except: + logy=False + + try: + logxy = kwargs['logxy'] + except: + logxy= False + + if logx==True and logy==True: + logxy = True + + try: + marker = kwargs['marker'] + except: + try: + marker = kwargs['m'] + except: + marker= next( markers_ ) + try: + color = kwargs['color'] + except: + try: + color = kwargs['c'] + except: + color = next( colors_ ) + + if x == None: + x=range(len(y)) + if yerr == None: + ax.plot(x,y, marker=marker,color=color,ls=ls,label= legend, lw=lw, + markersize=markersize, )#,*argv,**kwargs) + else: + ax.errorbar(x,y,yerr, marker=marker,color=color,ls=ls,label= legend, + lw=lw,markersize=markersize,)#,*argv,**kwargs) + if logx: + ax.set_xscale('log') + if logy: + ax.set_yscale('log') + if logxy: + ax.set_xscale('log') + ax.set_yscale('log') + + + ax.tick_params(axis='both', which='major', labelsize=tick_size ) + ax.tick_params(axis='both', which='minor', labelsize=tick_size ) + + if 'xlim' in kwargs.keys(): + ax.set_xlim( kwargs['xlim'] ) + if 'ylim' in kwargs.keys(): + ax.set_ylim( kwargs['ylim'] ) + if 'xlabel' in kwargs.keys(): + ax.set_xlabel(kwargs['xlabel']) + if 'ylabel' in kwargs.keys(): + ax.set_ylabel(kwargs['ylabel']) + + if 'title' in kwargs.keys(): + title = kwargs['title'] + else: + title = 'plot' + ax.set_title( title ) + #ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend!='') and (legend!=None): + ax.legend(loc = 'best', fontsize=legend_size ) + if 'save' in kwargs.keys(): + if kwargs['save']: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + #fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs['path'] + '%s'%( title ) + '.png' + plt.savefig( fp, dpi=fig.dpi) + if return_fig: + return fig + + +### + +def check_shutter_open( data_series, min_inten=0, time_edge = [0,10], plot_ = False, *argv,**kwargs): + + '''Check the first frame with shutter open + + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined 
as shtter close + time_edge: the searching frame number range + + return: + shutter_open_frame: a integer, the first frame number with open shutter + + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + + ''' + imgsum = np.array( [np.sum(img ) for img in data_series[time_edge[0]:time_edge[1]:1]] ) + if plot_: + fig, ax = plt.subplots() + ax.plot(imgsum,'bo') + ax.set_title('uid=%s--imgsum'%uid) + ax.set_xlabel( 'Frame' ) + ax.set_ylabel( 'Total_Intensity' ) + #plt.show() + shutter_open_frame = np.where( np.array(imgsum) > min_inten )[0][0] + print ('The first frame with open shutter is : %s'%shutter_open_frame ) + return shutter_open_frame + + + +def get_each_frame_intensity( data_series, sampling = 50, + bad_pixel_threshold=1e10, + plot_ = False, save= False, *argv,**kwargs): + '''Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + ''' + + #print ( argv, kwargs ) + imgsum = np.array( [np.sum(img ) for img in tqdm( data_series[::sampling] , leave = True ) ] ) + if plot_: + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + fig, ax = plt.subplots() + ax.plot(imgsum,'bo') + ax.set_title('uid= %s--imgsum'%uid) + ax.set_xlabel( 'Frame_bin_%s'%sampling ) + ax.set_ylabel( 'Total_Intensity' ) + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] + else: + uid = 'uid' + #fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + #plt.show() + + bad_frame_list = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + if len(bad_frame_list): + print ('Bad frame list are: %s' %bad_frame_list) + else: + print ('No bad frames are involved.') + return imgsum,bad_frame_list + + + + +def create_time_slice( N, slice_num, slice_width, edges=None ): + '''create a ROI time regions ''' + if edges != None: + time_edge = edges + else: + if slice_num==1: + time_edge = [ [0,N] ] + else: + tstep = N // slice_num + te = np.arange( 0, slice_num +1 ) * tstep + tc = np.int_( (te[:-1] + te[1:])/2 )[1:-1] + if slice_width%2: + sw = slice_width//2 +1 + time_edge = [ [0,slice_width], ] + [ [s-sw+1,s+sw] for s in tc ] + [ [N-slice_width,N]] + else: + sw= slice_width//2 + time_edge = [ [0,slice_width], ] + [ [s-sw,s+sw] for s in tc ] + [ [N-slice_width,N]] + + + + return np.array(time_edge) + + +def show_label_array(ax, label_array, cmap=None, aspect=None,interpolation='nearest',**kwargs): + """ + YG. Sep 26, 2017 + Modified show_label_array(ax, label_array, cmap=None, **kwargs) + from https://github.com/Nikea/xray-vision/blob/master/xray_vision/mpl_plotting/roi.py + Display a labeled array nicely + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + label_array: ndarray + Expected to be an unsigned integer array. 
0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use, defaults to 'Paired' + Returns + ------- + img : AxesImage + The artist added to the axes + """ + if cmap == None: + cmap = 'viridis' + #print(cmap) + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under('w', 0) + vmin = max(.5, kwargs.pop('vmin', .5)) + im = ax.imshow(label_array, cmap=cmap, + interpolation=interpolation, + vmin=vmin, + **kwargs) + if aspect == None: + ax.set_aspect(aspect='auto') + #ax.set_aspect('equal') + return im + + + +def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,alpha=0.3, vmin=0.1, vmax=5, + imshow_cmap='gray', **kwargs): #norm=LogNorm(), + """ + This will plot the required ROI's(labeled array) on the image + + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + image : array + The image array + label_array : array + Expected to be an unsigned integer array. 0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use for plotting the label_array, defaults to 'None' + imshow_cmap : str or colormap, optional + Color map to use for plotting the image, defaults to 'gray' + norm : str, optional + Normalize scale data, defaults to 'Lognorm()' + Returns + ------- + im : AxesImage + The artist added to the axes + im_label : AxesImage + The artist added to the axes + """ + ax.set_aspect('equal') + + #print (vmin, vmax ) + if log_img: + im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(vmin, vmax),**kwargs) #norm=norm, + else: + im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',vmin=vmin, vmax=vmax,**kwargs) #norm=norm, + + im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, + **kwargs) # norm=norm, + + + return im, im_label + + + +def show_ROI_on_image( image, ROI, center=None, rwidth=400,alpha=0.3, label_on = True, + save=False, return_fig = False, rect_reqion=None, log_img = True, vmin=0.01, vmax=5, + show_ang_cor = False,cmap = cmap_albula, fig_ax=None, + uid='uid', path='', aspect = 1, show_colorbar=True, show_roi_edge=False, *argv,**kwargs): + + '''show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center + + ''' + + + if RUN_GUI: + fig = Figure(figsize=(8,8)) + axes = fig.add_subplot(111) + elif fig_ax != None: + fig, axes = fig_ax + else: + fig, axes = plt.subplots( ) #plt.subplots(figsize=(8,8)) + + #print( vmin, vmax) + #norm=LogNorm(vmin, vmax) + + axes.set_title( "%s_ROI_on_Image"%uid ) + if log_img: + if vmin==0: + vmin += 1e-10 + + vmax = max(1, vmax ) + if not show_roi_edge: + #print('here') + im,im_label = show_label_array_on_image(axes, image, ROI, imshow_cmap='viridis', + cmap=cmap,alpha=alpha, log_img=log_img, + vmin=vmin, vmax=vmax, origin="lower") + else: + edg = get_image_edge( ROI ) + image_ = get_image_with_roi( image, ROI, scale_factor = 2) + #fig, axes = plt.subplots( ) + show_img( image_, ax=[fig,axes], vmin=vmin, vmax=vmax, + logs= log_img, image_name= "%s_ROI_on_Image"%uid, + cmap = cmap ) + + + if rect_reqion == None: + if center != None: + x1,x2 = [center[1] - rwidth, center[1] + rwidth] + y1,y2 = [center[0] - rwidth, center[0] + rwidth] + axes.set_xlim( [x1,x2]) + axes.set_ylim( [y1,y2]) + else: + x1,x2,y1,y2= rect_reqion 
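+        # zoom the axes to the explicitly requested rectangular region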
+ axes.set_xlim( [x1,x2]) + axes.set_ylim( [y1,y2]) + + if label_on: + num_qzr = len(np.unique( ROI )) -1 + for i in range( 1, num_qzr + 1 ): + ind = np.where( ROI == i)[1] + indz = np.where( ROI == i)[0] + c = '%i'%i + y_val = int( indz.mean() ) + x_val = int( ind.mean() ) + #print (xval, y) + axes.text(x_val, y_val, c, color='b',va='center', ha='center') + if show_ang_cor: + axes.text(-0.0, 0.5, '-/+180' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(1.0, 0.5, '0' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(0.5, -0.0, '-90'+ r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + axes.text(0.5, 1.0, '90' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) + + axes.set_aspect(aspect) + #fig.colorbar(im_label) + if show_colorbar: + if not show_roi_edge: + fig.colorbar(im) + if save: + fp = path + "%s_ROI_on_Image"%uid + '.png' + plt.savefig( fp, dpi=fig.dpi) + #plt.show() + if return_fig: + return fig, axes, im + + + + +def crop_image( image, crop_mask ): + + ''' Crop the non_zeros pixels of an image to a new image + + + ''' + from skimage.util import crop, pad + pxlst = np.where(crop_mask.ravel())[0] + dims = crop_mask.shape + imgwidthy = dims[1] #dimension in y, but in plot being x + imgwidthx = dims[0] #dimension in x, but in plot being y + #x and y are flipped??? + #matrix notation!!! + pixely = pxlst%imgwidthy + pixelx = pxlst//imgwidthy + + minpixelx = np.min(pixelx) + minpixely = np.min(pixely) + maxpixelx = np.max(pixelx) + maxpixely = np.max(pixely) + crops = crop_mask*image + img_crop = crop( crops, ((minpixelx, imgwidthx - maxpixelx -1 ), + (minpixely, imgwidthy - maxpixely -1 )) ) + return img_crop + + +def get_avg_img( data_series, img_samp_index=None, sampling = 100, plot_ = False , save=False, *argv,**kwargs): + '''Get average imagef from a data_series by every sampling number to save time''' + if img_samp_index == None: + avg_img = np.average(data_series[:: sampling], axis=0) + else: + avg_img = np.zeros_like( data_series[0] ) + n=0 + for i in img_samp_index: + avg_img += data_series[i] + n +=1 + avg_img = np.array( avg_img) / n + + if plot_: + fig, ax = plt.subplots() + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + + im = ax.imshow(avg_img , cmap='viridis',origin='lower', + norm= LogNorm(vmin=0.001, vmax=1e2)) + #ax.set_title("Masked Averaged Image") + ax.set_title('uid= %s--Masked Averaged Image'%uid) + fig.colorbar(im) + + if save: + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs['path'] + if 'uid' in kwargs: + uid = kwargs['uid'] + else: + uid = 'uid' + #fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + #plt.show() + + return avg_img + + + +def check_ROI_intensity( avg_img, ring_mask, ring_number=3 , save=False, plot=True, *argv,**kwargs): + + """plot intensity versus pixel of a ring + Parameters + ---------- + avg_img: 2D-array, the image + ring_mask: 2D-array + ring_number: which ring to plot + + Returns + ------- + + + """ + #print('here') + + uid = 'uid' + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number] ) + + if plot: + fig, ax = plt.subplots() + ax.set_title('%s--check-RIO-%s-intensity'%(uid, ring_number) ) + ax.plot( pixel[0][0] ,'bo', ls='-' ) + ax.set_ylabel('Intensity') + ax.set_xlabel('pixel') + if save: 
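+            # write the ROI-intensity figure to kwargs['path'] (a second save block below also writes the pixel/intensity list as CSV)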
+ path = kwargs['path'] + fp = path + "%s_Mean_intensity_of_one_ROI"%uid + '.png' + fig.savefig( fp, dpi=fig.dpi) + if save: + path = kwargs['path'] + save_lists( [range( len( pixel[0][0] )), pixel[0][0]], label=['pixel_list', 'roi_intensity'], + filename="%s_Mean_intensity_of_one_ROI"%uid, path= path) + #plt.show() + return pixel[0][0] + +#from tqdm import tqdm + +def cal_g2( image_series, ring_mask, bad_image_process, + bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None ): + '''calculation g2 by using a multi-tau algorithm''' + + noframes = len( image_series) # number of frames, not "no frames" + #num_buf = 8 # number of buffers + + if bad_image_process: + import skbeam.core.mask as mask_image + bad_img_list = np.array( bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen( image_series, bad_img_list) + + if num_lev == None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + print ('%s frames will be processed...'%(noframes)) + print( 'Bad Frames involved!') + + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm( new_imgs) ) + print( 'G2 calculation DONE!') + + else: + + if num_lev == None: + num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev)) + print ('%s frames will be processed...'%(noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series) ) + print( 'G2 calculation DONE!') + + return g2, lag_steps + + + +def run_time(t0): + '''Calculate running time of a program + Parameters + ---------- + t0: time_string, t0=time.time() + The start time + Returns + ------- + Print the running time + + One usage + --------- + t0=time.time() + .....(the running code) + run_time(t0) + ''' + + elapsed_time = time.time() - t0 + if elapsed_time<60: + print ('Total time: %.3f sec' %(elapsed_time )) + else: + print ('Total time: %.3f min' %(elapsed_time/60.)) + + +def trans_data_to_pd(data, label=None,dtype='array'): + ''' + convert data into pandas.DataFrame + Input: + data: list or np.array + label: the coloum label of the data + dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] + Output: + a pandas.DataFrame + ''' + #lists a [ list1, list2...] all the list have the same length + from numpy import arange,array + import pandas as pd,sys + if dtype == 'list': + data=array(data).T + N,M=data.shape + elif dtype == 'array': + data=array(data) + N,M=data.shape + else: + print("Wrong data type! 
Now only support 'list' and 'array' tpye") + + + index = arange( N ) + if label == None:label=['data%s'%i for i in range(M)] + #print label + df = pd.DataFrame( data, index=index, columns= label ) + return df + + +def save_lists( data, label=None, filename=None, path=None, return_res = False, verbose=False): + ''' + save_lists( data, label=None, filename=None, path=None) + + save lists to a CSV file with filename in path + Parameters + ---------- + data: list + label: the column name, the length should be equal to the column number of list + filename: the filename to be saved + path: the filepath to be saved + + Example: + save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) + ''' + + M,N = len(data[0]),len(data) + d = np.zeros( [N,M] ) + for i in range(N): + d[i] = data[i] + + df = trans_data_to_pd(d.T, label, 'array') + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = 'data' + filename = os.path.join(path, filename )#+'.csv') + df.to_csv(filename) + if verbose: + print('The data was saved in: %s.'%filename) + if return_res: + return df + +def get_pos_val_overlap( p1, v1, p2,v2, Nl): + '''get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + + get_pos_val_overlap( p1, v1, p2,v2, Nl) + + ''' + ind = np.zeros( Nl, dtype=np.int32 ) + ind[p1] = np.arange( len(p1) ) +1 + w2 = np.where( ind[p2] )[0] + w1 = ind[ p2[w2]] -1 + return v1[w1], v2[w2] + + + +def save_arrays( data, label=None, dtype='array', filename=None, path=None, return_res = False,verbose=False): + ''' + July 10, 2016, Y.G.@CHX + save_arrays( data, label=None, dtype='array', filename=None, path=None): + save data to a CSV file with filename in path + Parameters + ---------- + data: arrays + label: the column name, the length should be equal to the column number of data + dtype: array or list + filename: the filename to be saved + path: the filepath to be saved + + Example: + + save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir ) + + + ''' + df = trans_data_to_pd(data, label,dtype) + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = 'data' + filename_ = os.path.join(path, filename)# +'.csv') + df.to_csv(filename_) + if verbose: + print( 'The file: %s is saved in %s'%(filename, path) ) + #print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) + if return_res: + return df + +def cal_particle_g2( radius, viscosity, qr, taus, beta=0.2, T=298): + '''YG Dev Nov 20, 2017@CHX + calculate particle g2 fucntion by giving particle radius, Q , and solution viscosity using a simple + exponetional model + Input: + radius: m + qr, list, in A-1 + visocity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + T: temperture, in K + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(-123)*298 / ( 6*np.pi * 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10**5 A2/s + 
taus: time + beta: contrast + + cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4) + + ''' + D0 = get_diffusion_coefficient( viscosity, radius, T=T) + g2_q1 = np.zeros(len(qr), dtype = object) + for i, q1 in enumerate(qr): + relaxation_rate = D0 * q1**2 + g2_q1[i] = simple_exponential( taus, beta=beta, relaxation_rate = relaxation_rate, baseline=1) + return g2_q1 + +def get_Reynolds_number( flow_rate, flow_radius, fluid_density, fluid_viscosity ): + '''May 10, 2019, Y.G.@CHX + get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta + Reynolds_number << 1000 gives a laminar flow + flow_rate: ul/s + flow_radius: mm + fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) + fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) + + return Reynolds_number + ''' + return flow_rate * 1e-6 * flow_radius * 1e-3 *2 * fluid_density/ fluid_viscosity + +def get_Deborah_number( flow_rate, beam_size, q_vector, diffusion_coefficient ): + '''May 10, 2019, Y.G.@CHX + get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) + flow_rate: ul/s + beam_size: ul + q_vector: A-1 + diffusion_coefficient: A^2/s + + return Deborah_number + ''' + return (flow_rate /beam_size) / ( diffusion_coefficient * q_vector**2 ) + + + +def get_viscosity( diffusion_coefficient , radius, T=298): + '''May 10, 2019, Y.G.@CHX + get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient + diffusion_coefficient in unit of A^2/s + radius: m + T: K + k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant + + return visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) + ''' + + k= 1.38064852*10**(-23) + return k*T / ( 6*np.pi* diffusion_coefficient * radius) * 10**20 + +def get_diffusion_coefficient( viscosity, radius, T=298): + '''July 10, 2016, Y.G.@CHX + get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity + viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + radius: m + T: K + k: 1.38064852(79)×10−23 J/T, Boltzmann constant + + return diffusion_coefficient in unit of A^2/s + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s + + get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) + + ''' + + k= 1.38064852*10**(-23) + return k*T / ( 6*np.pi* viscosity * radius) * 10**20 + + +def ring_edges(inner_radius, width, spacing=0, num_rings=None): + """ + Aug 02, 2016, Y.G.@CHX + ring_edges(inner_radius, width, spacing=0, num_rings=None) + + Calculate the inner and outer radius of a set of rings. + + The number of rings, their widths, and any spacing between rings can be + specified. They can be uniform or varied. + + LW 04/02/2024: fixed checking whether width and spacing are iterable + + Parameters + ---------- + inner_radius : float + inner radius of the inner-most ring + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + spacing : float or list of floats, optional + margin between rings, 0 by default + If a float, all rings will have the same spacing. If a list, + the length of the list must be one less than the number of + rings. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
+ + Returns + ------- + edges : array + inner and outer radius for each ring + + Example + ------- + # Make two rings starting at r=1px, each 5px wide + >>> ring_edges(inner_radius=1, width=5, num_rings=2) + [(1, 6), (6, 11)] + # Make three rings of different widths and spacings. + # Since the width and spacings are given individually, the number of + # rings here is simply inferred. + >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2)) + [(1, 6), (7, 11), (13, 16)] + + """ + # All of this input validation merely checks that width, spacing, and + # num_rings are self-consistent and complete. + try: + iter(width) + width_is_list=True + except: width_is_list=False + try: + iter(spacing) + spacing_is_list=True + except: spacing_is_list=False + + # width_is_list = isinstance(width, collections.Iterable) + # spacing_is_list = isinstance(spacing, collections.Iterable) + if (width_is_list and spacing_is_list): + if len(width) != len(spacing) + 1: + raise ValueError("List of spacings must be one less than list " + "of widths.") + if num_rings == None: + try: + num_rings = len(width) + except TypeError: + try: + num_rings = len(spacing) + 1 + except TypeError: + raise ValueError("Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified.") + else: + if width_is_list: + if num_rings != len(width): + raise ValueError("num_rings does not match width list") + if spacing_is_list: + if num_rings-1 != len(spacing): + raise ValueError("num_rings does not match spacing list") + # Now regularlize the input. + if not width_is_list: + width = np.ones(num_rings) * width + + if spacing == None: + spacing = [] + else: + if not spacing_is_list: + spacing = np.ones(num_rings - 1) * spacing + # The inner radius is the first "spacing." + all_spacings = np.insert(spacing, 0, inner_radius) + steps = np.array([all_spacings, width]).T.ravel() + edges = np.cumsum(steps).reshape(-1, 2) + return edges + + + +def get_non_uniform_edges( centers, width = 4, number_rings=1, spacing=0, ): + ''' + YG CHX Spe 6 + get_non_uniform_edges( centers, width = 4, number_rings=3 ) + + Calculate the inner and outer radius of a set of non uniform distributed + rings by giving ring centers + For each center, there are number_rings with each of width + + LW 04/02/2024: fixed checking whether 'width' is iterable + + Parameters + ---------- + centers : float + the center of the rings + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
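+    Example (a minimal sketch of the expected output, added for illustration):
+        >>> get_non_uniform_edges( centers=np.array([20., 40.]), width=4, number_rings=1 )
+        array([[18., 22.],
+               [38., 42.]])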
+ + Returns + ------- + edges : array + inner and outer radius for each ring + ''' + + if number_rings == None: + number_rings = 1 + edges = np.zeros( [len(centers)*number_rings, 2] ) + + try: + iter(width) + except: + width = np.ones_like( centers ) * width + for i, c in enumerate(centers): + edges[i*number_rings:(i+1)*number_rings,:] = ring_edges( inner_radius = c - width[i]*number_rings/2, + width= width[i], spacing= spacing, num_rings=number_rings) + return edges + + +def trans_tf_to_td(tf, dtype = 'dframe'): + '''July 02, 2015, Y.G.@CHX + Translate epoch time to string + ''' + import pandas as pd + import numpy as np + from datetime import datetime + '''translate time.float to time.date, + td.type dframe: a dataframe + td.type list, a list + ''' + if dtype == 'dframe':ind = tf.index + else:ind = range(len(tf)) + td = np.array([ datetime.fromtimestamp(tf[i]) for i in ind ]) + return td + + + +def trans_td_to_tf(td, dtype = 'dframe'): + '''July 02, 2015, Y.G.@CHX + Translate string to epoch time + + ''' + import time + import numpy as np + '''translate time.date to time.float, + td.type dframe: a dataframe + td.type list, a list + ''' + if dtype == 'dframe':ind = td.index + else:ind = range(len(td)) + #tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([ time.mktime(td[i].timetuple()) for i in ind]) + return tf + + + +def get_averaged_data_from_multi_res( multi_res, keystr='g2', different_length= True, verbose=False, + cal_errorbar=False): + '''Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. + keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results + + ''' + maxM = 0 + mkeys = multi_res.keys() + if not different_length: + n=0 + for i, key in enumerate( list( mkeys) ): + keystri = multi_res[key][keystr] + if i ==0: + keystr_average = keystri + else: + keystr_average += keystri + n +=1 + keystr_average /=n + + else: + length_dict = {} + D= 1 + for i, key in enumerate( list( mkeys) ): + if verbose: + print(i,key) + shapes = multi_res[key][keystr].shape + M=shapes[0] + if i ==0: + if len(shapes)==2: + D=2 + maxN = shapes[1] + elif len(shapes)==3: + D=3 + maxN = shapes[2] #in case of two-time correlation + if (M) not in length_dict: + length_dict[(M) ] =1 + else: + length_dict[(M) ] += 1 + maxM = max( maxM, M ) + #print( length_dict ) + avg_count = {} + sk = np.array( sorted(length_dict) ) + for i, k in enumerate( sk ): + avg_count[k] = np.sum( np.array( [ length_dict[k] for k in sk[i:] ] ) ) + #print(length_dict, avg_count) + if D==2: + #print('here') + keystr_average = np.zeros( [maxM, maxN] ) + elif D==3: + keystr_average = np.zeros( [maxM, maxM, maxN ] ) + else: + keystr_average = np.zeros( [maxM] ) + for i, key in enumerate( list( mkeys) ): + keystri = multi_res[key][keystr] + Mi = keystri.shape[0] + if D!=3: + keystr_average[:Mi] += keystri + else: + keystr_average[:Mi,:Mi,:] += keystri + if D!=3: + keystr_average[:sk[0]] /= avg_count[sk[0]] + else: + keystr_average[:sk[0],:sk[0], : ] /= avg_count[sk[0]] + for i in range( 0, len(sk)-1 ): + if D!=3: + keystr_average[sk[i]:sk[i+1]] /= avg_count[sk[i+1]] + else: + keystr_average[sk[i]:sk[i+1],sk[i]:sk[i+1],:] /= avg_count[sk[i+1]] + + return keystr_average + + +def save_g2_general( g2, taus, qr=None, qz=None, uid='uid', path=None, 
return_res= False ): + + '''Y.G. Dec 29, 2016 + + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: + + ''' + + df = DataFrame( np.hstack( [ (taus).reshape( len(g2),1) , g2] ) ) + t,qs = g2.shape + if qr is None: + qr = range( qs ) + if qz is None: + df.columns = ( ['tau'] + [str(qr_) for qr_ in qr ] ) + else: + df.columns = ( ['tau'] + [ str(qr_) +'_'+ str(qz_) for (qr_,qz_) in zip(qr,qz) ] ) + + #dt =datetime.now() + #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + + #if filename is None: + + filename = uid + #filename = 'uid=%s--g2.csv' % (uid) + #filename += '-uid=%s-%s.csv' % (uid,CurTime) + #filename += '-uid=%s.csv' % (uid) + filename1 = os.path.join(path, filename) + df.to_csv(filename1) + print( 'The correlation function is saved in %s with filename as %s'%( path, filename)) + if return_res: + return df + + +########### +#*for g2 fit and plot + +def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): + return beta * np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline + +def simple_exponential(x, beta, relaxation_rate, baseline=1): + '''relation_rate: unit 1/s ''' + return beta * np.exp(-2 * relaxation_rate * x) + baseline + + +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * relaxation_rate * x) + baseline + +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline + + +def flow_para_function_with_vibration( x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = (1 + amp*np.cos( 2*np.pi*freq* x) ) + Diff_part= np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta* vibration_part* Diff_part * Flow_part + baseline + +def flow_para_function( x, beta, relaxation_rate, flow_velocity, baseline=1): + '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )''' + + Diff_part= np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta*Diff_part * Flow_part + baseline + + +def flow_para_function_explicitq( x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0 ): + '''Nov 9, 2017 Basically, make q vector to (qr, angle), + ###relaxation_rate is actually a diffusion rate + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + Diffusion part: np.exp( -2*D q^2 *tau ) + q_ang: would be np.radians( ang - 90 ) + + ''' + + Diff_part= np.exp(-2 * ( diffusion* qr**2 * x)**alpha ) + if flow_velocity !=0: + if np.cos( q_ang ) >= 1e-8: + Flow_part = np.pi**2/(16*x*flow_velocity*qr* abs(np.cos(q_ang)) ) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity * qr* abs(np.cos(q_ang)) ) ) )**2 + else: + Flow_part = 1 + else: + Flow_part = 1 + return beta*Diff_part * Flow_part + baseline + + + +def get_flow_velocity( average_velocity, shape_factor): + + return average_velocity * (1- shape_factor)/(1+ shape_factor) + +def stretched_flow_para_function( x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): + ''' + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + ''' + 
Diff_part= np.exp(-2 * (relaxation_rate * x)**alpha ) + Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 + return beta*Diff_part * Flow_part + baseline + + +def get_g2_fit_general_two_steps( g2, taus, function='simple_exponential', + second_fit_range=[0,20], + sequential_fit=False, *argv,**kwargs): + ''' + Fit g2 in two steps, + i) Using the "function" to fit whole g2 to get baseline and beta (contrast) + ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function + ''' + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function, sequential_fit, *argv,**kwargs) + guess_values = {} + for k in list (g2_fit_result[0].params.keys()): + guess_values[k] = np.array( [ g2_fit_result[i].params[k].value + for i in range( g2.shape[1] ) ]) + + if 'guess_limits' in kwargs: + guess_limits = kwargs['guess_limits'] + else: + guess_limits = dict( baseline =[1, 1.8], alpha=[0, 2], + beta = [0., 1], relaxation_rate= [0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function ='simple_exponential', + sequential_fit= sequential_fit, fit_range=second_fit_range, + fit_variables={'baseline':False, 'beta': False, 'alpha':False,'relaxation_rate':True}, + guess_values= guess_values, guess_limits = guess_limits ) + + return g2_fit_result, taus_fit, g2_fit + + +def get_g2_fit_general( g2, taus, function='simple_exponential', + sequential_fit=False, qval_dict = None, + ang_init = 90, *argv,**kwargs): + ''' + Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq + qval_dict: a dict with qr and ang (in unit of degrees).") + + + Dec 29,2016, Y.G.@CHX + + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + taus: the time delay + sequential_fit: if True, will use the low-q fit result as initial value to fit the higher Qs + function: + supported function include: + 'simple_exponential' (or 'simple'): fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential'(or 'streched'): fit by a streched exponential function, defined as + beta * ( np.exp( -2 * ( relaxation_rate * tau )**alpha ) + baseline + 'stretched_vibration': fit by a streched exponential function with vibration, defined as + beta * (1 + amp*np.cos( 2*np.pi*60* x) )* np.exp(-2 * (relaxation_rate * x)**alpha) + baseline + 'flow_para_function' (or flow): fit by a flow function + + + kwargs: + could contains: + 'fit_variables': a dict, for vary or not, + keys are fitting para, including + beta, relaxation_rate , alpha ,baseline + values: a False or True, False for not vary + 'guess_values': a dict, for initial value of the fitting para, + the defalut values are + dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + + 'guess_limits': a dict, for the limits of the fittting para, for example: + dict( beta=[0, 10],, alpha=[0,100] ) + the default is: + dict( baseline =[0.5, 2.5], alpha=[0, inf] ,beta = [0, 1], relaxation_rate= [0.0,1000] ) + Returns + ------- + fit resutls: a instance in limfit + tau_fit + fit_data by the model, it has the q number of g2 + + an example: + fit_g2_func = 'stretched' + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + 
fit_variables={'baseline':True, 'beta':True, 'alpha':True,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + + ''' + + if 'fit_range' in kwargs.keys(): + fit_range = kwargs['fit_range'] + else: + fit_range=None + + + num_rings = g2.shape[1] + if 'fit_variables' in kwargs: + additional_var = kwargs['fit_variables'] + _vars =[ k for k in list( additional_var.keys()) if additional_var[k] == False] + else: + _vars = [] + if function=='simple_exponential' or function=='simple': + _vars = np.unique ( _vars + ['alpha']) + mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= list( _vars) ) + elif function=='stretched_exponential' or function=='stretched': + mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= _vars) + elif function=='stretched_vibration': + mod = Model(stretched_auto_corr_scat_factor_with_vibration)#, independent_vars= _vars) + elif function=='flow_para_function' or function=='flow_para': + mod = Model(flow_para_function)#, independent_vars= _vars) + elif function=='flow_para_function_explicitq' or function=='flow_para_qang': + mod = Model(flow_para_function_explicitq)#, independent_vars= _vars) + elif function=='flow_para_function_with_vibration' or function=='flow_vibration': + mod = Model( flow_para_function_with_vibration ) + + else: + print ("The %s is not supported.The supported functions include simple_exponential and stretched_exponential"%function) + + mod.set_param_hint( 'baseline', min=0.5, max= 2.5 ) + mod.set_param_hint( 'beta', min=0.0, max=1.0 ) + mod.set_param_hint( 'alpha', min=0.0 ) + mod.set_param_hint( 'relaxation_rate', min=0.0, max= 1000 ) + mod.set_param_hint( 'flow_velocity', min=0) + mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) + + if 'guess_limits' in kwargs: + guess_limits = kwargs['guess_limits'] + for k in list( guess_limits.keys() ): + mod.set_param_hint( k, min= guess_limits[k][0], max= guess_limits[k][1] ) + + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': + mod.set_param_hint( 'flow_velocity', min=0) + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + mod.set_param_hint( 'flow_velocity', min=0) + mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) + if function=='stretched_vibration' or function=='flow_vibration': + mod.set_param_hint( 'freq', min=0) + mod.set_param_hint( 'amp', min=0) + + _guess_val = dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + if 'guess_values' in kwargs: + guess_values = kwargs['guess_values'] + _guess_val.update( guess_values ) + + _beta=_guess_val['beta'] + _alpha=_guess_val['alpha'] + _relaxation_rate = _guess_val['relaxation_rate'] + _baseline= _guess_val['baseline'] + if isinstance( _beta, (np.ndarray, list) ): + _beta_=_beta[0] + else: + _beta_=_beta + if isinstance( _baseline, (np.ndarray, list) ): + _baseline_ = _baseline[0] + else: + _baseline_ = _baseline + if isinstance( _relaxation_rate, (np.ndarray, list) ): + _relaxation_rate_= _relaxation_rate[0] + else: + _relaxation_rate_= _relaxation_rate + if isinstance( _alpha, (np.ndarray, list) ): + _alpha_ = _alpha[0] + else: + _alpha_ = _alpha + pars = mod.make_params( beta=_beta_, alpha=_alpha_, + relaxation_rate =_relaxation_rate_, baseline= _baseline_) + + if function=='flow_para_function' or function=='flow_para': + _flow_velocity =_guess_val['flow_velocity'] + if isinstance( 
_flow_velocity, (np.ndarray, list) ): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + relaxation_rate =_relaxation_rate_, baseline= _baseline_) + + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + _flow_velocity =_guess_val['flow_velocity'] + _diffusion =_guess_val['diffusion'] + _guess_val['qr'] = 1 + _guess_val['q_ang'] = 0 + if isinstance( _flow_velocity, (np.ndarray, list) ): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + if isinstance( _diffusion, (np.ndarray, list) ): + _diffusion_ = _diffusion[0] + else: + _diffusion_ = _diffusion + pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + diffusion =_diffusion_, baseline= _baseline_, + qr=1, q_ang=0 + ) + + if function=='stretched_vibration': + _freq =_guess_val['freq'] + _amp = _guess_val['amp'] + pars = mod.make_params( beta=_beta, alpha=_alpha, freq=_freq, amp = _amp, + relaxation_rate =_relaxation_rate, baseline= _baseline) + + if function=='flow_vibration': + _flow_velocity =_guess_val['flow_velocity'] + _freq =_guess_val['freq'] + _amp = _guess_val['amp'] + pars = mod.make_params( beta=_beta, freq=_freq, amp = _amp,flow_velocity=_flow_velocity, + relaxation_rate =_relaxation_rate, baseline= _baseline) + for v in _vars: + pars['%s'%v].vary = False + #print( pars ) + fit_res = [] + model_data = [] + for i in range(num_rings): + if fit_range != None: + y_=g2[1:, i][fit_range[0]:fit_range[1]] + lags_=taus[1:][fit_range[0]:fit_range[1]] + else: + y_=g2[1:, i] + lags_=taus[1:] + + mm = ~np.isnan(y_) + y = y_[mm] + lags = lags_[mm] + #print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + #y=y_ + #lags=lags_ + #print( _relaxation_rate ) + for k in list(pars.keys()): + #print(k, _guess_val[k] ) + try: + if isinstance( _guess_val[k], (np.ndarray, list) ): + pars[k].value = _guess_val[k][i] + except: + pass + + if True: + if isinstance( _beta, (np.ndarray, list) ): + #pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val['beta'][i] + if isinstance( _baseline, (np.ndarray, list) ): + #pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val['baseline'][i] + if isinstance( _relaxation_rate, (np.ndarray, list) ): + #pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val['relaxation_rate'][i] + if isinstance( _alpha, (np.ndarray, list) ): + #pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val['alpha'][i] + #for k in list(pars.keys()): + #print(k, _guess_val[k] ) + # pars[k].value = _guess_val[k][i] + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + if qval_dict == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + + pars = mod.make_params( + beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_, + diffusion =_diffusion_, baseline= _baseline_, + qr = qval_dict[i][0], q_ang = abs(np.radians( qval_dict[i][1] - ang_init) ) ) + + + pars['qr'].vary = False + pars['q_ang'].vary = False + for v in _vars: + pars['%s'%v].vary = False + + #if i==20: + # print(pars) + #print( pars ) + result1 = mod.fit(y, pars, x =lags ) + #print(qval_dict[i][0], qval_dict[i][1], y) + if sequential_fit: + for k in list(pars.keys()): + #print( pars ) + if k in list(result1.best_values.keys()): + pars[k].value = result1.best_values[k] + fit_res.append( result1) + #model_data.append( 
result1.best_fit ) + yf=result1.model.eval(params=result1.params, x= lags_ ) + model_data.append( yf ) + return fit_res, lags_, np.array( model_data ).T + + + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry='saxs'): + '''Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + ''' + + Nqs = len( qval_dict.keys()) + len_qrz = len( list( qval_dict.values() )[0] ) + #qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array( list( qval_dict.values() ) )[:,0] + if geometry=='gi_saxs' or geometry=='ang_saxs':# or geometry=='gi_waxs': + if len_qrz < 2: + print( "please give qz or qang for the q-label") + else: + #qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array( list( qval_dict.values() ) )[:,1] + else: + qz_label = np.array( [0] ) + + uqz_label = np.unique( qz_label ) + num_qz = len( uqz_label) + + uqr_label = np.unique( qr_label ) + num_qr = len( uqr_label) + + #print( uqr_label, uqz_label ) + if len( uqr_label ) >= len( uqz_label ): + master_plot= 'qz' #one qz for many sub plots of each qr + else: + master_plot= 'qr' + + mastp= master_plot + if geometry == 'ang_saxs': + mastp= 'ang' + num_short = min(num_qz, num_qr) + num_long = max(num_qz, num_qr) + + #print( mastp, num_short, num_long) + if num_qz != num_qr: + short_label = [qz_label,qr_label][ np.argmin( [num_qz, num_qr] ) ] + long_label = [qz_label,qr_label][ np.argmax( [num_qz, num_qr] ) ] + short_ulabel = [uqz_label,uqr_label][ np.argmin( [num_qz, num_qr] ) ] + long_ulabel = [uqz_label,uqr_label][ np.argmax( [num_qz, num_qr] ) ] + else: + short_label = qz_label + long_label = qr_label + short_ulabel = uqz_label + long_ulabel = uqr_label + #print( long_ulabel ) + #print( qz_label,qr_label ) + #print( short_label, long_label ) + + if geometry == 'saxs' or geometry == 'gi_waxs': + ind_long = [ range( num_long ) ] + else: + ind_long = [ np.where( short_label == i)[0] for i in short_ulabel ] + + + if Nqs == 1: + long_ulabel = list( qval_dict.values() )[0] + long_label = list( qval_dict.values() )[0] + return qr_label, qz_label, num_qz, num_qr, num_short,num_long, short_label, long_label,short_ulabel,long_ulabel, ind_long, master_plot, mastp + + +############################################ +##a good func to plot g2 for all types of geogmetries +############################################ + + + + +def plot_g2_general( g2_dict, taus_dict, qval_dict, g2_err_dict = None, + fit_res=None, geometry='saxs',filename='g2', + path=None, function='simple_exponential', g2_labels=None, + fig_ysize= 12, qth_interest = None, + ylabel='g2', return_fig=False, append_name='', outsize=(2000, 2400), + max_plotnum_fig=16, figsize=(10, 12), show_average_ang_saxs=True, + qphi_analysis = False, fontsize_sublabel = 12, + *argv,**kwargs): + ''' + Jan 10, 2018 add g2_err_dict option to plot g2 with error bar + Oct31, 2017 add qth_interest option + + Dec 26,2016, Y.G.@CHX + + Plot one/four-time correlation function (with fit) for different geometry + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2_dict: dict, format as {1: g2_1, 2: g2_2, 3: g2_3...} one-time 
correlation function, g1,g2, g3,...must have the same shape + taus_dict, dict, format {1: tau_1, 2: tau_2, 3: tau_3...}, tau1,tau2, tau3,...must have the same shape + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + + fit_res: give all the fitting parameters for showing in the plot + qth_interest: if not None: should be a list, and will only plot the qth_interest qs + filename: for the title of plot + append_name: if not None, will save as filename + append_name as filename + path: the path to save data + outsize: for gi/ang_saxs, will combine all the different qz images together with outsize + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + + one_plot: if True, plot all images in one pannel + kwargs: + + Returns + ------- + None + + ToDoList: plot an average g2 for ang_saxs for each q + + ''' + + if ylabel=='g2': + ylabel='g_2' + if ylabel=='g4': + ylabel='g_4' + + if geometry =='saxs': + if qphi_analysis: + geometry = 'ang_saxs' + if qth_interest != None: + if not isinstance(qth_interest, list): + print('Please give a list for qth_interest') + else: + #g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array( qth_interest ) -1 + g2_dict_ = {} + #taus_dict_ = {} + for k in list(g2_dict.keys()): + g2_dict_[k] = g2_dict[k][:,[i for i in qth_interest]] + #for k in list(taus_dict.keys()): + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + taus_dict_ = taus_dict + qval_dict_ = {k:qval_dict[k] for k in qth_interest} + if fit_res != None: + fit_res_ = [ fit_res[k] for k in qth_interest ] + else: + fit_res_ = None + else: + g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res + + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + #$print( num_short, num_long ) + + for s_ind in range( num_short ): + ind_long_i = ind_long[ s_ind ] + num_long_i = len( ind_long_i ) + #if show_average_ang_saxs: + # if geometry=='ang_saxs': + # num_long_i += 1 + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + #fig = plt.figure( ) + if num_long_i <=4: + if master_plot != 'qz': + fig = plt.figure(figsize=(8, 6)) + else: + if num_short>1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + #print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16 + fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ] + #print( figsize ) + else: + #print('Here') + if master_plot != 'qz': + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == 'qz': + if geometry=='ang_saxs': + title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\circ$' + elif geometry=='gi_saxs': + title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short = '' + else: #qr + if 
geometry=='ang_saxs' or geometry=='gi_saxs': + title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\AA^{-1}$' + else: + title_short='' + #print(geometry) + #filename ='' + til = '%s:--->%s'%(filename, title_short ) + if num_long_i <=4: + plt.title( til,fontsize= 14, y =1.15) + #plt.title( til,fontsize=20, y =1.06) + #print('here') + else: + plt.title( til,fontsize=20, y =1.06) + #print( num_long ) + if num_long!=1: + #print( 'here') + plt.axis('off') + #sy = min(num_long_i,4) + sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) + #fig.set_size_inches(10, 12) + #fig.set_size_inches(10, fig_ysize ) + else: + sy =1 + #fig.set_size_inches(8,6) + #plt.axis('off') + sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) + + temp = sy + sy = sx + sx = temp + + #print( num_long_i, sx, sy ) + #print( master_plot ) + #print(ind_long_i, len(ind_long_i) ) + + for i, l_ind in enumerate( ind_long_i ): + if num_long_i <= max_plotnum_fig: + #if s_ind ==2: + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + ax = fig.add_subplot(sx,sy, i + 1 ) + if sx==1: + if sy==1: + plt.axis('on') + else: + #fig_subnum = l_ind//max_plotnum_fig + #ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i//max_plotnum_fig + #print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + + + ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_plot == 'qz' or master_plot == 'angle': + if geometry!='gi_waxs': + title_long = r'$Q_r= $'+'%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' + else: + title_long = r'$Q_r= $'+'%i '%( long_label[l_ind] ) + #print( title_long,long_label,l_ind ) + else: + if geometry=='ang_saxs': + #title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) #+ r'$^\circ$' + '( %d )'%(l_ind) + elif geometry=='gi_saxs': + title_long = r'$Q_z= $'+ '%.5f '%( long_label[l_ind] ) + r'$\AA^{-1}$' + else: + title_long = '' + #print( master_plot ) + if master_plot != 'qz': + ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.1, fontsize=12) + else: + ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.05, fontsize= fontsize_sublabel) + #print( geometry ) + #print( title_long ) + if qth_interest != None:#it might have a bug here, todolist!!! 
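+                        # (added comment) when only selected q indices are plotted, re-label the
+                        # subplot title with the original, 1-based ROI number of that q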
+ lab = sorted(list(qval_dict_.keys())) + #print( lab, l_ind) + ax.set_title(title_long + ' (%s )'%( lab[l_ind] +1), y =1.05, fontsize= 12) + for ki, k in enumerate( list(g2_dict_.keys()) ): + if ki==0: + c='b' + if fit_res == None: + m='-o' + else: + m='o' + elif ki==1: + c='r' + if fit_res == None: + m='s' + else: + m='-' + elif ki==2: + c='g' + m='-D' + else: + c = colors[ki+2] + m= '-%s'%markers[ki+2] + try: + dumy = g2_dict_[k].shape + #print( 'here is the shape' ) + islist = False + except: + islist_n = len( g2_dict_[k] ) + islist = True + #print( 'here is the list' ) + if islist: + for nlst in range( islist_n ): + m = '-%s'%markers[ nlst ] + #print(m) + y=g2_dict_[k][nlst][:, l_ind ] + x = taus_dict_[k][nlst] + if ki==0: + ymin,ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + #print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst==0: + ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) + else: + ax.semilogx(x, y, m, color=c,markersize=6) + else: + yerr= g2_err_dict[k][nlst][:, l_ind ] + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) + else: + if nlst==0: + ax.errorbar(x, y, yerr=yerr, fmt=m, + color=c,markersize=6, label=g2_labels[ki]) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c,markersize=6) + ax.set_xscale("log", nonposx='clip') + if nlst==0: + if l_ind==0: + ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) + + else: + y=g2_dict_[k][:, l_ind ] + x = taus_dict_[k] + if ki==0: + ymin,ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) + else: + yerr= g2_err_dict[k][:, l_ind ] + #print(x.shape, y.shape, yerr.shape) + #print(yerr) + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6,label=g2_labels[ki] ) + ax.set_xscale("log", nonposx='clip') + if l_ind==0: + ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) + + if fit_res_ != None: + result1 = fit_res_[l_ind] + #print (result1.best_values) + + beta = result1.best_values['beta'] + baseline = result1.best_values['baseline'] + if function=='simple_exponential' or function=='simple': + rate = result1.best_values['relaxation_rate'] + alpha =1.0 + elif function=='stretched_exponential' or function=='stretched': + rate = result1.best_values['relaxation_rate'] + alpha = result1.best_values['alpha'] + elif function=='stretched_vibration': + rate = result1.best_values['relaxation_rate'] + alpha = result1.best_values['alpha'] + freq = result1.best_values['freq'] + elif function=='flow_vibration': + rate = result1.best_values['relaxation_rate'] + freq = result1.best_values['freq'] + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': + rate = result1.best_values['relaxation_rate'] + flow = result1.best_values['flow_velocity'] + if function=='flow_para_function_explicitq' or function=='flow_para_qang': + diff = result1.best_values['diffusion'] + qrr = short_ulabel[s_ind] + #print(qrr) + rate = diff * qrr**2 + flow = result1.best_values['flow_velocity'] + if qval_dict_ == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + pass + + if rate!=0: + txts = r'$\tau_0$' + r'$ = %.3f$'%(1/rate) + r'$ s$' + else: + txts = r'$\tau_0$' + r'$ = inf$' + r'$ s$' 
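+                    # (added comment) annotate the fit results on the subplot; (x, y0) below are
+                    # axes-fraction coordinates used with ax.text(..., transform=ax.transAxes)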
+ x=0.25 + y0=0.9 + fontsize = 12 + ax.text(x =x, y= y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + #print(function) + dt=0 + if function!='flow_para_function' and function!='flow_para' and function!='flow_vibration' and function!='flow_para_qang': + txts = r'$\alpha$' + r'$ = %.3f$'%(alpha) + dt +=0.1 + #txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r'$baseline$' + r'$ = %.3f$'%( baseline) + dt +=0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration' or function=='flow_para_qang': + txts = r'$flow_v$' + r'$ = %.3f$'%( flow) + dt += 0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function=='stretched_vibration' or function=='flow_vibration': + txts = r'$vibration$' + r'$ = %.1f Hz$'%( freq) + dt += 0.1 + ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r'$\beta$' + r'$ = %.3f$'%( beta ) + dt +=0.1 + ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + + if 'ylim' in kwargs: + ax.set_ylim( kwargs['ylim']) + elif 'vlim' in kwargs: + vmin, vmax =kwargs['vlim'] + try: + ax.set_ylim([ymin*vmin, ymax*vmax ]) + except: + pass + else: + pass + if 'xlim' in kwargs: + ax.set_xlim( kwargs['xlim']) + if num_short == 1: + fp = path + filename + else: + fp = path + filename + '_%s_%s'%(mastp, s_ind) + + if append_name != '': + fp = fp + append_name + fps.append( fp + '.png' ) + #if num_long_i <= 16: + if num_long_i <= max_plotnum_fig: + fig.set_tight_layout(True) + #fig.tight_layout() + #print(fig) + try: + plt.savefig( fp + '.png', dpi=fig.dpi) + except: + print('Can not save figure here.') + + else: + fps=[] + for fn, f in enumerate(fig): + f.set_tight_layout(True) + fp = path + filename + '_q_%s_%s'%(fn*16, (fn+1)*16) + if append_name != '': + fp = fp + append_name + fps.append( fp + '.png' ) + f.savefig( fp + '.png', dpi=f.dpi) + #plt.savefig( fp + '.png', dpi=fig.dpi) + #combine each saved images together + + if (num_short !=1) or (num_long_i > 16): + outputfile = path + filename + '.png' + if append_name != '': + outputfile = path + filename + append_name + '__joint.png' + else: + outputfile = path + filename + '__joint.png' + combine_images( fps, outputfile, outsize= outsize ) + if return_fig: + return fig + + + +def power_func(x, D0, power=2): + return D0 * x**power + + +def get_q_rate_fit_general( qval_dict, rate, geometry ='saxs', weights=None, *argv,**kwargs): + ''' + Dec 26,2016, Y.G.@CHX + + Fit q~rate by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+
+    Return:
+        D0
+        qrate_fit_res
+    '''
+
+    power_variable = False
+
+    if 'fit_range' in kwargs.keys():
+        fit_range = kwargs['fit_range']
+    else:
+        fit_range = None
+
+    mod = Model(power_func)
+    # mod.set_param_hint('power', min=0.5, max=10)
+    # mod.set_param_hint('D0', min=0)
+    # initial guess for D0; note: 1*10^(-5) would be a bitwise XOR in Python, so use 1e-5
+    pars = mod.make_params(power=2, D0=1e-5)
+    if power_variable:
+        pars['power'].vary = True
+    else:
+        pars['power'].vary = False
+
+    (qr_label, qz_label, num_qz, num_qr, num_short,
+     num_long, short_label, long_label, short_ulabel,
+     long_ulabel, ind_long, master_plot,
+     mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)
+
+    Nqr = num_long
+    Nqz = num_short
+    D0 = np.zeros(Nqz)
+    power = 2  # np.zeros( Nqz )
+    qrate_fit_res = []
+    for i in range(Nqz):
+        ind_long_i = ind_long[i]
+        y = np.array(rate)[ind_long_i]
+        x = long_label[ind_long_i]
+        if fit_range is not None:
+            y = y[fit_range[0]:fit_range[1]]
+            x = x[fit_range[0]:fit_range[1]]
+        _result = mod.fit(y, pars, x=x, weights=weights)
+        qrate_fit_res.append(_result)
+        D0[i] = _result.best_values['D0']
+        # power[i] = _result.best_values['power']
+        print('The fitted diffusion coefficient D0 is:  %.3e  A^2 s^-1' % D0[i])
+    return D0, qrate_fit_res
+
+
+def plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry ='saxs', ylim = None,
+                            plot_all_range=True, plot_index_range = None, show_text=True, return_fig=False,
+                            show_fit=True,
+                            *argv, **kwargs):
+    '''
+    Dec 26,2016, Y.G.@CHX
+
+    Plot rate vs. q, fitted by a power-law function; the fit curve passes through (0,0).
+
+    Parameters
+    ----------
+    qval_dict, dict, with key as roi number,
+                    format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs
+                    format as {1: [qr1], 2: [qr2] ...} for saxs
+                    format as {1: [qr1, qa1], 2: [qr2,qa2] ...} for ang-saxs
+    rate: relaxation_rate
+    plot_index_range:
+    Option:
+        if power_variable = False, power = 2 to fit q^2 ~ rate,
+        Otherwise, power is variable.
+ show_fit:, bool, if False, not show the fit + + ''' + + if 'uid' in kwargs.keys(): + uid = kwargs['uid'] + else: + uid = 'uid' + if 'path' in kwargs.keys(): + path = kwargs['path'] + else: + path = '' + (qr_label, qz_label, num_qz, num_qr, num_short, + num_long, short_label, long_label,short_ulabel, + long_ulabel,ind_long, master_plot, + mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + power = 2 + fig,ax = plt.subplots() + plt.title(r'$Q^%s$'%(power) + '-Rate-%s_Fit'%(uid),fontsize=20, y =1.06) + Nqz = num_short + if Nqz!=1: + ls = '--' + else: + ls='' + for i in range(Nqz): + ind_long_i = ind_long[ i ] + y = np.array( rate )[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values['D0'] + #print(i, x, y, D0 ) + if Nqz!=1: + label=r'$q_z=%.5f$'%short_ulabel[i] + else: + label='' + ax.plot(x**power, y, marker = 'o', ls =ls, label=label) + yfit = qrate_fit_res[i].best_fit + + if show_fit: + if plot_all_range: + ax.plot(x**power, x**power*D0, '-r') + else: + ax.plot( (x**power)[:len(yfit) ], yfit, '-r') + + if show_text: + txts = r'$D0: %.3e$'%D0 + r' $A^2$' + r'$s^{-1}$' + dy=0.1 + ax.text(x =0.15, y=.65 -dy *i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz!=1:legend = ax.legend(loc='best') + + if plot_index_range != None: + d1,d2 = plot_index_range + d2 = min( len(x)-1, d2 ) + ax.set_xlim( (x**power)[d1], (x**power)[d2] ) + ax.set_ylim( y[d1],y[d2]) + if ylim != None: + ax.set_ylim( ylim ) + + ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$)") + ax.set_xlabel("$q^%s$"r'($\AA^{-2}$)'%power) + fp = path + '%s_Q_Rate'%(uid) + '_fit.png' + fig.savefig( fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig,ax + + +def save_g2_fit_para_tocsv( fit_res, filename, path): + '''Y.G. Dec 29, 2016, + save g2 fitted parameter to csv file + ''' + col = list( fit_res[0].best_values.keys() ) + m,n = len( fit_res ), len( col ) + data = np.zeros( [m,n] ) + for i in range( m ): + data[i] = list( fit_res[i].best_values.values() ) + df = DataFrame( data ) + df.columns = col + filename1 = os.path.join(path, filename) # + '.csv') + df.to_csv(filename1) + print( "The g2 fitting parameters are saved in %s"%filename1) + return df + + + +def R_2(ydata,fit_data): + ''' Calculates R squared for a particular fit - by L.W. + usage R_2(ydata,fit_data) + returns R2 + by L.W. Feb. 2019 + ''' + y_ave=np.average(ydata) + SS_tot=np.sum((np.array(ydata)-y_ave)**2) + #print('SS_tot: %s'%SS_tot) + SS_res=np.sum((np.array(ydata)-np.array(fit_data))**2) + #print('SS_res: %s'%SS_res) + return 1-SS_res/SS_tot + +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ + points.tolist() + if len(points) ==1: + points=points[:,None] + if verbose: + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation + return modified_z_score > thresh + +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. 
all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. + verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + except: + lower_outlier_threshold = False + if verbose: print('no lower outlier threshold found') + else: + if verbose: print('ROI #%s: no outliers detected'%rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + upper_outlier_threshold = False; lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img*rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img*rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if upper_outlier_threshold or lower_outlier_threshold: + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + if upper_outlier_threshold: + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + if lower_outlier_threshold: + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: 
%s'%rn);plt.legend(loc='best',fontsize=8) + + if plot: + fig,ax = plt.subplots() + plt.imshow(hhmask) + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier threshold: %s'%outlier_threshold) + + return hhmask \ No newline at end of file diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_olog_012925.py b/pyCHX/backups/pyCHX-backup/backups/chx_olog_012925.py new file mode 100644 index 0000000..880c9f4 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_olog_012925.py @@ -0,0 +1,140 @@ +from pyOlog import Attachment, LogEntry, OlogClient, SimpleOlogClient +from pyOlog.OlogDataTypes import Logbook + + +def create_olog_entry(text, logbooks="Data Acquisition"): + """ + Create a log entry to xf11id. + + Parameters + ---------- + text : str + the text string to add to the logbook + logbooks : str, optional + the name of the logbook to update + + Returns + ------- + eid : the entry id returned from the Olog server + """ + olog_client = SimpleOlogClient() + eid = olog_client.log(text, logbooks=logbooks) + return eid + + +def update_olog_uid_with_file(uid, text, filename, append_name=""): + """ + Attach text and file (with filename) to CHX olog with entry defined by uid. + + Parameters + ---------- + uid : str + string of unique id + text : str + string to put into olog book + filename : str + file name + append_name : str + first try to attach olog with the file, if there is already a same file + in attached file, copy the file with different filename (append + append_name), and then attach to olog + """ + atch = [Attachment(open(filename, "rb"))] + + try: + update_olog_uid(uid=uid, text=text, attachments=atch) + except Exception: + from shutil import copyfile + + npname = f"{filename[:-4]}_{append_name}.pdf" + copyfile(filename, npname) + atch = [Attachment(open(npname, "rb"))] + print(f"Append {append_name} to the filename.") + update_olog_uid(uid=uid, text=text, attachments=atch) + + +def update_olog_logid_with_file(logid, text, filename=None, verbose=False): + """ + Attach text and file (with filename) to CHX olog with entry defined by + logid. + + Parameters + ---------- + logid : str + the log entry id + text : str + string to put into olog book + filename : str + file name + """ + if filename is not None: + atch = [Attachment(open(filename, "rb"))] + else: + atch = None + try: + update_olog_id(logid=logid, text=text, attachments=atch, verbose=verbose) + except Exception: + pass + + +def update_olog_id(logid, text, attachments, verbose=True): + """ + Update olog book logid entry with text and attachments files. + + Parameters + ---------- + logid : integer + the log entry id + text : str + the text to update, will add this text to the old text + attachments : ??? 
+ add new attachment files + + Example + ------- + filename1 = ('/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/' + 'Report_uid=af8f66.pdf') + atch = [Attachment(open(filename1, 'rb'))] + + update_olog_id(logid=29327, text='add_test_atch', attachmenents=atch) + """ + olog_client = SimpleOlogClient() + client = OlogClient() + url = client._url + + old_text = olog_client.find(id=logid)[0]["text"] + upd = LogEntry( + text=f"{old_text}\n{text}", + attachments=attachments, + logbooks=[Logbook(name="Operations", owner=None, active=True)], + ) + client.updateLog(logid, upd) + if verbose: + print(f"The url={url} was successfully updated with {text} and with " f"the attachments") + + +def update_olog_uid(uid, text, attachments): + """ + Update olog book logid entry cotaining uid string with text and attachments + files. + + Parameters + ---------- + uid: str + the uid of a scan or a specficial string (only gives one log entry) + text: str + the text to update, will add this text to the old text + attachments: ??? + add new attachment files + + Example + ------- + filename1 = ('/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/' + 'Report_uid=af8f66.pdf') + atch = [Attachment(open(filename1, 'rb'))] + update_olog_uid(uid='af8f66', text='Add xpcs pdf report', attachments=atch) + """ + olog_client = SimpleOlogClient() + + logid = olog_client.find(search=f"*{uid}*")[0]["id"] + update_olog_id(logid, text, attachments) diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_outlier_detection_05012024.py b/pyCHX/backups/pyCHX-backup/backups/chx_outlier_detection_05012024.py new file mode 100644 index 0000000..e211742 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_outlier_detection_05012024.py @@ -0,0 +1,98 @@ +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ + points.tolist() + if len(points) ==1: + points=points[:,None] + if verbose: + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation + return modified_z_score > thresh + +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. 
+ verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + except: + lower_outlier_threshold = False + if verbose: print('no lower outlier threshold found') + else: + if verbose: print('ROI #%s: no outliers detected'%rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + upper_outlier_threshold = False; lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img*rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img*rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if upper_outlier_threshold or lower_outlier_threshold: + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + if upper_outlier_threshold: + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + if lower_outlier_threshold: + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) + + if plot: + fig,ax = plt.subplots() + plt.imshow(hhmask) + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier 
threshold: %s'%outlier_threshold) + + return hhmask diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_packages_local-20240502.py b/pyCHX/backups/pyCHX-backup/backups/chx_packages_local-20240502.py new file mode 100644 index 0000000..828cb12 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_packages_local-20240502.py @@ -0,0 +1,302 @@ +### This enables local import of pyCHX for testing + +import pickle as cpk + +import historydict +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +# changes to current version of chx_packages.py +# added load_dask_data in generic_functions + + +#from pyCHX.chx_handlers import use_dask, use_pims +from chx_handlers import use_dask, use_pims +# from pyCHX.chx_libs import ( +from chx_libs import ( + EigerHandler, + Javascript, + LogNorm, + Model, + cmap_albula, + cmap_vge, + datetime, + db, + getpass, + h5py, + multi_tau_lags, + np, + os, + pims, + plt, + random, + roi, + time, + tqdm, + utils, + warnings, +) + +use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' + +# from pyCHX.chx_compress import ( +from chx_compress import ( + MultifileBNLCustom, + combine_binary_files, + create_compress_header, + para_compress_eigerdata, + para_segment_compress_eigerdata, + segment_compress_eigerdata, +) +# from pyCHX.chx_compress_analysis import ( +from chx_compress_analysis import ( + Multifile, + cal_each_ring_mean_intensityc, + cal_waterfallc, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + get_each_ring_mean_intensityc, + get_time_edge_avg_img, + mean_intensityc, + plot_each_ring_mean_intensityc, + plot_waterfallc, + read_compressed_eigerdata, +) +# from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +# from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +from chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +# from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +from chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +#from pyCHX.chx_generic_functions import ( +from chx_generic_functions import ( + R_2, + apply_mask, + average_array_withNan, + check_bad_uids, + check_lost_metadata, + check_ROI_intensity, + check_shutter_open, + combine_images, + copy_data, + create_cross_mask, + create_fullImg_with_box, + create_hot_pixel_mask, + create_multi_rotated_rectangle_mask, + create_polygon_mask, + create_rectangle_mask, + create_ring_mask, + create_seg_ring, + create_time_slice, + create_user_folder, + delete_data, + extract_data_from_file, + filter_roi_mask, + find_bad_pixels, + find_bad_pixels_FD, + find_good_xpcs_uids, + find_index, + find_uids, + fit_one_peak_curve, + get_averaged_data_from_multi_res, + get_avg_img, + get_bad_frame_list, + get_base_all_filenames, + get_cross_point, + get_current_pipeline_filename, + get_current_pipeline_fullpath, + get_curve_turning_points, + get_detector, + get_detectors, + get_each_frame_intensity, + get_echos, + get_eigerImage_per_file, + get_fit_by_two_linear, + get_fra_num_by_dose, + get_g2_fit_general, + get_image_edge, + get_image_with_roi, + get_img_from_iq, + get_last_uids, + get_mass_center_one_roi, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_non_uniform_edges, + get_print_uids, + get_q_rate_fit_general, 
+ get_qval_dict, + get_qval_qwid_dict, + get_roi_mask_qval_qwid_by_shift, + get_roi_nr, + get_series_g2_taus, + get_SG_norm, + get_sid_filenames, + get_today_date, + get_touched_qwidth, + get_waxs_beam_center, + lin2log_g2, + linear_fit, + load_dask_data, + load_data, + load_mask, + load_pilatus, + ls_dir, + mask_badpixels, + mask_exclude_badpixel, + move_beamstop, + pad_length, + pload_obj, + plot1D, + plot_fit_two_linear_fit, + plot_g2_general, + plot_q_g2fitpara_general, + plot_q_rate_fit_general, + plot_q_rate_general, + plot_xy_with_fit, + plot_xy_x2, + print_dict, + psave_obj, + read_dict_csv, + refine_roi_mask, + RemoveHot, + reverse_updown, + ring_edges, + run_time, + save_array_to_tiff, + save_arrays, + save_current_pipeline, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_lists, + save_oavs_tifs, + sgolay2d, + shift_mask, + show_img, + show_ROI_on_image, + shrink_image, + trans_data_to_pd, + update_qval_dict, + update_roi_mask, + validate_uid, +) +# from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +from chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +# from pyCHX.chx_specklecp import ( +from chx_specklecp import ( + get_binned_his_std, + get_contrast, + get_his_std_from_pds, + get_xsvs_fit, + plot_g2_contrast, + plot_xsvs_fit, + save_bin_his_std, + save_KM, + xsvsc, + xsvsp, +) +# from pyCH.chx_xpcs_xsvs_jupyter_V1 import( +from chx_xpcs_xsvs_jupyter_V1 import( + get_t_iqc_uids, + plot_t_iqtMq2, + plot_t_iqc_uids, + plot_entries_from_csvlist, + plot_entries_from_uids, + get_iq_from_uids, + wait_func, + wait_data_acquistion_finish, + get_uids_by_range, + get_uids_in_time_period, + do_compress_on_line, + realtime_xpcs_analysis, + compress_multi_uids, + get_two_time_mulit_uids, + get_series_g2_from_g12, + get_fra_num_by_dose, + get_series_one_time_mulit_uids, + plot_dose_g2, + run_xpcs_xsvs_single, +) +# from pyCHX.Create_Report import ( +from Create_Report import ( + create_multi_pdf_reports_for_uids, + create_one_pdf_reports_for_uids, + create_pdf_report, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + make_pdf_report, +) +#from pyCHX.DataGonio import qphiavg +from DataGonio import qphiavg +# from pyCHX.SAXS import ( +from SAXS import ( + fit_form_factor, + fit_form_factor2, + form_factor_residuals_bg_lmfit, + form_factor_residuals_lmfit, + get_form_factor_fit_lmfit, + poly_sphere_form_factor_intensity, + show_saxs_qmap, +) +#from pyCHX.Two_Time_Correlation_Function import ( +from Two_Time_Correlation_Function import ( + get_aged_g2_from_g12, + get_aged_g2_from_g12q, + get_four_time_from_two_time, + get_one_time_from_two_time, + rotate_g12q_to_rectangle, + show_C12, +) +# from pyCHX.XPCS_GiSAXS import ( +from XPCS_GiSAXS import ( + cal_1d_qr, + convert_gisaxs_pixel_to_q, + fit_qr_qz_rate, + get_1d_qr, + get_each_box_mean_intensity, + get_gisaxs_roi, + get_qedge, + get_qmap_label, + get_qr_tick_label, + get_qzr_map, + get_qzrmap, + get_reflected_angles, + get_t_qrc, + multi_uids_gisaxs_xpcs_analysis, + plot_gisaxs_g4, + plot_gisaxs_two_g2, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_qrc, + show_qzr_map, + show_qzr_roi, +) +# from pyCHX.XPCS_SAXS import ( +from XPCS_SAXS import ( + cal_g2, + combine_two_roi_mask, + create_hot_pixel_mask, + get_angular_mask, + get_circular_average, + get_cirucular_average_std, + get_each_ring_mean_intensity, + get_QrQw_From_RoiMask, + get_ring_mask, + get_seg_from_ring_mask, + get_t_iq, + 
get_t_iqc, + multi_uids_saxs_xpcs_analysis, + plot_circular_average, + plot_qIq_with_ROI, + plot_t_iqc, + recover_img_from_iq, + save_lists, +) +#from pyCHX.chx_outlier_detection import ( +from chx_outlier_detection import ( + is_outlier, + outlier_mask +) \ No newline at end of file diff --git a/pyCHX/backups/pyCHX-backup/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py b/pyCHX/backups/pyCHX-backup/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py new file mode 100644 index 0000000..6b10886 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/chx_xpcs_xsvs_jupyter_V1_05012024.py @@ -0,0 +1,1698 @@ +from pyCHX.chx_packages import * +from pyCHX.chx_libs import markers, colors +#from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict +#RUN_GUI = False +#from pyCHX.chx_libs import markers +import pandas as pds +# temporary fix: get_data() uses depreciated np.float and gets imported from pyCHX/chx_correlationc.py -> clobber function with temporary fix: +%run /nsls2/data/chx/legacy/analysis/2022_3/lwiegart/development/chx_analysis_setup.ipynb + +def get_t_iqc_uids( uid_list, setup_pargs, slice_num= 10, slice_width= 1): + '''Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids + Input: + uid_list: list of string (uid) + setup_pargs: dict, for caculation of Iq, the key of this dict should include + 'center': beam center + 'dpix': pixel size + 'lambda_': X-ray wavelength + slice_num: slice number of the time edge + slice_edge: the width of the time_edge + Output: + qs: dict, with uid as key, with value as q values + iqsts:dict, with uid as key, with value as iq values + tstamp:dict, with uid as key, with value as time values + + ''' + iqsts = {} + tstamp = {} + qs = {} + label = [] + for uid in uid_list: + md = get_meta_data( uid ) + luid = md['uid'] + timeperframe = md['cam_acquire_period'] + N = md['cam_num_images'] + filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%luid + good_start = 5 + FD = Multifile(filename, good_start, N ) + Nimg = FD.end - FD.beg + time_edge = create_time_slice( Nimg, slice_num= slice_num, slice_width= slice_width, edges = None ) + time_edge = np.array( time_edge ) + good_start + #print( time_edge ) + tstamp[uid] = time_edge[:,0] * timeperframe + qpt, iqsts[uid], qt = get_t_iqc( FD, time_edge, None, pargs=setup_pargs, nx=1500 ) + qs[uid] = qt + + return qs, iqsts, tstamp + + + + +def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf='' ): + '''plot q2~Iq at differnt time''' + if ax is None: + fig, ax = plt.subplots() + q = qt + for i in range(iqst.shape[0]): + yi = iqst[i] * q**2 + time_labeli = perf+'time_%s s'%( round( tstamp[i], 3) ) + plot1D( x = q, y = yi, legend= time_labeli, xlabel='Q (A-1)', ylabel='I(q)*Q^2', title='I(q)*Q^2 ~ time', + m=markers[i], c = colors[i], ax=ax, ylim=[ -0.001, 0.005]) #, xlim=[0.007,0.1] ) + + +def plot_t_iqc_uids( qs, iqsts, tstamps ): + '''plot q2~Iq at differnt time for a uid list + ''' + keys = list(qs.keys()) + fig, ax = plt.subplots() + for uid in keys: + qt = qs[uid] + iqst = iqsts[uid] + tstamp = tstamps[uid] + plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + '_' ) + + +def plot_entries_from_csvlist( csv_list, uid_list, inDir, key = 'g2', qth = 1, legend_size=8, + yshift= 0.01, ymulti=1, xlim=None, ylim=None,uid_length=None, + legend=None, fp_fulluid=True ): + + ''' + YG Feb2, 2018, make yshift be also a list + + YG June 9, 2017@CHX + YG Sep 29, 2017@CHX. 
+ plot enteries for a list csvs + Input: + csv_list: list, a list of uid (string) + inDir: string, imported folder for saved analysis results + key: string, plot entry, surport + 'g2' for one-time, + 'iq' for q~iq + 'mean_int_sets' for mean intensity of each roi as a function of frame + TODOLIST:#also can plot the following + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + qth: integer, the intesrest q number + yshift: float, values of shift in y direction + xlim: [x1,x2], for plot x limit + ylim: [y1,y2], for plot y limit + Output: + show the plot + Example: + uid_list = ['5492b9', '54c5e0'] + plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) + ''' + + uid_dict = {} + fig, ax =plt.subplots() + for uid in uid_list: + if uid_length is not None: + uid_ = uid[:uid_length] + else: + uid_=uid + #print(uid_) + uid_dict[uid_] = get_meta_data( uid )['uid'] + #for i, u in enumerate( list( uid_dict.keys() )): + + for i,fp in enumerate( list(csv_list)): + u = uid_list[i] #print(u) + inDiru = inDir + u + '/' + if fp_fulluid: + inDiru = inDir + uid_dict[u] + '/' + else: + inDiru = inDir + u + '/' + d = pds.read_csv( inDiru + fp ) + #print(d) + + if key == 'g2': + taus = d['tau'][1:] + col = d.columns[qth +1] + #print( qth+1, col ) + y= d[col][1:] + if legend is None: + leg=u + else: + leg='uid=%s-->'%u+legend[i] + if isinstance(yshift,list): + yshift_ = yshift[i] + ii = i + 1 + else: + yshift_ = yshift + ii = i + plot1D( x = taus, y=y + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, + xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) + title='Q = %s'%(col) + ax.set_title(title) + elif key=='imgsum': + y = total_res[key] + plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel='Frame', ylabel='imgsum',) + + elif key == 'iq': + x= total_res['q_saxs'] + y= total_res['iq_saxs'] + plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, + legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) + + else: + d = total_res[key][:,qth] + plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel= 'xx', ylabel=key ) + if key=='mean_int_sets':ax.set_xlabel( 'frame ') + if xlim is not None:ax.set_xlim(xlim) + if ylim is not None:ax.set_ylim(ylim) + return fig,ax + + +def plot_entries_from_uids( uid_list, inDir, key= 'g2', qth = 1, legend_size=8, + yshift= 0.01, ymulti=1, xlim=None, ylim=None,legend=None, uid_length = None, filename_list=None, fp_fulluid=False, fp_append = None ):#,title='' ): + + ''' + YG Feb2, 2018, make yshift be also a list + + YG June 9, 2017@CHX + YG Sep 29, 2017@CHX. 
+ plot enteries for a list uids + Input: + uid_list: list, a list of uid (string) + inDir: string, imported folder for saved analysis results + key: string, plot entry, surport + 'g2' for one-time, + 'iq' for q~iq + 'mean_int_sets' for mean intensity of each roi as a function of frame + TODOLIST:#also can plot the following + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + qth: integer, the intesrest q number + yshift: float, values of shift in y direction + xlim: [x1,x2], for plot x limit + ylim: [y1,y2], for plot y limit + Output: + show the plot + Example: + uid_list = ['5492b9', '54c5e0'] + plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) + ''' + + uid_dict = {} + fig, ax =plt.subplots() + for uid in uid_list: + if uid_length is not None: + uid_ = uid[:uid_length] + else: + uid_=uid + #print(uid_) + uid_dict[uid_] = get_meta_data( uid )['uid'] + #for i, u in enumerate( list( uid_dict.keys() )): + for i,u in enumerate( list(uid_list)): + #print(u) + if isinstance(yshift,list): + yshift_ = yshift[i] + ii = i + 1 + else: + yshift_ = yshift + ii = i + if uid_length is not None: + u = u[:uid_length] + inDiru = inDir + u + '/' + if fp_fulluid: + inDiru = inDir + uid_dict[u] + '/' + else: + inDiru = inDir + u + '/' + if filename_list is None: + if fp_append is not None: + filename = 'uid=%s%s_Res.h5'%(uid_dict[u],fp_append ) + else: + filename = 'uid=%s_Res.h5'%uid_dict[u] + else: + filename = filename_list[i] + total_res = extract_xpcs_results_from_h5( filename = filename, + import_dir = inDiru, exclude_keys = ['g12b'] ) + if key=='g2': + d = total_res[key][1:,qth] + taus = total_res['taus'][1:] + if legend is None: + leg=u + else: + leg='uid=%s-->'%u+legend[i] + plot1D( x = taus, y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=True, legend= leg, + xlabel='t (sec)', ylabel='g2', legend_size=legend_size,) + title='Q = %s'%(total_res['qval_dict'][qth]) + ax.set_title(title) + elif key=='imgsum': + d = total_res[key] + plot1D( y=d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel='Frame', ylabel='imgsum',) + + elif key == 'iq': + + x= total_res['q_saxs'] + y= total_res['iq_saxs'] + plot1D( x=x, y= y* ymulti[i] + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx= False, logy=True, + legend= u, xlabel ='Q 'r'($\AA^{-1}$)', ylabel = "I(q)" ) + + else: + d = total_res[key][:,qth] + plot1D( x = np.arange(len(d)), y= d + yshift_*ii, c=colors[i], m = markers[i], ax=ax, logx=False, legend= u, + xlabel= 'xx', ylabel=key ) + if key=='mean_int_sets':ax.set_xlabel( 'frame ') + if xlim is not None:ax.set_xlim(xlim) + if ylim is not None:ax.set_ylim(ylim) + return fig,ax + + + + + + + +#################################################################################################### +##For real time analysis## +################################################################################################# + + + + + +def get_iq_from_uids( uids, mask, setup_pargs ): + ''' Y.G. 
developed July 17, 2017 @CHX + Get q-Iq of a uids dict, each uid could corrrespond one frame or a time seriers + uids: dict, val: meaningful decription, key: a list of uids + mask: bool-type 2D array + setup_pargs: dict, at least should contains, the following paramters for calculation of I(q) + + 'Ldet': 4917.50495, + 'center': [988, 1120], + 'dpix': 0.075000003562308848, + 'exposuretime': 0.99998999, + 'lambda_': 1.2845441, + 'path': '/XF11ID/analysis/2017_2/yuzhang/Results/Yang_Pressure/', + + ''' + Nuid = len( np.concatenate( np.array( list(uids.values()) ) ) ) + label = np.zeros( [ Nuid+1], dtype=object) + img_data = {} #np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]]) + + n = 0 + for k in list(uids.keys()): + for uid in uids[k]: + + uidstr = 'uid=%s'%uid + sud = get_sid_filenames(db[uid]) + #print(sud) + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + md.update( imgs.md ); + Nimg = len(imgs); + if Nimg !=1: + filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%sud[1] + mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, + force_compress= False, para_compress= True, bad_pixel_threshold = 1e14, + bins=1, num_sub= 100, num_max_para_process= 500, with_pickle=True ) + else: + avg_img = imgs[0] + show_img( avg_img, vmin=0.00001, vmax= 1e1, logs=True, aspect=1, #save_format='tif', + image_name= uidstr + '_img_avg', save=True, + path=setup_pargs['path'], cmap = cmap_albula ) + + setup_pargs['uid'] = uidstr + + qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img, mask, + pargs= setup_pargs, save=True ) + if n ==0: + iqs = np.zeros( [ len(q_saxs), Nuid+1]) + iqs[:,0] = q_saxs + label[0] = 'q' + img_data[ k + '_'+ uid ] = avg_img + iqs[:,n+1] = iq_saxs + label[n+1] = k + '_'+ uid + n +=1 + plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()*0.9], ylim = [iq_saxs.min(), iq_saxs.max()] ) + if 'filename' in list(setup_pargs.keys()): + filename = setup_pargs['filename'] + else: + filename = 'qIq.csv' + pd = save_arrays( iqs, label=label, dtype='array', filename= filename, + path= setup_pargs['path'], return_res=True) + return pd, img_data + + + +def wait_func( wait_time = 2 ): + print( 'Waiting %s secdons for upcoming data...'%wait_time) + time.sleep( wait_time) + #print( 'Starting to do something here...') + +def wait_data_acquistion_finish( uid, wait_time = 2, max_try_num = 3 ): + '''check the completion of a data uid acquistion + Parameter: + uid: + wait_time: the waiting step in unit of second + check_func: the function to check the completion + max_try_num: the maximum number for waiting + Return: + True: completion + False: not completion (include waiting time exceeds the max_wait_time) + + ''' + FINISH = False + Fake_FINISH = True + w = 0 + sleep_time = 0 + while( not FINISH): + try: + get_meta_data( uid ) + FINISH = True + print( 'The data acquistion finished.') + print( 'Starting to do something here...') + except: + wait_func( wait_time = wait_time ) + w += 1 + print('Try number: %s'%w) + if w> max_try_num: + print( 'There could be something going wrong with data acquistion.') + print( 'Force to terminate after %s tries.'%w) + FINISH = True + Fake_FINISH = False + sleep_time += wait_time + return FINISH * Fake_FINISH #, sleep_time + +def get_uids_by_range( start_uidth=-1, end_uidth = 0 ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end uid number, i.e. 
-10, -1 + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = list([ db[n] for n in range(start_uidth, end_uidth)] ) + if len(hdrs)!=0: + print ('Totally %s uids are found.'%(len(hdrs))) + + uids=[] #short uid + fuids=[] #full uid + for hdr in hdrs: + fuid = hdr['start']['uid'] + uids.append( fuid[:8] ) + fuids.append( fuid ) + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(uids), np.array(fuids) + + +def get_uids_in_time_period( start_time, stop_time ): + '''Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + ''' + hdrs = list( db(start_time= start_time, stop_time = stop_time) ) + if len(hdrs)!=0: + print ('Totally %s uids are found.'%(len(hdrs))) + + uids=[] #short uid + fuids=[] #full uid + for hdr in hdrs: + fuid = hdr['start']['uid'] + uids.append( fuid[:8] ) + fuids.append( fuid ) + uids=uids[::-1] + fuids=fuids[::-1] + return np.array(uids), np.array(fuids) + +def do_compress_on_line( start_time, stop_time, mask_dict=None, mask=None, + wait_time = 2, max_try_num = 3 ): + '''Y.G. Mar 10, 2017 + Do on-line compress by giving start time and stop time + Parameters: + mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + Return: + running time + ''' + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + print( fuids ) + if len(fuids): + for uid in fuids: + print('*'*50) + print('Do compress for %s now...'%uid) + if db[uid]['start']['plan_name'] == 'count': + finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) + if finish: + try: + md = get_meta_data( uid ) + compress_multi_uids( [ uid ], mask=mask, mask_dict = mask_dict, + force_compress=False, para_compress= True, bin_frame_number=1 ) + + update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + except: + print('There are something wrong with this data: %s...'%uid) + print('*'*50) + return time.time() - t0 + + + +def realtime_xpcs_analysis( start_time, stop_time, run_pargs, md_update=None, + wait_time = 2, max_try_num = 3, emulation=False,clear_plot=False ): + '''Y.G. 
Mar 10, 2017 + Do on-line xpcs by giving start time and stop time + Parameters: + run_pargs: all the run control parameters, including giving roi_mask + md_update: if not None, a dict, will update all the found uid metadata by this md_update + e.g, + md['beam_center_x'] = 1012 + md['beam_center_y']= 1020 + md['det_distance']= 16718.0 + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + emulation: if True, it will only check dataset and not do real analysis + Return: + running time + ''' + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + #print( fuids ) + if len(fuids): + for uid in fuids: + print('*'*50) + #print('Do compress for %s now...'%uid) + print('Starting analysis for %s now...'%uid) + if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': + #if db[uid]['start']['dtype'] =='xpcs': + finish = wait_data_acquistion_finish( uid, wait_time,max_try_num ) + if finish: + try: + md = get_meta_data( uid ) + ##corect some metadata + if md_update is not None: + md.update( md_update ) + #if 'username' in list(md.keys()): + #try: + # md_cor['username'] = md_update['username'] + #except: + # md_cor = None + #uid = uid[:8] + #print(md_cor) + if not emulation: + #suid=uid[:6] + run_xpcs_xsvs_single( uid, run_pargs= run_pargs, md_cor = None, + return_res= False, clear_plot=clear_plot ) + #update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + except: + print('There are something wrong with this data: %s...'%uid) + else: + print('\nThis is not a XPCS series. We will simiply ignore it.') + print('*'*50) + + #print( 'Sleep 10 sec here!!!') + #time.sleep(10) + + return time.time() - t0 + + + + + + + + + + + + +#################################################################################################### +##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## +################################################################################################# +def compress_multi_uids( uids, mask, mask_dict = None, force_compress=False, para_compress= True, bin_frame_number=1, + reverse=True, rot90=False,use_local_disk=True): + ''' Compress time series data for a set of uids + Parameters: + uids: list, a list of uid + mask: bool array, mask array + force_compress: default is False, just load the compresssed data; + if True, will compress it to overwrite the old compressed data + para_compress: apply the parallel compress algorithm + bin_frame_number: + Return: + None, save the compressed data in, by default, /XF11ID/analysis/Compressed_Data with filename as + '/uid_%s.cmp' uid is the full uid string + + e.g., compress_multi_uids( uids, mask, force_compress= False, bin_frame_number=1 ) + + ''' + for uid in uids: + print('UID: %s is in processing...'%uid) + if validate_uid( uid ): + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= reverse, rot90=rot90 ) + sud = get_sid_filenames(db[uid]) + for pa in sud[2]: + if 'master.h5' in pa: + data_fullpath = pa + print( imgs, data_fullpath ) + if mask_dict is not None: + mask = mask_dict[md['detector']] + print('The detecotr is: %s'% md['detector']) + md.update( imgs.md ) + if not use_local_disk: + cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data' + else: + cmp_path = '/tmp_data/compressed' + cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data' + if bin_frame_number==1: + cmp_file = '/uid_%s.cmp'%md['uid'] + else: + cmp_file 
= '/uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) + filename = cmp_path + cmp_file + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, + force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold = 1e14, + reverse=reverse, rot90=rot90, + bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True, + direct_load_data =use_local_disk, data_path = data_fullpath, ) + + print('Done!') + + +#################################################################################################### +##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## +################################################################################################# + +def get_two_time_mulit_uids( uids, roi_mask, norm= None, bin_frame_number=1, path=None, force_generate=False, + md=None, imgs=None,direct_load_data=False,compress_path=None ): + + ''' Calculate two time correlation by using auto_two_Arrayc func for a set of uids, + if the two-time resutls are already created, by default (force_generate=False), just pass + Parameters: + uids: list, a list of uid + roi_mask: bool array, roi mask array + norm: the normalization array + path: string, where to save the two time + force_generate: default, False, if the two-time resutls are already created, just pass + if True, will force to calculate two-time no matter exist or not + + Return: + None, save the two-time in as path + uid + 'uid=%s_g12b'%uid + + e.g., + get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, + path= data_dir,force_generate=False ) + + ''' + + qind, pixelist = roi.extract_label_indices(roi_mask) + for uid in uids: + print('UID: %s is in processing...'%uid) + if not direct_load_data: + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + else: + pass + N = len(imgs) + #print( N ) + if compress_path is None: + compress_path = '/XF11ID/analysis/Compressed_Data/' + if bin_frame_number==1: + filename = '%s'%compress_path +'uid_%s.cmp'%md['uid'] + else: + filename = '%s'%compress_path +'uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number) + + FD = Multifile(filename, 0, N//bin_frame_number) + #print( FD.beg, FD.end) + uid_ = md['uid'] + os.makedirs(path + uid_ + '/', exist_ok=True) + filename = path + uid_ + '/' + 'uid=%s_g12b'%uid + doit = True + if not force_generate: + if os.path.exists( filename + '.npy'): + doit=False + print('The two time correlation function for uid=%s is already calculated. 
Just pass...'%uid) + if doit: + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm= norm ).get_data() + g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None ) + np.save( filename, g12b) + del g12b + print( 'The two time correlation function for uid={} is saved as {}.'.format(uid, filename )) + + + + + + +def get_series_g2_from_g12( g12b, fra_num_by_dose = None, dose_label = None, + good_start=0, log_taus = True, num_bufs=8, time_step=1 ): + ''' + Get a series of one-time function from two-time by giving noframes + Parameters: + g12b: a two time function + good_start: the start frame number + fra_num_by_dose: a list, correlation number starting from index 0, + if this number is larger than g12b length, will give a warning message, and + will use g12b length to replace this number + by default is None, will = [ g12b.shape[0] ] + dose_label: the label of each dose, also is the keys of returned g2, lag + log_taus: if true, will only return a g2 with the correponding tau values + as calculated by multi-tau defined taus + Return: + + g2_series, a dict, with keys as dose_label (corrected on if warning message is given) + lag_steps, the corresponding lags + + ''' + g2={} + lag_steps = {} + L,L,qs= g12b.shape + if fra_num_by_dose is None: + fra_num_by_dose = [L] + if dose_label is None: + dose_label = fra_num_by_dose + fra_num_by_dose = sorted( fra_num_by_dose ) + dose_label = sorted( dose_label ) + for i, good_end in enumerate(fra_num_by_dose): + key = round(dose_label[i] ,3) + #print( good_end ) + if good_end>L: + warnings.warn("Warning: the dose value is too large, and please check the maxium dose in this data set and give a smaller dose value. We will use the maxium dose of the data.") + good_end = L + if not log_taus: + g2[ key ] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] ) + else: + #print( good_end, num_bufs ) + lag_step = get_multi_tau_lag_steps(good_end, num_bufs) + lag_step = lag_step[ lag_step < good_end - good_start] + #print( len(lag_steps ) ) + lag_steps[key] = lag_step * time_step + g2[key] = get_one_time_from_two_time(g12b[good_start:good_end,good_start:good_end,:] )[lag_step] + + return lag_steps, g2 + + +def get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ): + ''' + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + ''' + return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att ) + + + +def get_series_one_time_mulit_uids( uids, qval_dict, trans = None, good_start=0, path=None, + exposure_dose = None, dead_time = 0, + num_bufs =8, save_g2=True, + md = None, imgs=None, direct_load_data= False ): + ''' Calculate a dose depedent series of one time correlations from two time + Parameters: + uids: list, a list of uid + trans: list, same length as uids, the transmission list + exposure_dose: list, a list x-ray exposure dose; + by default is None, namely, = [ max_frame_number ], + can be [3.34 334, 3340] in unit of ms, in unit of exp_time(ms)*N(fram num)*att( attenuation) + 
path: string, where to load the two time, if None, ask for it + the real g12 path is two_time_path + uid + '/' + qval_dict: the dictionary for q values + Return: + taus_uids, with keys as uid, and + taus_uids[uid] is also a dict, with keys as dose_frame + g2_uids, with keys as uid, and + g2_uids[uid] is also a dict, with keys as dose_frame + will also save g2 results to the 'path' + ''' + + if path is None: + print( 'Please calculate two time function first by using get_two_time_mulit_uids function.') + else: + taus_uids = {} + g2_uids = {} + for i, uid in enumerate(uids): + print('UID: %s is in processing...'%uid) + if not direct_load_data: + md = get_meta_data( uid ) + imgs = load_data( uid, md['detector'], reverse= True ) + #print(md) + detectors = md['detector'] + if isinstance( detectors,list): + if len(detectors)>1: + if '_image' in md['detector']: + pref = md['detector'][:-5] + else: + pref=md['detector'] + for k in [ 'beam_center_x', 'beam_center_y','cam_acquire_time','cam_acquire_period','cam_num_images', + 'wavelength', 'det_distance', 'photon_energy']: + md[k] = md[ pref + '%s'%k] + + else: + pass + N = len(imgs) + if exposure_dose is None: + exposure_dose = [N] + try: + g2_path = path + uid + '/' + g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) + except: + g2_path = path + md['uid'] + '/' + g12b = np.load( g2_path + 'uid=%s_g12b.npy'%uid) + try: + exp_time = float( md['cam_acquire_time']) #*1000 #from second to ms + except: + exp_time = float( md['exposure time']) #* 1000 #from second to ms + if trans is None: + try: + transi = md['transmission'] + except: + transi = [1] + else: + transi = trans[i] + fra_num_by_dose = get_fra_num_by_dose( exp_dose = exposure_dose, + exp_time =exp_time, dead_time = dead_time, att = transi ) + + print( 'uid: %s--> fra_num_by_dose: %s'%(uid, fra_num_by_dose ) ) + + taus_uid, g2_uid = get_series_g2_from_g12( g12b, fra_num_by_dose=fra_num_by_dose, + dose_label = exposure_dose, + good_start=good_start, num_bufs=num_bufs, + time_step = exp_time)#md['cam_acquire_period'] ) + g2_uids['uid_%03d=%s'%(i,uid)] = g2_uid + taus_uids['uid_%03d=%s'%(i,uid)] = taus_uid + if save_g2: + for k in list( g2_uid.keys()): + #print(k) + uid_ = uid + '_fra_%s_%s'%(good_start, k ) + save_g2_general( g2_uid[k], taus=taus_uid[k],qr=np.array( list( qval_dict.values() ) )[:,0], + uid=uid_+'_g2.csv', path= g2_path, return_res=False ) + return taus_uids, g2_uids + + + + +def plot_dose_g2( taus_uids, g2_uids, qval_dict, qth_interest = None, ylim=[0.95, 1.05], vshift=0.1, + fit_res= None, geometry= 'saxs',filename= 'dose'+'_g2', legend_size=None, + path= None, function= None, g2_labels=None, ylabel= 'g2_dose', append_name= '_dose', + return_fig=False): + '''Plot a does-dependent g2 + taus_uids, dict, with format as {uid1: { dose1: tau_1, dose2: tau_2...}, uid2: ...} + g2_uids, dict, with format as {uid1: { dose1: g2_1, dose2: g2_2...}, uid2: ...} + qval_dict: a dict of qvals + vshift: float, vertical shift value of different dose of g2 + + ''' + + uids = sorted( list( taus_uids.keys() ) ) + #print( uids ) + dose = sorted( list( taus_uids[ uids[0] ].keys() ) ) + if qth_interest is None: + g2_dict= {} + taus_dict = {} + if g2_labels is None: + g2_labels = [] + for i in range( len( dose )): + g2_dict[i + 1] = [] + taus_dict[i +1 ] = [] + #print ( i ) + for j in range( len( uids )): + #print( uids[i] , dose[j]) + g2_dict[i +1 ].append( g2_uids[ uids[j] ][ dose[i] ] + vshift*i ) + taus_dict[i +1 ].append( taus_uids[ uids[j] ][ dose[i] ] ) + if j ==0: + g2_labels.append( 
'Dose_%s'%dose[i] ) + + plot_g2_general( g2_dict, taus_dict, + ylim=[ylim[0], ylim[1] + vshift * len(dose)], + qval_dict = qval_dict, fit_res= None, geometry= geometry,filename= filename, + path= path, function= function, ylabel= ylabel, g2_labels=g2_labels, append_name= append_name ) + + else: + fig,ax= plt.subplots() + q = qval_dict[qth_interest-1][0] + j = 0 + for uid in uids: + #uid = uids[0] + #print( uid ) + dose_list = sorted( list(taus_uids['%s'%uid].keys()) ) + #print( dose_list ) + for i, dose in enumerate(dose_list): + dose = float(dose) + if j ==0: + legend= 'dose_%s'%round(dose,2) + else: + legend = '' + + #print( markers[i], colors[i] ) + + plot1D(x= taus_uids['%s'%uid][dose_list[i]], + y =g2_uids['%s'%uid][dose_list[i]][:,qth_interest] + i*vshift, + logx=True, ax=ax, legend= legend, m = markers[i], c= colors[i], + lw=3, title='%s_Q=%s'%(uid, q) + r'$\AA^{-1}$', legend_size=legend_size ) + ylabel='g2--Dose (trans*exptime_sec)' + j +=1 + + ax.set_ylabel( r"$%s$"%ylabel + '(' + r'$\tau$' + ')' ) + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + ax.set_ylim ( ylim ) + if return_fig: + return fig, ax + #return taus_dict, g2_dict + + + + +def run_xpcs_xsvs_single( uid, run_pargs, md_cor=None, return_res=False,reverse=True, clear_plot=False ): + '''Y.G. Dec 22, 2016 + Run XPCS XSVS analysis for a single uid + Parameters: + uid: unique id + run_pargs: dict, control run type and setup parameters, such as q range et.al. + reverse:,True, revserse the image upside down + Return: + save analysis result to csv/png/h5 files + return_res: if true, return a dict, containing g2,g4,g12,contrast et.al. depending on the run type + An example for the run_pargs: + + run_pargs= dict( + scat_geometry = 'gi_saxs' #suport 'saxs', 'gi_saxs', 'ang_saxs' (for anisotropics saxs or flow-xpcs) + force_compress = True,#False, + para_compress = True, + run_fit_form = False, + run_waterfall = True,#False, + run_t_ROI_Inten = True, + #run_fit_g2 = True, + fit_g2_func = 'stretched', + run_one_time = True,#False, + run_two_time = True,#False, + run_four_time = False, + run_xsvs=True, + att_pdf_report = True, + show_plot = False, + + CYCLE = '2016_3', + mask_path = '/XF11ID/analysis/2016_3/masks/', + mask_name = 'Nov28_4M_SAXS_mask.npy', + good_start = 5, + + uniformq = True, + inner_radius= 0.005, #0.005 for 50 nm, 0.006, #for 10nm/coralpor + outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor + num_rings = 12, + gap_ring_number = 6, + number_rings= 1, + #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + #width = 0.0002 + qth_interest = 1, #the intested single qth + use_sqnorm = False, + use_imgsum_norm = True, + + pdf_version = '_1' #for pdf report name + ) + + md_cor: if not None, will update the metadata with md_cor + + ''' + + scat_geometry = run_pargs['scat_geometry'] + force_compress = run_pargs['force_compress'] + para_compress = run_pargs['para_compress'] + run_fit_form = run_pargs['run_fit_form'] + run_waterfall = run_pargs['run_waterfall'] + run_t_ROI_Inten = run_pargs['run_t_ROI_Inten'] + + #run_fit_g2 = run_pargs['run_fit_g2'], + fit_g2_func = run_pargs['fit_g2_func'] + run_one_time = run_pargs['run_one_time'] + run_two_time = run_pargs['run_two_time'] + run_four_time = run_pargs['run_four_time'] + run_xsvs=run_pargs['run_xsvs'] + try: + run_dose = run_pargs['run_dose'] + except: + run_dose= False + ############################################################### + if scat_geometry =='gi_saxs': #to be done for other types + run_xsvs = False; + 
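+    # Note: speckle visibility (XSVS) analysis is not yet supported for the
+    # 'gi_saxs' geometry ("to be done for other types"), so it is switched off
+    # here regardless of the run_xsvs flag passed in run_pargs.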
############################################################### + + ############################################################### + if scat_geometry == 'ang_saxs': + run_xsvs= False;run_waterfall=False;run_two_time=False;run_four_time=False;run_t_ROI_Inten=False; + ############################################################### + if 'bin_frame' in list( run_pargs.keys() ): + bin_frame = run_pargs['bin_frame'] + bin_frame_number= run_pargs['bin_frame_number'] + else: + bin_frame = False + if not bin_frame: + bin_frame_number = 1 + + att_pdf_report = run_pargs['att_pdf_report'] + show_plot = run_pargs['show_plot'] + CYCLE = run_pargs['CYCLE'] + mask_path = run_pargs['mask_path'] + mask_name = run_pargs['mask_name'] + good_start = run_pargs['good_start'] + use_imgsum_norm = run_pargs['use_imgsum_norm'] + try: + use_sqnorm = run_pargs['use_sqnorm'] + except: + use_sqnorm = False + try: + inc_x0 = run_pargs['inc_x0'] + inc_y0 = run_pargs['inc_y0'] + except: + inc_x0 = None + inc_y0= None + + #for different scattering geogmetry, we only need to change roi_mask + #and qval_dict + qval_dict = run_pargs['qval_dict'] + if scat_geometry != 'ang_saxs': + roi_mask = run_pargs['roi_mask'] + qind, pixelist = roi.extract_label_indices( roi_mask ) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + + else: + roi_mask_p = run_pargs['roi_mask_p'] + qval_dict_p = run_pargs['qval_dict_p'] + roi_mask_v = run_pargs['roi_mask_v'] + qval_dict_v = run_pargs['qval_dict_v'] + + if scat_geometry == 'gi_saxs': + refl_x0 = run_pargs['refl_x0'] + refl_y0 = run_pargs['refl_y0'] + Qr, Qz, qr_map, qz_map = run_pargs['Qr'], run_pargs['Qz'], run_pargs['qr_map'], run_pargs['qz_map'] + + + taus=None;g2=None;tausb=None;g2b=None;g12b=None;taus4=None;g4=None;times_xsv=None;contrast_factorL=None; + qth_interest = run_pargs['qth_interest'] + pdf_version = run_pargs['pdf_version'] + + + try: + username = run_pargs['username'] + except: + username = getpass.getuser() + + data_dir0 = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') + os.makedirs(data_dir0, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir0) + #uid = (sys.argv)[1] + print ('*'*40) + print ( '*'*5 + 'The processing uid is: %s'%uid + '*'*5) + print ('*'*40) + suid = uid #[:6] + data_dir = os.path.join(data_dir0, '%s/'%suid) + os.makedirs(data_dir, exist_ok=True) + print('Results from this analysis will be stashed in the directory %s' % data_dir) + md = get_meta_data( uid ) + uidstr = 'uid=%s'%uid[:6] + imgs = load_data( uid, md['detector'], reverse= reverse ) + md.update( imgs.md ) + Nimg = len(imgs) + if md_cor is not None: + md.update( md_cor ) + + + if inc_x0 is not None: + md['beam_center_x']= inc_x0 + if inc_y0 is not None: + md['beam_center_y']= inc_y0 + + #print( run_pargs ) + #print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) + #print( inc_x0, inc_y0 ) + + if md['detector'] =='eiger1m_single_image': + Chip_Mask=np.load( '/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy') + elif md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': + Chip_Mask= np.array(np.load( '/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy'), dtype=bool) + BadPix = np.load('/XF11ID/analysis/2018_1/BadPix_4M.npy' ) + Chip_Mask.ravel()[BadPix] = 0 + elif md['detector'] =='eiger500K_single_image': + Chip_Mask= 1 #to be defined the chip mask + else: + Chip_Mask = 1 + #show_img(Chip_Mask) + + center = [ int(md['beam_center_y']),int( md['beam_center_x'] ) ] #beam center [y,x] for 
python image + + + pixel_mask = 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool) ) + print( 'The data are: %s' %imgs ) + + if False: + print_dict( md, ['suid', 'number of images', 'uid', 'scan_id', 'start_time', 'stop_time', 'sample', 'Measurement', + 'acquire period', 'exposure time', + 'det_distanc', 'beam_center_x', 'beam_center_y', ] ) + ## Overwrite Some Metadata if Wrong Input + dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( + md, Nimg, inc_x0 = inc_x0, inc_y0= inc_y0, pixelsize = 7.5*10*(-5) ) + + print( 'The beam center is: %s'%center ) + + timeperframe *= bin_frame_number + + setup_pargs=dict(uid=uidstr, dpix= dpix, Ldet=Ldet, lambda_= lambda_, exposuretime=exposuretime, + timeperframe=timeperframe, center=center, path= data_dir) + #print_dict( setup_pargs ) + + mask = load_mask(mask_path, mask_name, plot_ = False, image_name = uidstr + '_mask', reverse=reverse ) + mask *= pixel_mask + if md['detector'] =='eiger4m_single_image': + mask[:,2069] =0 # False #Concluded from the previous results + show_img(mask,image_name = uidstr + '_mask', save=True, path=data_dir) + mask_load=mask.copy() + imgsa = apply_mask( imgs, mask ) + + + img_choice_N = 2 + img_samp_index = random.sample( range(len(imgs)), img_choice_N) + avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uidstr) + + if avg_img.max() == 0: + print('There are no photons recorded for this uid: %s'%uid) + print('The data analysis should be terminated! Please try another uid.') + + else: + if scat_geometry !='saxs': + show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + image_name= uidstr + '_%s_frames_avg'%img_choice_N, save=True, path=data_dir) + else: + show_saxs_qmap( avg_img, setup_pargs, width=400, show_pixel = False, + vmin=.1, vmax= np.max(avg_img), logs=True, image_name= uidstr + '_%s_frames_avg'%img_choice_N ) + + compress=True + photon_occ = len( np.where(avg_img)[0] ) / ( imgsa[0].size) + #compress = photon_occ < .4 #if the photon ocupation < 0.5, do compress + print ("The non-zeros photon occupation is %s."%( photon_occ)) + print("Will " + 'Always ' + ['NOT', 'DO'][compress] + " apply compress process.") + #good_start = 5 #make the good_start at least 0 + t0= time.time() + filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid'] + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename, + force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold= 1e14, + bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True ) + min_inten = 10 + good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] ) + print ('The good_start frame number is: %s '%good_start) + FD = Multifile(filename, good_start, len(imgs)) + #FD = Multifile(filename, good_start, 100) + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + print( uid_ ) + plot1D( y = imgsum[ np.array( [i for i in np.arange(good_start, len(imgsum)) if i not in bad_frame_list])], + title =uidstr + '_imgsum', xlabel='Frame', ylabel='Total_Intensity', legend='imgsum' ) + run_time(t0) + + mask = mask * Chip_Mask + + #%system free && sync && echo 3 > /proc/sys/vm/drop_caches && free + ## Get bad frame list by a polynominal fit + bad_frame_list = get_bad_frame_list( imgsum, fit=True, plot=True,polyfit_order = 30, + scale= 5.5, good_start = good_start, uid= uidstr, path=data_dir) + print( 'The bad frame list length is: %s'%len(bad_frame_list) ) + + ### Creat new mask by masking the bad pixels and get new avg_img + if 
False: + mask = mask_exclude_badpixel( bp, mask, md['uid']) + avg_img = get_avg_imgc( FD, sampling = 1, bad_frame_list=bad_frame_list ) + + show_img( avg_img, vmin=.001, vmax= np.max(avg_img), logs=True, aspect=1, #save_format='tif', + image_name= uidstr + '_img_avg', save=True, path=data_dir, cmap = cmap_albula ) + + imgsum_y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])] + imgsum_x = np.arange( len( imgsum_y)) + save_lists( [imgsum_x, imgsum_y], label=['Frame', 'Total_Intensity'], + filename=uidstr + '_img_sum_t', path= data_dir ) + plot1D( y = imgsum_y, title = uidstr + '_img_sum_t', xlabel='Frame', + ylabel='Total_Intensity', legend='imgsum', save=True, path=data_dir) + + + ############for SAXS and ANG_SAXS (Flow_SAXS) + if scat_geometry =='saxs' or scat_geometry =='ang_saxs': + + #show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + # image_name= uidstr + '_img_avg', save=True) + #np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) + + #try: + # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) + #except: + # hmask=1 + hmask=1 + qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True ) + + plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs= setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()], ylim = [iq_saxs.min(), iq_saxs.max()] ) + + #pd = trans_data_to_pd( np.where( hmask !=1), + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + + #pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) + + #mask =np.array( mask * hmask, dtype=bool) + #show_img( mask ) + + if run_fit_form: + form_res = fit_form_factor( q_saxs,iq_saxs, guess_values={'radius': 2500, 'sigma':0.05, + 'delta_rho':1E-10 }, fit_range=[0.0001, 0.015], fit_variables={'radius': T, 'sigma':T, + 'delta_rho':T}, res_pargs=setup_pargs, xlim=[0.0001, 0.015]) + + show_ROI_on_image( avg_img, roi_mask, center, label_on = False, rwidth =700, alpha=.9, + save=True, path=data_dir, uid=uidstr, vmin= np.min(avg_img), vmax= np.max(avg_img) ) + + qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) + plot_qIq_with_ROI( q_saxs, iq_saxs, qr, logs=True, uid=uidstr, xlim=[q_saxs.min(), q_saxs.max()], + ylim = [iq_saxs.min(), iq_saxs.max()], save=True, path=data_dir) + + if scat_geometry != 'ang_saxs': + Nimg = FD.end - FD.beg + time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) + time_edge = np.array( time_edge ) + good_start + #print( time_edge ) + qpt, iqst, qt = get_t_iqc( FD, time_edge, mask* Chip_Mask, pargs=setup_pargs, nx=1500 ) + plot_t_iqc( qt, iqst, time_edge, pargs=setup_pargs, xlim=[qt.min(), qt.max()], + ylim = [iqst.min(), iqst.max()], save=True ) + + elif scat_geometry == 'gi_waxs': + #roi_mask[badpixel] = 0 + qr = np.array( [ qval_dict[k][0] for k in list( qval_dict.keys()) ] ) + show_ROI_on_image( avg_img, roi_mask, label_on = True, alpha=.5,save=True, path= data_dir, uid=uidstr)#, vmin=1, vmax=15) + + elif scat_geometry == 'gi_saxs': + show_img( avg_img, vmin=.1, vmax=np.max(avg_img*.1), + logs=True, image_name= uidstr + '_img_avg', save=True, path=data_dir) + ticks_ = get_qzr_map( qr_map, qz_map, inc_x0, Nzline=10, Nrline=10 ) + ticks = ticks_[:4] + plot_qzr_map( qr_map, qz_map, inc_x0, ticks = ticks_, data= avg_img, uid= uidstr, path = data_dir ) + show_qzr_roi( avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, 
uid=uidstr ) + qr_1d_pds = cal_1d_qr( avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs ) + plot_qr_1d_with_ROI( qr_1d_pds, qr_center=np.unique( np.array(list( qval_dict.values() ) )[:,0] ), + loglog=False, save=True, uid=uidstr, path = data_dir) + + Nimg = FD.end - FD.beg + time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 1, edges = None ) + time_edge = np.array( time_edge ) + good_start + qrt_pds = get_t_qrc( FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid = uidstr ) + plot_qrt_pds( qrt_pds, time_edge, qz_index = 0, uid = uidstr, path = data_dir ) + + + + ############################## + ##the below works for all the geometries + ######################################## + if scat_geometry !='ang_saxs': + roi_inten = check_ROI_intensity( avg_img, roi_mask, ring_number= qth_interest, uid =uidstr, save=True, path=data_dir ) + if scat_geometry =='saxs' or scat_geometry =='gi_saxs' or scat_geometry =='gi_waxs': + if run_waterfall: + wat = cal_waterfallc( FD, roi_mask, + qindex= qth_interest, save = True, path=data_dir,uid=uidstr) + if run_waterfall: + plot_waterfallc( wat, qindex=qth_interest, aspect=None, + vmax= np.max(wat), uid=uidstr, save =True, + path=data_dir, beg= FD.beg) + ring_avg = None + + if run_t_ROI_Inten: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, multi_cor=True ) + plot_each_ring_mean_intensityc( times_roi, mean_int_sets, uid = uidstr, save=True, path=data_dir ) + roi_avg = np.average( mean_int_sets, axis=0) + + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + lag_steps = None + + if use_sqnorm: + norm = get_pixelist_interp_iq( qp_saxs, iq_saxs, roi_mask, center) + else: + norm=None + + define_good_series = False + if define_good_series: + FD = Multifile(filename, beg = good_start, end = Nimg) + uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end) + print( uid_ ) + + if 'g2_fit_variables' in list( run_pargs.keys() ): + g2_fit_variables = run_pargs['g2_fit_variables'] + else: + g2_fit_variables = {'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True} + + if 'g2_guess_values' in list( run_pargs.keys() ): + g2_guess_values = run_pargs['g2_guess_values'] + else: + g2_guess_values= {'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,} + + if 'g2_guess_limits' in list( run_pargs.keys()): + g2_guess_limits = run_pargs['g2_guess_limits'] + else: + g2_guess_limits = dict( baseline =[1, 2], alpha=[0, 2], beta = [0, 1], relaxation_rate= [0.001, 5000]) + + if run_one_time: + if use_imgsum_norm: + imgsum_ = imgsum + else: + imgsum_ = None + if scat_geometry !='ang_saxs': + t0 = time.time() + g2, lag_steps = cal_g2p( FD, roi_mask, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + run_time(t0) + taus = lag_steps * timeperframe + g2_pds = save_g2_general( g2, taus=taus,qr=np.array( list( qval_dict.values() ) )[:,0], + uid=uid_+'_g2.csv', path= data_dir, return_res=True ) + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables= g2_fit_variables, + guess_values= g2_guess_values, + guess_limits = g2_guess_limits) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + #if run_one_time: + #plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + + plot_g2_general( g2_dict={1:g2, 
2:g2_fit}, taus_dict={1:taus, 2:taus_fit},vlim=[0.95, 1.05], + qval_dict = qval_dict, fit_res= g2_fit_result, geometry=scat_geometry,filename=uid_ + '_g2', + path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_fit') + + D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], qrate_fit_res, + geometry= scat_geometry,uid=uid_ , path= data_dir ) + + + else: + t0 = time.time() + g2_v, lag_steps_v = cal_g2p( FD, roi_mask_v, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + g2_p, lag_steps_p = cal_g2p( FD, roi_mask_p, bad_frame_list,good_start, num_buf = 8, num_lev= None, + imgsum= imgsum_, norm=norm ) + run_time(t0) + + taus_v = lag_steps_v * timeperframe + g2_pds_v = save_g2_general( g2_v, taus=taus_v,qr=np.array( list( qval_dict_v.values() ) )[:,0], + uid=uid_+'_g2v.csv', path= data_dir, return_res=True ) + + taus_p = lag_steps_p * timeperframe + g2_pds_p = save_g2_general( g2_p, taus=taus_p,qr=np.array( list( qval_dict_p.values() ) )[:,0], + uid=uid_+'_g2p.csv', path= data_dir, return_res=True ) + + fit_g2_func_v = 'stretched' #for vertical + g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( g2_v, taus_v, + function = fit_g2_func_v, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + g2_fit_paras_v = save_g2_fit_para_tocsv(g2_fit_result_v, filename= uid_ +'_g2_fit_paras_v.csv', path=data_dir ) + + fit_g2_func_p ='flow_para' #for parallel + g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( g2_p, taus_p, + function = fit_g2_func_p, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True,'flow_velocity':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,'flow_velocity':1}) + g2_fit_paras_p = save_g2_fit_para_tocsv(g2_fit_result_p, filename= uid_ +'_g2_fit_paras_p.csv', path=data_dir ) + + + + plot_g2_general( g2_dict={1:g2_v, 2:g2_fit_v}, taus_dict={1:taus_v, 2:taus_fit_v},vlim=[0.95, 1.05], + qval_dict = qval_dict_v, fit_res= g2_fit_result_v, geometry=scat_geometry,filename= uid_+'_g2_v', + path= data_dir, function= fit_g2_func_v, ylabel='g2_v', append_name= '_fit') + + plot_g2_general( g2_dict={1:g2_p, 2:g2_fit_p}, taus_dict={1:taus_p, 2:taus_fit_p},vlim=[0.95, 1.05], + qval_dict = qval_dict_p, fit_res= g2_fit_result_p, geometry=scat_geometry,filename= uid_+'_g2_p', + path= data_dir, function= fit_g2_func_p, ylabel='g2_p', append_name= '_fit') + + combine_images( [data_dir + uid_+'_g2_v_fit.png', data_dir + uid_+'_g2_p_fit.png'], data_dir + uid_+'_g2_fit.png', outsize=(2000, 2400) ) + + + D0_v, qrate_fit_res_v = get_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict_v, g2_fit_paras_v['relaxation_rate'], qrate_fit_res_v, + geometry= scat_geometry,uid=uid_ +'_vert' , path= data_dir ) + + D0_p, qrate_fit_res_p = get_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], geometry= scat_geometry ) + plot_q_rate_fit_general( qval_dict_p, g2_fit_paras_p['relaxation_rate'], qrate_fit_res_p, + geometry= scat_geometry,uid=uid_ +'_para' , path= data_dir ) + + + combine_images( [data_dir + uid_+ '_vert_Q_Rate_fit.png', data_dir + uid_+ '_para_Q_Rate_fit.png'], data_dir 
+ uid_+'_Q_Rate_fit.png', outsize=(2000, 2400) ) + + + # For two-time + data_pixel = None + if run_two_time: + + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() + t0=time.time() + g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None ) + if run_dose: + np.save( data_dir + 'uid=%s_g12b'%uid, g12b) + + + if lag_steps is None: + num_bufs=8 + noframes = FD.end - FD.beg + num_levels = int(np.log( noframes/(num_bufs-1))/np.log(2) +1) +1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + max_taus= lag_steps.max() + lag_steps = lag_steps[ lag_steps < Nimg - good_start ] + + run_time( t0 ) + + show_C12(g12b, q_ind= qth_interest, N1= FD.beg, N2=min( FD.end,5000), vmin= 0.99, vmax=1.3, + timeperframe=timeperframe,save=True, cmap=cmap_albula, + path= data_dir, uid = uid_ ) + + #print('here') + #show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, + # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) + max_taus = Nimg + t0=time.time() + #g2b = get_one_time_from_two_time(g12b)[:max_taus] + g2b = get_one_time_from_two_time(g12b)[lag_steps] + + tausb = lag_steps *timeperframe + run_time(t0) + + + #tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe + g2b_pds = save_g2_general( g2b, taus=tausb, qr= np.array( list( qval_dict.values() ) )[:,0], + qz=None, uid=uid_ +'_g2b.csv', path= data_dir, return_res=True ) + + + g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( g2b, tausb, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables=g2_fit_variables, guess_values=g2_guess_values, guess_limits =g2_guess_limits) + + g2b_fit_paras = save_g2_fit_para_tocsv(g2_fit_resultb, + filename= uid_ + '_g2b_fit_paras.csv', path=data_dir ) + + D0b, qrate_fit_resb = get_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], + fit_range=None, geometry= scat_geometry ) + + + #print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) + plot_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb, + geometry= scat_geometry,uid=uid_ +'_two_time' , path= data_dir ) + + + + plot_g2_general( g2_dict={1:g2b, 2:g2_fitb}, taus_dict={1:tausb, 2:taus_fitb},vlim=[0.95, 1.05], + qval_dict=qval_dict, fit_res= g2_fit_resultb, geometry=scat_geometry,filename=uid_+'_g2', + path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_b_fit') + + if run_two_time and run_one_time: + plot_g2_general( g2_dict={1:g2, 2:g2b}, taus_dict={1:taus, 2:tausb},vlim=[0.95, 1.05], + qval_dict=qval_dict, g2_labels=['from_one_time', 'from_two_time'], + geometry=scat_geometry,filename=uid_+'_g2_two_g2', path= data_dir, ylabel='g2', ) + + + + # Four Time Correlation + + if run_four_time: #have to run one and two first + t0=time.time() + g4 = get_four_time_from_two_time(g12b, g2=g2b)[:max_taus] + run_time(t0) + + taus4 = np.arange( g4.shape[0])*timeperframe + g4_pds = save_g2_general( g4, taus=taus4, qr=np.array( list( qval_dict.values() ) )[:,0], + qz=None, uid=uid_ +'_g4.csv', path= data_dir, return_res=True ) + plot_g2_general( g2_dict={1:g4}, taus_dict={1:taus4},vlim=[0.95, 1.05], qval_dict=qval_dict, fit_res= None, + geometry=scat_geometry,filename=uid_+'_g4',path= data_dir, ylabel='g4') + + if run_dose: + get_two_time_mulit_uids( [uid], roi_mask, norm= norm, bin_frame_number=bin_frame_number, + path= data_dir0, force_generate=False ) + N = len(imgs) + try: + tr = md['transmission'] + except: + tr = 1 + if 'dose_frame' in list(run_pargs.keys()): + dose_frame = 
run_pargs['dose_frame'] + else: + dose_frame = np.int_([ N/8, N/4 ,N/2, 3*N/4, N*0.99 ] ) + #N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 + exposure_dose = tr * exposuretime * dose_frame + taus_uids, g2_uids = get_series_one_time_mulit_uids( [ uid ], qval_dict, good_start=good_start, + path= data_dir0, exposure_dose = exposure_dose, num_bufs =8, save_g2= False, + dead_time = 0, trans = [ tr ] ) + + plot_dose_g2( taus_uids, g2_uids, ylim=[0.95, 1.2], vshift= 0.00, + qval_dict = qval_dict, fit_res= None, geometry= scat_geometry, + filename= '%s_dose_analysis'%uid_, + path= data_dir, function= None, ylabel='g2_Dose', g2_labels= None, append_name= '' ) + + # Speckel Visiblity + if run_xsvs: + max_cts = get_max_countc(FD, roi_mask ) + qind, pixelist = roi.extract_label_indices( roi_mask ) + noqs = len( np.unique(qind) ) + nopr = np.bincount(qind, minlength=(noqs+1))[1:] + #time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) + time_steps = [0,1] #only run the first two levels + num_times = len(time_steps) + times_xsvs = exposuretime + (2**( np.arange( len(time_steps) ) ) -1 ) *timeperframe + print( 'The max counts are: %s'%max_cts ) + + ### Do historam + if roi_avg is None: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, ) + roi_avg = np.average( mean_int_sets, axis=0) + + t0=time.time() + spec_bins, spec_his, spec_std = xsvsp( FD, np.int_(roi_mask), norm=None, + max_cts=int(max_cts+2), bad_images=bad_frame_list, only_two_levels=True ) + spec_kmean = np.array( [roi_avg * 2**j for j in range( spec_his.shape[0] )] ) + run_time(t0) + + run_xsvs_all_lags = False + if run_xsvs_all_lags: + times_xsvs = exposuretime + lag_steps * acquisition_period + if data_pixel is None: + data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm=norm ).get_data() + t0=time.time() + spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std(data_pixel, np.int_(ro_mask), lag_steps ) + run_time(t0) + spec_pds = save_bin_his_std( spec_bins, spec_his, spec_std, filename=uid_+'_spec_res.csv', path=data_dir ) + + ML_val, KL_val,K_ = get_xsvs_fit( spec_his, spec_kmean, spec_std, max_bins=2,varyK= False, ) + + #print( 'The observed average photon counts are: %s'%np.round(K_mean,4)) + #print( 'The fitted average photon counts are: %s'%np.round(K_,4)) + print( 'The difference sum of average photon counts between fit and data are: %s'%np.round( + abs(np.sum( spec_kmean[0,:] - K_ )),4)) + print( '#'*30) + qth= 10 + print( 'The fitted M for Qth= %s are: %s'%(qth, ML_val[qth]) ) + print( K_[qth]) + print( '#'*30) + + + plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std=spec_std, + xlim = [0,10], vlim =[.9, 1.1], + uid=uid_, qth= qth_interest, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir) + + plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std = spec_std, + xlim = [0,15], vlim =[.9, 1.1], + uid=uid_, qth= None, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir ) + + ### Get contrast + contrast_factorL = get_contrast( ML_val) + spec_km_pds = save_KM( spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_ , path = data_dir ) + #print( spec_km_pds ) + + plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, + vlim=[0.8,1.2], qth = qth_interest, uid=uid_,path = data_dir, legend_size=14) + + plot_g2_contrast( contrast_factorL, g2, times_xsvs, taus, qr, + vlim=[0.8,1.2], qth = None, uid=uid_,path = data_dir, legend_size=4) + + + + + + md['mask_file']= mask_path + mask_name + md['mask'] = mask + 
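+    # Collect the setup and bookkeeping information (mask file, good_start,
+    # bad frame list, average image, ROI mask and, below, the geometry-specific
+    # q values and beam centers) into md, so that it is saved via psave_obj /
+    # save_dict_csv and exported together with the results to HDF5 further down.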
md['NOTEBOOK_FULL_PATH'] = None + md['good_start'] = good_start + md['bad_frame_list'] = bad_frame_list + md['avg_img'] = avg_img + md['roi_mask'] = roi_mask + + if scat_geometry == 'gi_saxs': + md['Qr'] = Qr + md['Qz'] = Qz + md['qval_dict'] = qval_dict + md['beam_center_x'] = inc_x0 + md['beam_center_y']= inc_y0 + md['beam_refl_center_x'] = refl_x0 + md['beam_refl_center_y'] = refl_y0 + + elif scat_geometry == 'saxs' or 'gi_waxs': + md['qr']= qr + #md['qr_edge'] = qr_edge + md['qval_dict'] = qval_dict + md['beam_center_x'] = center[1] + md['beam_center_y']= center[0] + + elif scat_geometry == 'ang_saxs': + md['qval_dict_v'] = qval_dict_v + md['qval_dict_p'] = qval_dict_p + md['beam_center_x'] = center[1] + md['beam_center_y']= center[0] + + + md['beg'] = FD.beg + md['end'] = FD.end + md['metadata_file'] = data_dir + 'md.csv-&-md.pkl' + psave_obj( md, data_dir + 'uid=%s_md'%uid[:6] ) #save the setup parameters + #psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters + save_dict_csv( md, data_dir + 'uid=%s_md.csv'%uid, 'w') + + Exdt = {} + if scat_geometry == 'gi_saxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): + Exdt[ k ] = v + elif scat_geometry == 'saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, q_saxs, iq_saxs, iqst, qt,roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + elif scat_geometry == 'gi_waxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + elif scat_geometry == 'ang_saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', + 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, + qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + Exdt[ k ] = v + + if run_waterfall:Exdt['wat'] = wat + if run_t_ROI_Inten:Exdt['times_roi'] = times_roi;Exdt['mean_int_sets']=mean_int_sets + if run_one_time: + if scat_geometry != 'ang_saxs': + for k,v in zip( ['taus','g2','g2_fit_paras'], [taus,g2,g2_fit_paras] ):Exdt[ k ] = v + else: + for k,v in zip( ['taus_v','g2_v','g2_fit_paras_v'], [taus_v,g2_v,g2_fit_paras_v] ):Exdt[ k ] = v + for k,v in zip( ['taus_p','g2_p','g2_fit_paras_p'], [taus_p,g2_p,g2_fit_paras_p] ):Exdt[ k ] = v + if run_two_time: + for k,v in zip( ['tausb','g2b','g2b_fit_paras', 'g12b'], [tausb,g2b,g2b_fit_paras,g12b] ):Exdt[ k ] = v + if run_four_time: + for k,v in zip( ['taus4','g4'], [taus4,g4] ):Exdt[ k ] = v + if run_xsvs: + for k,v in zip( ['spec_kmean','spec_pds','times_xsvs','spec_km_pds','contrast_factorL'], + [ spec_kmean,spec_pds,times_xsvs,spec_km_pds,contrast_factorL] ):Exdt[ k ] = v + + + export_xpcs_results_to_h5( 'uid=%s_Res.h5'%md['uid'], data_dir, export_dict = Exdt ) + #extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) + # Creat PDF Report + pdf_out_dir = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/') + pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) + if run_xsvs: + pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf"%(uid,pdf_version) + 
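+    # Assemble the PDF report from the figures/CSV files saved in data_dir and,
+    # if att_pdf_report is True, attach it to the Olog entry for this uid; the
+    # attachment step is wrapped in try/except because a duplicated filename
+    # makes the upload fail.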
#pdf_filename + + print( data_dir, uid[:6], pdf_out_dir, pdf_filename, username ) + + make_pdf_report( data_dir, uid[:6], pdf_out_dir, pdf_filename, username, + run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs, run_dose=run_dose, + report_type= scat_geometry + ) + ## Attach the PDF report to Olog + if att_pdf_report: + os.environ['HTTPS_PROXY'] = 'https://proxy:8888' + os.environ['no_proxy'] = 'cs.nsls2.local,localhost,127.0.0.1' + pname = pdf_out_dir + pdf_filename + atch=[ Attachment(open(pname, 'rb')) ] + try: + update_olog_uid( uid= md['uid'], text='Add XPCS Analysis PDF Report', attachments= atch ) + except: + print("I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file."%pname) + + if show_plot: + plt.show() + #else: + # plt.close('all') + if clear_plot: + plt.close('all') + if return_res: + res = {} + if scat_geometry == 'saxs': + for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','avg_img','mask', 'imgsum','bad_frame_list','roi_mask', 'qval_dict'], + [ md, q_saxs, iq_saxs, iqst, qt, avg_img,mask,imgsum,bad_frame_list,roi_mask, qval_dict ] ): + res[ k ] = v + + elif scat_geometry == 'ang_saxs': + for k,v in zip( [ 'md', 'q_saxs', 'iq_saxs','roi_mask_v','roi_mask_p', + 'qval_dict_v','qval_dict_p','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [ md, q_saxs, iq_saxs, roi_mask_v,roi_mask_p, + qval_dict_v,qval_dict_p, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + res[ k ] = v + + elif scat_geometry == 'gi_saxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ): + res[ k ] = v + + elif scat_geometry == 'gi_waxs': + for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'], + [md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ): + res[ k ] = v + + if run_waterfall: + res['wat'] = wat + if run_t_ROI_Inten: + res['times_roi'] = times_roi; + res['mean_int_sets']=mean_int_sets + if run_one_time: + if scat_geometry != 'ang_saxs': + res['g2'] = g2 + res['taus']=taus + else: + res['g2_p'] = g2_p + res['taus_p']=taus_p + res['g2_v'] = g2_v + res['taus_v']=taus_v + + if run_two_time: + res['tausb'] = tausb + res['g12b'] = g12b + res['g2b'] = g2b + if run_four_time: + res['g4']= g4 + res['taus4']=taus4 + if run_xsvs: + res['spec_kmean']=spec_kmean + res['spec_pds']= spec_pds + res['contrast_factorL'] = contrast_factorL + res['times_xsvs']= times_xsvs + return res + +#uid = '3ff4ee' +#run_xpcs_xsvs_single( uid, run_pargs ) + + + + + diff --git a/pyCHX/backups/pyCHX-backup/backups/xpcs_timepixel_05012024.py b/pyCHX/backups/pyCHX-backup/backups/xpcs_timepixel_05012024.py new file mode 100644 index 0000000..286141e --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/backups/xpcs_timepixel_05012024.py @@ -0,0 +1,830 @@ +from numpy import pi,sin,arctan,sqrt,mgrid,where,shape,exp,linspace,std,arange +from numpy import power,log,log10,array,zeros,ones,reshape,mean,histogram,round,int_ +from numpy import indices,hypot,digitize,ma,histogramdd,apply_over_axes,sum +from numpy import around,intersect1d, ravel, unique,hstack,vstack,zeros_like +from numpy import save, load, dot +from numpy.linalg import lstsq +from numpy import polyfit,poly1d; +import sys,os +import pickle as pkl + +import matplotlib.pyplot as plt +#from Init_for_Timepix import * # the setup file +import time + +import struct +import numpy as 
np +from tqdm import tqdm +import pandas as pds +from pyCHX.chx_libs import multi_tau_lags +from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD + + + + +def get_timepixel_data( data_dir, filename, time_unit= 1 ): + '''give a csv file of a timepixel data, return x,y,t + x, pos_x in pixel + y, pos_y in pixel + t, arrival time + time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 + return x,y,t (in second, starting from zero) + + ''' + data = pds.read_csv( data_dir + filename ) + #'#Col', ' #Row', ' #ToA', + #return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps + if time_unit !=1: + try: + x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) * time_unit + except: + x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) * time_unit + else: + try: + x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) + except: + x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) + return x,y, t-t.min() #* 25/4096. #in ns + + +def get_pvlist_from_post( p, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Nov, 2017 to get a pos, val list of phonton hitting detector by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: array, int64, coordinate-x * det_x + coordinate-y + t: list, int64, photon hit time + binstep: int, binstep (in t unit) period + detx,dety: int/int, the detector size in x and y + Output: + positions: int array, (x*detx +y) + vals: int array, counts of that positions + counts: int array, counts of that positions in each binstep + ''' + v = ( t - t[0])//binstep + L= np.max( v ) + 1 + arr = np.ravel_multi_index( [ p, v ], [detx * dety,L ] ) + uval, ind, count = np.unique( arr, return_counts=True, return_index=True) + ind2 = np.lexsort( ( p[ind], v[ind] ) ) + ps = (p[ind])[ind2] + vs = count[ind2] + cs = np.bincount(v[ind]) + return ps,vs,cs + + + +def histogram_pt( p, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Nov, 2017 to get a histogram of phonton counts by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: coordinate-x * det_x + coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + ''' + L= np.max( (t-t[0])//binstep ) + 1 + #print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index( [ p, (t-t[0])//binstep ], [detx * dety,L ] ) + M,N = arr.max(),arr.min() + da = np.zeros( [detx * dety, L ] ) + da.flat[np.arange(N, M ) ] = np.bincount( arr- N ) + return da + +def histogram_xyt( x, y, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Mar, 2017 to get a histogram of phonton counts by giving + x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + x: coordinate-x + y: coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + + + ''' + L= np.max( (t-t[0])//binstep ) + 1 + #print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index( [x, y, (t-t[0])//binstep ], [detx, dety,L ] ) + M,N = arr.max(),arr.min() + da = np.zeros( [detx, dety, L ] ) 
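+    # Descriptive note: scatter the photon counts back into the (detx, dety, L) volume.
+    # arr holds the flattened (x, y, time-bin) multi-index, so bincount over (arr - N)
+    # gives the per-voxel counts, which are written through the flat view of da.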
+ da.flat[np.arange(N, M ) ] = np.bincount( arr- N ) + return da + + + +def get_FD_end_num(FD, maxend=1e10): + N = maxend + for i in range(0,int(maxend)): + try: + FD.seekimg(i) + except: + N = i + break + FD.seekimg(0) + return N + +def compress_timepix_data( pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2, + with_pickle=True ): + + ''' YG.Dev@CHX Nov 20, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * y + x + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + force_compress: if False, + if already compressed, just it + else: compress + if True, compress and, if exist, overwrite the already-coompress data + Return: + avg_img, imgsum, N (frame number) + + ''' + if filename is None: + filename= '/XF11ID/analysis/Compressed_Data' +'/timpix_uid_%s.cmp'%md['uid'] + + if force_compress: + print ("Create a new compress file with filename as :%s."%filename) + return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes, + with_pickle=with_pickle ) + else: + if not os.path.exists( filename ): + print ("Create a new compress file with filename as :%s."%filename) + return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes, + with_pickle=with_pickle ) + else: + print ("Using already created compressed file with filename as :%s."%filename) + return pkl.load( open(filename + '.pkl', 'rb' ) ) + + #FD = Multifile(filename, 0, int(1e25) ) + #return get_FD_end_num(FD) + + + + + +def create_timepix_compress_header( md, filename, nobytes=2, bins=1 ): + ''' + Create the head for a compressed eiger data, this function is for parallel compress + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + fp.close() + + +def init_compress_timepix_data( pos, t, binstep, filename, mask=None, + md = None, nobytes=2,with_pickle=True ): + ''' YG.Dev@CHX Nov 19, 2017 with optimal algorithm by using complex index techniques + + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + binstep: int, binstep (in t unit) period + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + ''' + fp = open( filename,'wb' ) + if md is None: + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] = 45 + md['y_pixel_size'] = 45 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], 
md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + + N_ = np.int( np.ceil( (t.max() -t.min()) / binstep ) ) + print('There are %s frames to be compressed...'%(N_-1)) + + ps,vs,cs = get_pvlist_from_post( pos, t, binstep, detx= md['sx'], dety= md['sy'] ) + N = len(cs) - 1 #the last one might don't have full number for bings, so kick off + css = np.cumsum(cs) + imgsum = np.zeros( N ) + good_count = 0 + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 + + for i in tqdm( range(0,N) ): + if i ==0: + ind1 = 0 + ind2 = css[i] + else: + ind1 = css[i-1] + ind2 = css[i] + #print( ind1, ind2 ) + good_count +=1 + psi = ps[ ind1:ind2 ] + vsi = vs[ ind1:ind2 ] + dlen = cs[i] + imgsum[i] = vsi.sum() + np.ravel(avg_img )[psi] += vsi + #print(vs.sum()) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *psi)) + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vsi)) + fp.close() + avg_img /= good_count + #return N -1 + if with_pickle: + pkl.dump( [ avg_img, imgsum, N ], open(filename + '.pkl', 'wb' ) ) + return avg_img, imgsum, N + + + + + +def init_compress_timepix_data_light_duty( pos, t, binstep, filename, mask=None, + md = None, nobytes=2,with_pickle=True ): + ''' YG.Dev@CHX Nov 19, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + ''' + fp = open( filename,'wb' ) + if md is None: + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] = 45 + md['y_pixel_size'] = 45 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + + tx = np.arange( t.min(), t.max(), binstep ) + N = len(tx) + imgsum = np.zeros( N-1 ) + print('There are %s frames to be compressed...'%(N-1)) + good_count = 0 + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 + for i in tqdm( range(N-1) ): + ind1 = np.argmin( np.abs( tx[i] - t) ) + ind2 = np.argmin( np.abs( tx[i+1] - t ) ) + #print( 'N=%d:'%i, ind1, ind2 ) + p_i = pos[ind1: ind2] + ps,vs = np.unique( p_i, return_counts= True ) + np.ravel(avg_img )[ps] += vs + good_count +=1 + dlen = len(ps) + imgsum[i] = vs.sum() + #print(vs.sum()) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *ps)) + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vs)) + fp.close() + avg_img /= good_count + #return N -1 + if with_pickle: + pkl.dump( [ avg_img, imgsum, N-1 ], open(filename + '.pkl', 'wb' ) ) + return avg_img, imgsum, N-1 + + + + + + +def compress_timepix_data_old( data_pixel, 
filename, rois=None, + md = None, nobytes=2 ): + ''' + Compress the timepixeldata + md: a dict to describle the data info + rois: [y1,y2, x1, x2] + + ''' + fp = open( filename,'wb' ) + if md is None: + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] =25 + md['y_pixel_size'] =25 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + md['roi_rb']= 0 + md['roi_re']= md['sy'] + md['roi_cb']= 0 + md['roi_ce']= md['sx'] + if rois is not None: + md['roi_rb']= rois[2] + md['roi_re']= rois[3] + md['roi_cb']= rois[1] + md['roi_ce']= rois[0] + + md['sy'] = md['roi_cb'] - md['roi_ce'] + md['sx'] = md['roi_re'] - md['roi_rb'] + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + md['roi_rb'], md['roi_re'],md['roi_cb'],md['roi_ce'] + ) + + fp.write( Header) + fp.write( data_pixel ) + + + +class Get_TimePixel_Arrayc(object): + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + ''' + + def __init__(self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None,flat_correction=None, + detx = 256, dety = 256): + ''' + indexable: a images sequences + pixelist: 1-D array, interest pixel list + #flat_correction, normalized by flatfield + #norm, normalized by total intensity, like a incident beam intensity + ''' + self.hitime = hitime + self.tbins = tbins + self.tx = np.arange( self.hitime.min(), self.hitime.max(), self.tbins ) + N = len(self.tx) + if beg is None: + beg = 0 + if end is None: + end = N + + self.beg = beg + self.end = end + self.length = self.end - self.beg + self.pos = pos + self.pixelist = pixelist + self.norm = norm + self.flat_correction = flat_correction + self.detx = detx + self.dety = dety + + def get_data(self ): + ''' + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + ''' + norm = self.norm + data_array = np.zeros([ self.length-1,len(self.pixelist)]) + print( data_array.shape) + + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.detx * self.dety, dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + n=0 + tx = self.tx + N = len(self.tx) + print( 'The Produced Array Length is %d.'%(N-1) ) + flat_correction = self.flat_correction + #imgsum = np.zeros( N ) + for i in tqdm( range(N-1) ): + ind1 = np.argmin( np.abs( tx[i] - self.hitime ) ) + ind2 = np.argmin( np.abs( tx[i+1] - self.hitime ) ) + #print( 'N=%d:'%i, ind1, ind2 ) + p_i = self.pos[ind1: ind2] + pos,val = np.unique( p_i, return_counts= True ) + #print( val.sum() ) + w = np.where( timg[pos] )[0] + pxlist = timg[ pos[w] ] -1 + #print( val[w].sum() ) + #fra_pix[ pxlist] = v[w] + if flat_correction is not None: + #normalized by flatfield + data_array[n][ pxlist] = val[w] + else: + data_array[n][ pxlist] = val[w] / flat_correction[pxlist] #-1.0 + if norm is not None: + #normalized by total intensity, like a 
incident beam intensity + data_array[n][ pxlist] /= norm[i] + n += 1 + return data_array + + + +def apply_timepix_mask( x,y,t, roi ): + y1,y2, x1,x2 = roi + w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1) + return x[w],y[w], t[w] + + + + + + +def get_timepixel_data_from_series( data_dir, filename_prefix, + total_filenum = 72, colms = int(1e5) ): + x = np.zeros( total_filenum * colms ) + y = np.zeros( total_filenum * colms ) + t = zeros( total_filenum * colms ) + for n in range( total_filenum): + filename = filename_prefix + '_%s.csv'%n + data = get_timepixel_data( data_dir, filename ) + if n!=total_filenum-1: + ( x[n*colms: (n+1)*colms ], y[n*colms: (n+1)*colms ], t[n*colms: (n+1)*colms ] )= ( + data[0], data[1], data[2]) + else: + #print( filename_prefix + '_%s.csv'%n ) + ln = len(data[0]) + #print( ln ) + ( x[n*colms: n*colms + ln ], y[n*colms: n*colms + ln ], t[n*colms: n*colms + ln ] )= ( + data[0], data[1], data[2]) + + return x[:n*colms + ln] ,y[:n*colms + ln],t[:n*colms + ln] + + + +def get_timepixel_avg_image( x,y,t, det_shape = [256, 256], delta_time = None ): + '''YG.Dev@CHX, 2016 + give x,y, t data to get image in a period of delta_time (in second) + x, pos_x in pixel + y, pos_y in pixel + t, arrival time + + + ''' + t0 = t.min() + tm = t.max() + + if delta_time is not None: + delta_time *=1e12 + if delta_time > tm: + delta_time = tm + else: + delta_time = t.max() + #print( delta_time) + t_ = t[t10: #print progress... + if n %( noframes / 10) ==0: + sys.stdout.write("#") + sys.stdout.flush() + elapsed_time = time.time() - start_time + print ( 'Total time: %.2f min' %(elapsed_time/60.) ) + return g2 + + + def plot(self, y,x=None): + '''a simple plot''' + if x is None:x=arange( len(y)) + plt.plot(x,y,'ro', ls='-') + plt.show() + + + def g2_to_pds(self, dly, g2, tscale = None): + '''convert g2 to a pandas frame''' + if len(g2.shape)==1:g2=g2.reshape( [len(g2),1] ) + tn, qn = g2.shape + tindex=xrange( tn ) + qcolumns = ['t'] + [ 'g2' ] + if tscale is None:tscale = 1.0 + g2t = hstack( [dly[:tn].reshape(tn,1) * tscale, g2 ]) + g2p = pd.DataFrame(data=g2t, index=tindex,columns=qcolumns) + return g2p + + def show(self,g2p,title): + t = g2p.t + N = len( g2p ) + ylim = [g2p.g2.min(),g2p[1:N].g2.max()] + g2p.plot(x=t,y='g2',marker='o',ls='--',logx=T,ylim=ylim); + plt.xlabel('time delay, ns',fontsize=12) + plt.title(title) + plt.savefig( RES_DIR + title +'.png' ) + plt.show() + + + +###################################################### + +if False: + xp=xpcs(); #use the xpcs class + dly = xp.delays() + if T: + fnum = 100 + g2=xp.autocor( fnum ) + filename='g2_-%s-'%(fnum) + save( RES_DIR + FOUT + filename, g2) + ##g2= load(RES_DIR + FOUT + filename +'.npy') + g2p = xp.g2_to_pds(dly,g2, tscale = 20) + xp.show(g2p,'g2_run_%s'%fnum) diff --git a/pyCHX/backups/pyCHX-backup/chx_Fitters2D.py b/pyCHX/backups/pyCHX-backup/chx_Fitters2D.py new file mode 100644 index 0000000..a2f27ab --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_Fitters2D.py @@ -0,0 +1,337 @@ +import numpy as np +from lmfit import Model, Parameters + +""" +This module is for functions specific to fitting of spatial correlation +""" + + +def gauss_func(x, xc, amp, sigma, baseline): + return amp * np.exp(-((x - xc) ** 2) / 2.0 / sigma**2) + baseline + + +def gauss2D_func(x, y, xc, amp, sigmax, yc, sigmay, baseline): + return amp * np.exp(-((x - xc) ** 2) / 2.0 / sigmax**2) * np.exp(-((y - yc) ** 2) / 2.0 / sigmay**2) + baseline + + +def extract_param(bestfits, key): + Nframes = len(bestfits) + params = np.zeros(Nframes) + for i 
in range(Nframes): + params[i] = bestfits[i][key] + return params + + +def plarrows(xs, ys, dxs, dys, ax, **kwargs): + for i in range(len(xs)): + ax.arrow(xs[i], ys[i], dxs[i], dys[i], **kwargs) + + +class VectorField2DFitter: + """Base class for fitting a 2D vector field. + Must be inherited. + """ + + def __init__(self, params=None): + if params is None: + raise ValueError("Sorry parameters not set, cannot continue") + self.params = params + + self._res = None + + def __call__(self, x, y, vx, vy, **kwargs): + """The call function will fit the 2D vector field [vx, vy] + to the fit function specified in the object. + fig,ax=plt.subplots() + qmag=np.hypot(qxs[w],qys[w]) + ax.scatter(qmag,vecmags[w]) Need to specify components and vectors. + + Parameters + --------- + + x : 2D np.ndarray + x component + + y : 2D np.ndarray + y component + + vx : 2D np.ndarray + the x component of the vector + + vy : 2D np.ndarray + the y component of the vector + + kwargs : the initial guesses. + + Returns + ------- + The best fit + """ + guesskeys = self.guess() + params = self.params.copy() + + # make the parameters from the kwargs + for key in self.params.keys(): + if key in kwargs.keys() and key is not "XY": + params[key].value = kwargs[key] + else: + # then guess + params[key].value = guesskeys[key] + + self.mod = Model(self.fitfunc, independent_vars=["x", "y"], param_names=self.params.keys()) + # assumes first var is dependent var, and save last params + V = np.array([vx, vy]) + self._res = self.mod.fit(V, x=x, y=y, params=params) + self._x = x + self._y = y + self._params = params + + return self._res.best_values + + def last_result(self): + """Return fitted result of the last fit.""" + if self._res is None: + return ValueError("Please run fit first") + return self._res.best_fit + + def last_values(self): + """Return fitted values of the last fit.""" + if self._params is None: + return ValueError("Please run fit first") + return self._params + + def fitfunc(self, *args, **kwargs): + raise NotImplementedError + + def guess(self, *args, **kwargs): + raise NotImplementedError + + +class VectorField2DLinearFitter(VectorField2DFitter): + def __init__(self, params=None): + """Fit a vector field to a linear model: + [vx] = [ gammaxx, gammaxy] . [x] + [vy] [ gammayx, gammayy] [y] + """ + params = Parameters() + params.add("gammaxx", 1) + params.add("gammaxy", 0) + params.add("gammayx", 0) + params.add("gammayy", 1) + + super(VectorField2DLinearFitter, self).__init__(params=params) + + def fitfunc(self, x, y, gammaxx=0, gammaxy=0, gammayx=0, gammayy=0): + """Fit function. Specify the matrix parameters for the fit. + Matrix terms are: + [vx] = [ gammaxx, gammaxy] . [x] + [vy] [ gammayx, gammayy] [y] + """ + mat = np.array([[gammaxx, gammaxy], [gammayx, gammayy]]) + r = np.array([x, y]) + return np.tensordot(mat, r, axes=(1, 0)) + + def guess(self, **kwargs): + """No guess for this one. Just [1,0] + [0,1] + """ + paramsdict = dict(gammaxx=1.0, gammaxy=0.0, gammayx=0.0, gammayy=1.0) + + if kwargs is not None: + for key in kwargs.keys(): + if key in paramsdict and key is not "xy": + paramsdict[key] = kwargs[key] + + return paramsdict + + +class LineShape2DFitter: + """Base class for all lineshape 2D Fitters.""" + + def __init__(self, params=None): + """Initialize. If you set an initial guess + this will be the static used guess function. + If not, this will call the guess routine. + You need to implement the guess routine. 
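+        Subclasses must provide fitfunc() and guess(); both raise
+        NotImplementedError in this base class.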
+ + Parameters + ---------- + + params : Parameters instance + object specifying the default value and bounds of the + parameters + """ + if params is None: + raise ValueError("Sorry parameters not set, cannot continue") + self.params = params + + def __call__(self, XY, img, **kwargs): + """The call function will fit the function + img to the fit function specified in the object. + + Parameters + --------- + + XY : np.ndarray, 3 dimensional + the XY array for the [X,Y] coordinates of img + + kwargs : the initial guesses. + + Returns + ------- + The best fit + """ + params = self.params.copy() + guesskeys = self.guess(img, XY=XY) + + # make the parameters from the kwargs + for key in self.params.keys(): + if key in kwargs.keys() and key is not "XY": + params[key].value = kwargs[key] + else: + # then guess + params[key].value = guesskeys[key] + + self.mod = Model(self.fitfunc, independent_vars=["XY"], param_names=self.params.keys()) + # assumes first var is dependent var + res = self.mod.fit(img.ravel(), XY=(XY[0].ravel(), XY[1].ravel()), params=params, **kwargs) + ## old version, only return values + # add reduced chisq to parameter list + # res.best_values['chisq']=res.redchi + # return res.best_values + ## new version, also return the std + resf = {} + ks = list(res.params.keys()) + for var in ks: + resf[var] = res.params[var].value + resf[var + "_std"] = res.params[var].stderr + resf["chisq"] = res.redchi + return resf + + def fitfunc(self): + raise NotImplementedError + + def guess(self, img): + raise NotImplementedError + + +class Gauss2DFitter(LineShape2DFitter): + """A simple Gaussian 2D fitter.""" + + def __init__(self, **kwargs): + """Initialize a Gaussian 2D Fitter object + + Parameters + --------- + kwargs : default arguments for fit + in particular weights=1/errorbars + """ + params = self.init_parameters() + super(Gauss2DFitter, self).__init__(params=params) + + def init_parameters(self, **kwargs): + params = Parameters() + params.add("baseline", value=0) + # params.add('amp', value=.1,min=0,max=.5) + params.add("amp", value=0.1) # 02, max=.1 )#,min=0,max=2) + + params.add("xc", value=10.0, min=0.0, max=50.0) + params.add("yc", value=10.0, min=0.0, max=50.0) + + params.add("sigmax", value=0.5, min=1e-6, max=50.0) + params.add("sigmay", value=0.5, min=1e-6, max=50.0) + + for key in kwargs.keys(): + if key in params: + params[key].value = kwargs[key] + + return params + + def __call__(self, img, x=None, y=None, **kwargs): + """fit for a Gaussian on the image, where x and y can be + defined. If not defined, then guess""" + if x is None: + x = np.arange(img.shape[1]) + if y is None: + y = np.arange(img.shape[0]) + + # x = np.arange(img.shape[1]) + # y = np.arange(img.shape[0]) + # print(img.shape) + XY = np.array(np.meshgrid(x, y)) + self.XY = XY + self.x = x + self.y = y + # doesn't make sense that the amplitude is negative here + self.params["amp"].min = 0 + return super(Gauss2DFitter, self).__call__(XY, img, **kwargs) + + def fitfunc(self, XY, xc=None, yc=None, amp=1.0, baseline=0.0, sigmax=1.0, sigmay=1.0): + """ + xy : 2 by N by N matrix containing x and y + xy[0] : x + xy[1] : y + + xc, yc is center in (col, row) format, i.e. img[yc,xc] + """ + X = XY[0] + Y = XY[1] + + if xc is None: + xc = X.shape[1] // 2 + + if yc is None: + yc = X.shape[0] // 2 + + return ( + amp * np.exp(-((X - xc) ** 2) / 2.0 / sigmax**2) * np.exp(-((Y - yc) ** 2) / 2.0 / sigmay**2) + + baseline + ) + + def guess(self, img, XY=None, **kwargs): + """Make a guess from the image of the Gaussian parameters. 
Set + the parameters with kwargs to bypass guessing for those specific + parameters. + + Parameters + ---------- + img : 2d np.ndarray + the image to base the guess on + + XY : 3d np.ndarray + the values for x (cols) and y (rows) + x, y = XY + default is to assume zero based integer numbering + + **kwargs : the keyword arguments to override guess with + + Returns + ------- + paramsdict : dict + dictionary of guesses + """ + # just guess image center + paramsdict = dict() + # yc, xc = np.array(img.shape)//2 + mx1 = np.argmax(img.ravel()) + xc, yc = mx1 % img.shape[1], mx1 // img.shape[0] + + if XY is not None: + xc, yc = int(xc), int(yc) + xc = np.minimum(np.maximum(0, xc), XY[0].shape[1] - 1) + yc = np.minimum(np.maximum(0, yc), XY[0].shape[0] - 1) + xc = XY[0][0, int(xc)] + yc = XY[1][int(yc), 0] + + paramsdict["xc"] = xc + paramsdict["yc"] = yc + paramsdict["amp"] = img[int(yc), int(xc)] + paramsdict["baseline"] = np.average(img) + paramsdict["sigmax"] = 1 # make it one pixel in size + paramsdict["sigmay"] = 1 + # print( paramsdict ) + for key in kwargs.keys(): + if key in paramsdict and key is not "xy": + paramsdict[key] = kwargs[key] + # print( paramsdict ) + return paramsdict diff --git a/pyCHX/backups/pyCHX-backup/chx_compress.py b/pyCHX/backups/pyCHX-backup/chx_compress.py new file mode 100644 index 0000000..57dcb0b --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_compress.py @@ -0,0 +1,1481 @@ +import gc +import os +import pickle as pkl +import shutil +import struct +import sys +from contextlib import closing +from glob import iglob +from multiprocessing import Pool, cpu_count + +import dill +import matplotlib.pyplot as plt + +# imports handler from CHX +# this is where the decision is made whether or not to use dask +# from chxtools.handlers import EigerImages, EigerHandler +from eiger_io.fs_handler import EigerHandler, EigerImages +from tqdm import tqdm + +from pyCHX.chx_generic_functions import ( + copy_data, + create_time_slice, + delete_data, + get_detector, + get_eigerImage_per_file, + get_sid_filenames, + load_data, + reverse_updown, + rot90_clockwise, +) +from pyCHX.chx_libs import RUN_GUI, LogNorm, datetime, db, getpass, np, os, roi, time + + +def run_dill_encoded(what): + fun, args = dill.loads(what) + return fun(*args) + + +def apply_async(pool, fun, args, callback=None): + return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),), callback=callback) + + +def map_async(pool, fun, args): + return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),)) + + +def pass_FD(FD, n): + # FD.rdframe(n) + try: + FD.seekimg(n) + except: + pass + return False + + +def go_through_FD(FD): + if not pass_FD(FD, FD.beg): + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + else: + pass + + +def compress_eigerdata( + images, + mask, + md, + filename=None, + force_compress=False, + bad_pixel_threshold=1e15, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + nobytes=2, + bins=1, + bad_frame_list=None, + para_compress=False, + num_sub=100, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + with_pickle=False, + direct_load_data=True, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + """ + Init 2016, YG@CHX + DEV 2018, June, make images_per_file a dummy, will be determined by get_eigerImage_per_file if direct_load_data + Add copy_rawdata opt. 
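+
+    Key options (as handled below): force_compress re-creates the compressed file even if one
+    already exists; para_compress (only effective with dtypes='uid') splits the series into
+    chunks of num_sub frames that are compressed in parallel; bins>1 averages every `bins`
+    frames before compression; copy_rawdata copies the raw files to new_path first (a
+    workaround for slow GPFS IO).
+
+    Illustrative call only (when direct_load_data is True, data_path should point to the
+    raw master file):
+        mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(
+            images, mask, md, filename=None, para_compress=True, data_path=data_path)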
+ + """ + + end = len(images) // bins + if filename is None: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"] + if dtypes != "uid": + para_compress = False + else: + if para_compress: + images = "foo" + # para_compress= True + # print( dtypes ) + if direct_load_data: + images_per_file = get_eigerImage_per_file(data_path) + if data_path is None: + sud = get_sid_filenames(db[uid]) + data_path = sud[2][0] + if force_compress: + print("Create a new compress file with filename as :%s." % filename) + if para_compress: + # stop connection to be before forking... (let it reset again); 11/09/2024 this seems to fail with 'registry doesn't have attribute disconnect... -> try making this optional; this might have been a leftover: if compression happens "natuarally" (not as force_compress=True) this disconnect/reconnect is already missing...we definitely had this error before... + try: + db.reg.disconnect() + db.mds.reset_connection() + except: + pass + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + rot90=rot90, + reverse=reverse, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + new_path=new_path, + ) + else: + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + if not os.path.exists(filename): + print("Create a new compress file with filename as :%s." % filename) + if para_compress: + print("Using a multiprocess to compress the data.") + return para_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + num_sub=num_sub, + dtypes=dtypes, + reverse=reverse, + rot90=rot90, + num_max_para_process=num_max_para_process, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + copy_rawdata=copy_rawdata, + new_path=new_path + ) + else: + return init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + else: + print("Using already created compressed file with filename as :%s." 
% filename) + beg = 0 + return read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + bad_frame_list=bad_frame_list, + with_pickle=with_pickle, + direct_load_data=direct_load_data, + data_path=data_path, + images_per_file=images_per_file, + ) + + +def read_compressed_eigerdata( + mask, + filename, + beg, + end, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + bad_frame_list=None, + with_pickle=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Read already compress eiger data + Return + mask + avg_img + imsum + bad_frame_list + + """ + # should use try and except instead of with_pickle in the future! + CAL = False + if not with_pickle: + CAL = True + else: + try: + mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) + except: + CAL = True + if CAL: + FD = Multifile(filename, beg, end) + imgsum = np.zeros(FD.end - FD.beg, dtype=np.float64) + avg_img = np.zeros([FD.md["ncols"], FD.md["nrows"]], dtype=np.float64) + imgsum, bad_frame_list_ = get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=bad_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + hot_pixel_threshold=hot_pixel_threshold, + plot_=False, + bad_frame_list=bad_frame_list, + ) + avg_img = get_avg_imgc(FD, beg=None, end=None, sampling=1, plot_=False, bad_frame_list=bad_frame_list_) + FD.FID.close() + + return mask, avg_img, imgsum, bad_frame_list_ + + +def para_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=128, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="uid", + reverse=True, + rot90=False, + num_max_para_process=500, + cpu_core_number=0, + with_pickle=True, + direct_load_data=False, + data_path=None, + images_per_file=100, + copy_rawdata=True, + new_path="/tmp_data/data/", +): + + data_path_ = data_path + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + else: + # print('Here for images_per_file: %s'%images_per_file) + # images_ = EigerImages( data_path, images_per_file=images_per_file) + # print('here') + if not copy_rawdata: + images_ = EigerImages(data_path, images_per_file, md) + else: + print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.") + print("Copying...") + copy_data(data_path, new_path) + # print(data_path, new_path) + new_master_file = new_path + os.path.basename(data_path) + data_path_ = new_master_file + images_ = EigerImages(new_master_file, images_per_file, md) + # print(md) + if reverse: + images_ = reverse_updown(images_) # Why not np.flipud? 
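+            # Descriptive note: optionally rotate the frames 90 degrees clockwise so they
+            # match the (sx, sy)-swapped layout written by create_compress_header when rot90=True.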
+ if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + + else: + N = len(images) + + if cpu_core_number == 0: + cpu_core_number = cpu_count() + + N = int(np.ceil(N / bins)) + Nf = int(np.ceil(N / num_sub)) + if Nf > cpu_core_number: + print("The process number is larger than %s (current server's core threads)" % cpu_core_number) + num_sub_old = num_sub + num_sub = int(np.ceil(N / cpu_core_number)) + Nf = int(np.ceil(N / num_sub)) + print("The sub compressed file number was changed from %s to %s" % (num_sub_old, num_sub)) + create_compress_header(md, filename + "-header", nobytes, bins, rot90=rot90) + # print( 'done for header here') + # print(data_path_, images_per_file) + results = para_segment_compress_eigerdata( + images=images, + mask=mask, + md=md, + filename=filename, + num_sub=num_sub, + bad_pixel_threshold=bad_pixel_threshold, + hot_pixel_threshold=hot_pixel_threshold, + bad_pixel_low_threshold=bad_pixel_low_threshold, + nobytes=nobytes, + bins=bins, + dtypes=dtypes, + num_max_para_process=num_max_para_process, + reverse=reverse, + rot90=rot90, + direct_load_data=direct_load_data, + data_path=data_path_, + images_per_file=images_per_file, + ) + + res_ = [results[k].get() for k in list(sorted(results.keys()))] + imgsum = np.zeros(N) + bad_frame_list = np.zeros(N, dtype=bool) + good_count = 1 + for i in range(Nf): + mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i] + imgsum[i * num_sub : (i + 1) * num_sub] = imgsum_ + bad_frame_list[i * num_sub : (i + 1) * num_sub] = bad_frame_list_ + if i == 0: + mask = mask_ + avg_img = np.zeros_like(avg_img_) + else: + mask *= mask_ + if not np.sum(np.isnan(avg_img_)): + avg_img += avg_img_ + good_count += 1 + + bad_frame_list = np.where(bad_frame_list)[0] + avg_img /= good_count + + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + print("Combining the seperated compressed files together...") + combine_compressed(filename, Nf, del_old=True) + del results + del res_ + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + if copy_rawdata: + delete_data(data_path, new_path) + return mask, avg_img, imgsum, bad_frame_list + + +def combine_compressed(filename, Nf, del_old=True): + old_files = [filename + "-header"] + for i in range(Nf): + old_files.append(filename + "_temp-%i.tmp" % i) + combine_binary_files(filename, old_files, del_old) + + +def combine_binary_files(filename, old_files, del_old=False): + """Combine binary files together""" + fn_ = open(filename, "wb") + for ftemp in old_files: + shutil.copyfileobj(open(ftemp, "rb"), fn_) + if del_old: + os.remove(ftemp) + fn_.close() + + +def para_segment_compress_eigerdata( + images, + mask, + md, + filename, + num_sub=100, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + dtypes="images", + reverse=True, + rot90=False, + num_max_para_process=50, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + parallelly compressed eiger data without header, this function is for parallel compress + """ + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images_ = load_data(uid, detector, reverse=reverse, rot90=rot90) + else: + images_ = EigerImages(data_path, images_per_file, md) + if reverse: + images_ = reverse_updown(images_) + if rot90: + images_ = rot90_clockwise(images_) + + N = len(images_) + + else: + N = len(images) + + 
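+    # Descriptive note: split the N frames into chunks of num_sub frames (scaled by bins);
+    # each chunk is written to its own temp file by a separate worker in the process pool below.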
# N = int( np.ceil( N/ bins ) ) + num_sub *= bins + if N % num_sub: + Nf = N // num_sub + 1 + print("The average image intensity would be slightly not correct, about 1% error.") + print("Please give a num_sub to make reminder of Num_images/num_sub =0 to get a correct avg_image") + else: + Nf = N // num_sub + print("It will create %i temporary files for parallel compression." % Nf) + + if Nf > num_max_para_process: + N_runs = np.int(np.ceil(Nf / float(num_max_para_process))) + print("The parallel run number: %s is larger than num_max_para_process: %s" % (Nf, num_max_para_process)) + else: + N_runs = 1 + result = {} + # print( mask_filename )# + '*'* 10 + 'here' ) + for nr in range(N_runs): + if (nr + 1) * num_max_para_process > Nf: + inputs = range(num_max_para_process * nr, Nf) + else: + inputs = range(num_max_para_process * nr, num_max_para_process * (nr + 1)) + fns = [filename + "_temp-%i.tmp" % i for i in inputs] + # print( nr, inputs, ) + pool = Pool(processes=len(inputs)) # , maxtasksperchild=1000 ) + # print( inputs ) + for i in inputs: + if i * num_sub <= N: + result[i] = pool.apply_async( + segment_compress_eigerdata, + [ + images, + mask, + md, + filename + "_temp-%i.tmp" % i, + bad_pixel_threshold, + hot_pixel_threshold, + bad_pixel_low_threshold, + nobytes, + bins, + i * num_sub, + (i + 1) * num_sub, + dtypes, + reverse, + rot90, + direct_load_data, + data_path, + images_per_file, + ], + ) + + pool.close() + pool.join() + pool.terminate() + return result + + +def segment_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + N1=None, + N2=None, + dtypes="images", + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Create a compressed eiger data without header, this function is for parallel compress + for parallel compress don't pass any non-scalar parameters + """ + if dtypes == "uid": + uid = md["uid"] # images + if not direct_load_data: + detector = get_detector(db[uid]) + images = load_data(uid, detector, reverse=reverse, rot90=rot90)[N1:N2] + else: + images = EigerImages(data_path, images_per_file, md)[N1:N2] + if reverse: + images = reverse_updown(EigerImages(data_path, images_per_file, md))[N1:N2] + if rot90: + images = rot90_clockwise(images) + + Nimg_ = len(images) + M, N = images[0].shape + avg_img = np.zeros([M, N], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 + good_count = 0 + # frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 + else: + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + # Nimg = Nimg_//bins + Nimg = int(np.ceil(Nimg_ / bins)) + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + # print( time_edge, Nimg_, Nimg, bins, N1, N2 ) + imgsum = np.zeros(Nimg) + if bins != 1: + # print('The frames will be binned by %s'%bins) + dtype = np.float64 + + fp = open(filename, "wb") + for n in range(Nimg): + t1, t2 = time_edge[n] + if bins != 1: + img = np.array(np.average(images[t1:t2], axis=0), dtype=dtype) + else: + img = np.array(images[t1], dtype=dtype) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) * np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (dlen == 0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= 
bad_pixel_low_threshold): + dlen = 0 + fp.write(struct.pack("@I", dlen)) + else: + np.ravel(avg_img)[p] += v + good_count += 1 + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 + del p, v, img + fp.flush() + fp.close() + avg_img /= good_count + bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + sys.stdout.write("#") + sys.stdout.flush() + # del images, mask, avg_img, imgsum, bad_frame_list + # print( 'Should release memory here') + return mask, avg_img, imgsum, bad_frame_list + + +def create_compress_header(md, filename, nobytes=4, bins=1, rot90=False): + """ + Create the head for a compressed eiger data, this function is for parallel compress + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + flag = True + # print( list(md.keys()) ) + # print(md) + if "pixel_mask" in list(md.keys()): + sx, sy = md["pixel_mask"].shape[0], md["pixel_mask"].shape[1] + elif "img_shape" in list(md.keys()): + sx, sy = md["img_shape"][0], md["img_shape"][1] + else: + sx, sy = 2167, 2070 # by default for 4M + # print(flag) + klst = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + ] + vs = [0, 0, 0, 0, 0, 0, 75, 75] + for i, k in enumerate(klst): + if k in list(md.keys()): + vs[i] = md[k] + if flag: + if rot90: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + nobytes, + sx, + sy, + 0, + sx, + 0, + sy, + ) + + else: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + vs[0], + vs[1], + vs[2], + vs[3], + vs[4], + vs[5], + vs[6], + vs[7], + # md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], #md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + nobytes, + sy, + sx, + 0, + sy, + 0, + sx, + ) + + fp.write(Header) + fp.close() + + +def init_compress_eigerdata( + images, + mask, + md, + filename, + bad_pixel_threshold=1e15, + hot_pixel_threshold=2**30, + bad_pixel_low_threshold=0, + nobytes=4, + bins=1, + with_pickle=True, + reverse=True, + rot90=False, + direct_load_data=False, + data_path=None, + images_per_file=100, +): + """ + Compress the eiger data + + Create a new mask by remove hot_pixel + Do image average + Do each image sum + Find badframe_list for where image sum above bad_pixel_threshold + Generate a compressed data with filename + + if bins!=1, will bin the images with bin number as bins + + Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ] + + Return + mask + avg_img + imsum + bad_frame_list + + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + if "count_time" not in list(md.keys()): + md["count_time"] = 0 + if "detector_distance" not in list(md.keys()): + md["detector_distance"] = 0 + if "frame_time" not in list(md.keys()): + md["frame_time"] = 0 + if "incident_wavelength" not in list(md.keys()): + 
md["incident_wavelength"] = 0 + if "y_pixel_size" not in list(md.keys()): + md["y_pixel_size"] = 0 + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 0 + if "beam_center_x" not in list(md.keys()): + md["beam_center_x"] = 0 + if "beam_center_y" not in list(md.keys()): + md["beam_center_y"] = 0 + + if not rot90: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[1], + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + ) + else: + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMP0001", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["pixel_mask"].shape[0], + md["pixel_mask"].shape[1], + 0, + md["pixel_mask"].shape[0], + 0, + md["pixel_mask"].shape[1], + ) + + fp.write(Header) + + Nimg_ = len(images) + avg_img = np.zeros_like(images[0], dtype=np.float64) + Nopix = float(avg_img.size) + n = 0 + good_count = 0 + frac = 0.0 + if nobytes == 2: + dtype = np.int16 + elif nobytes == 4: + dtype = np.int32 + elif nobytes == 8: + dtype = np.float64 + else: + print("Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]") + dtype = np.int32 + + Nimg = Nimg_ // bins + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins)) + + imgsum = np.zeros(Nimg) + if bins != 1: + print("The frames will be binned by %s" % bins) + + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + img = np.average(images[t1:t2], axis=0) + mask &= img < hot_pixel_threshold + p = np.where((np.ravel(img) > 0) & np.ravel(mask))[0] # don't use masked data + v = np.ravel(np.array(img, dtype=dtype))[p] + dlen = len(p) + imgsum[n] = v.sum() + if (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <= bad_pixel_low_threshold): + # if imgsum[n] >=bad_pixel_threshold : + dlen = 0 + fp.write(struct.pack("@I", dlen)) + else: + np.ravel(avg_img)[p] += v + good_count += 1 + frac += dlen / Nopix + # s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2]) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *p)) + if bins == 1: + if nobytes != 8: + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + else: + fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) + # n +=1 + + fp.close() + frac /= good_count + print("The fraction of pixel occupied by photon is %6.3f%% " % (100 * frac)) + avg_img /= good_count + + bad_frame_list = np.where( + (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) + )[0] + # bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0] + # bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0] + # bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) ) + + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + if with_pickle: + pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) + return mask, avg_img, imgsum, bad_frame_list + + +""" Description: + + This is code that Mark wrote to open the multifile format + in 
compressed mode, translated to python. + This seems to work for DALSA, FCCD and EIGER in compressed mode. + It should be included in the respective detector.i files + Currently, this refers to the compression mode being '6' + Each file is image descriptor files chunked together as follows: + Header (1024 bytes) + |--------------IMG N begin--------------| + | Dlen + |---------------------------------------| + | Pixel positions (dlen*4 bytes | + | (0 based indexing in file) | + |---------------------------------------| + | Pixel data(dlen*bytes bytes) | + | (bytes is found in header | + | at position 116) | + |--------------IMG N end----------------| + |--------------IMG N+1 begin------------| + |----------------etc.....---------------| + + + Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance', + 'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size', + bytes per pixel (either 2 or 4 (Default)), + Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End, + + + +""" + + +class Multifile: + """The class representing the multifile. + The recno is in 1 based numbering scheme (first record is 1) + This is efficient for reading in increasing order. + Note: reading same image twice in a row is like reading an earlier + numbered image and means the program starts for the beginning again. + + """ + + def __init__(self, filename, beg, end, reverse=False): + """Multifile initialization. Open the file. + Here I use the read routine which returns byte objects + (everything is an object in python). I use struct.unpack + to convert the byte object to other data type (int object + etc) + NOTE: At each record n, the file cursor points to record n+1 + """ + self.FID = open(filename, "rb") + # self.FID.seek(0,os.SEEK_SET) + self.filename = filename + # br: bytes read + br = self.FID.read(1024) + self.beg = beg + self.end = end + self.reverse = reverse + ms_keys = [ + "beam_center_x", + "beam_center_y", + "count_time", + "detector_distance", + "frame_time", + "incident_wavelength", + "x_pixel_size", + "y_pixel_size", + "bytes", + "nrows", + "ncols", + "rows_begin", + "rows_end", + "cols_begin", + "cols_end", + ] + + magic = struct.unpack("@16s", br[:16]) + md_temp = struct.unpack("@8d7I916x", br[16:]) + self.md = dict(zip(ms_keys, md_temp)) + + self.imgread = 0 + self.recno = 0 + + if reverse: + nrows = self.md["nrows"] + ncols = self.md["ncols"] + self.md["nrows"] = ncols + self.md["ncols"] = nrows + rbeg = self.md["rows_begin"] + rend = self.md["rows_end"] + cbeg = self.md["cols_begin"] + cend = self.md["cols_end"] + self.md["rows_begin"] = cbeg + self.md["rows_end"] = cend + self.md["cols_begin"] = rbeg + self.md["cols_end"] = rend + + # some initialization stuff + self.byts = self.md["bytes"] + if self.byts == 2: + self.valtype = np.uint16 + elif self.byts == 4: + self.valtype = np.uint32 + elif self.byts == 8: + self.valtype = np.float64 + # now convert pieces of these bytes to our data + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + + # now read first image + # print "Opened file. 
Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts) + + def _readHeader(self): + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + + def _readImageRaw(self): + + p = np.fromfile(self.FID, dtype=np.int32, count=self.dlen) + v = np.fromfile(self.FID, dtype=self.valtype, count=self.dlen) + self.imgread = 1 + return (p, v) + + def _readImage(self): + (p, v) = self._readImageRaw() + img = np.zeros((self.md["ncols"], self.md["nrows"])) + np.put(np.ravel(img), p, v) + return img + + def seekimg(self, n=None): + """Position file to read the nth image. + For now only reads first image ignores n + """ + # the logic involving finding the cursor position + if n is None: + n = self.recno + if n < self.beg or n > self.end: + raise IndexError("Error, record out of range") + # print (n, self.recno, self.FID.tell() ) + if (n == self.recno) and (self.imgread == 0): + pass # do nothing + + else: + if n <= self.recno: # ensure cursor less than search pos + self.FID.seek(1024, os.SEEK_SET) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + self.recno = 0 + self.imgread = 0 + if n == 0: + return + # have to iterate on seeking since dlen varies + # remember for rec recno, cursor is always at recno+1 + if self.imgread == 0: # move to next header if need to + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + for i in range(self.recno + 1, n): + # the less seeks performed the faster + # print (i) + self.dlen = np.fromfile(self.FID, dtype=np.int32, count=1)[0] + # print 's',self.dlen + self.FID.seek(self.dlen * (4 + self.byts), os.SEEK_CUR) + + # we are now at recno in file, read the header and data + # self._clearImage() + self._readHeader() + self.imgread = 0 + self.recno = n + + def rdframe(self, n): + if self.seekimg(n) != -1: + return self._readImage() + + def rdrawframe(self, n): + if self.seekimg(n) != -1: + return self._readImageRaw() + + +class Multifile_Bins(object): + """ + Bin a compressed file with bins number + See Multifile for details for Multifile_class + """ + + def __init__(self, FD, bins=100): + """ + FD: the handler of a compressed Eiger frames + bins: bins number + """ + + self.FD = FD + if (FD.end - FD.beg) % bins: + print("Please give a better bins number and make the length of FD/bins= integer") + else: + self.bins = bins + self.md = FD.md + # self.beg = FD.beg + self.beg = 0 + Nimg = FD.end - FD.beg + slice_num = Nimg // bins + self.end = slice_num + self.time_edge = np.array(create_time_slice(N=Nimg, slice_num=slice_num, slice_width=bins)) + FD.beg + self.get_bin_frame() + + def get_bin_frame(self): + FD = self.FD + self.frames = np.zeros([FD.md["ncols"], FD.md["nrows"], len(self.time_edge)]) + for n in tqdm(range(len(self.time_edge))): + # print (n) + t1, t2 = self.time_edge[n] + # print( t1, t2) + self.frames[:, :, n] = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + + def rdframe(self, n): + return self.frames[:, :, n] + + def rdrawframe(self, n): + x_ = np.ravel(self.rdframe(n)) + p = np.where(x_)[0] + v = np.array(x_[p]) + return (np.array(p, dtype=np.int32), v) + + +class MultifileBNL: + """ + Re-write multifile from scratch. + """ + + HEADER_SIZE = 1024 + + def __init__(self, filename, mode="rb"): + """ + Prepare a file for reading or writing. 
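+        Note: only read mode is usable at present; mode='wb' raises ValueError below.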
+ mode : either 'rb' or 'wb' + """ + if mode == "wb": + raise ValueError("Write mode 'wb' not supported yet") + if mode != "rb" and mode != "wb": + raise ValueError("Error, mode must be 'rb' or 'wb'" "got : {}".format(mode)) + self._filename = filename + self._mode = mode + # open the file descriptor + # create a memmap + if mode == "rb": + self._fd = np.memmap(filename, dtype="c") + elif mode == "wb": + self._fd = open(filename, "wb") + # these are only necessary for writing + self.md = self._read_main_header() + self._cols = int(self.md["nrows"]) + self._rows = int(self.md["ncols"]) + # some initialization stuff + self.nbytes = self.md["bytes"] + if self.nbytes == 2: + self.valtype = " self.Nframes: + raise KeyError("Error, only {} frames, asked for {}".format(self.Nframes, n)) + # dlen is 4 bytes + cur = self.frame_indexes[n] + dlen = np.frombuffer(self._fd[cur : cur + 4], dtype=" nbytes + vals = self._fd[cur : cur + dlen * self.nbytes] + vals = np.frombuffer(vals, dtype=self.valtype) + return pos, vals + + def rdframe(self, n): + # read header then image + pos, vals = self._read_raw(n) + img = np.zeros((self._rows * self._cols,)) + img[pos] = vals + return img.reshape((self._rows, self._cols)) + + def rdrawframe(self, n): + # read header then image + return self._read_raw(n) + + +class MultifileBNLCustom(MultifileBNL): + def __init__(self, filename, beg=0, end=None, **kwargs): + super().__init__(filename, **kwargs) + self.beg = beg + if end is None: + end = self.Nframes - 1 + self.end = end + + def rdframe(self, n): + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + # return super().rdframe(n - self.beg) + return super().rdframe(n) + + def rdrawframe(self, n): + # return super().rdrawframe(n - self.beg) + if n > self.end or n < self.beg: + raise IndexError("Index out of range") + return super().rdrawframe(n) + + +def get_avg_imgc( + FD, beg=None, end=None, sampling=100, plot_=False, bad_frame_list=None, show_progress=True, *argv, **kwargs +): + """Get average imagef from a data_series by every sampling number to save time""" + # avg_img = np.average(data_series[:: sampling], axis=0) + + if beg is None: + beg = FD.beg + if end is None: + end = FD.end + + avg_img = FD.rdframe(beg) + n = 1 + flag = True + if show_progress: + # print( sampling-1 + beg , end, sampling ) + if bad_frame_list is None: + bad_frame_list = [] + fra_num = int((end - beg) / sampling) - len(bad_frame_list) + for i in tqdm(range(sampling - 1 + beg, end, sampling), desc="Averaging %s images" % fra_num): + if bad_frame_list is not None: + if i in bad_frame_list: + flag = False + else: + flag = True + # print(i, flag) + if flag: + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + else: + for i in range(sampling - 1 + beg, end, sampling): + if bad_frame_list is not None: + if i in bad_frame_list: + flag = False + else: + flag = True + if flag: + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + np.ravel(avg_img)[p] += v + n += 1 + + avg_img /= n + if plot_: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked-Averaged-Image-" % uid) + fig.colorbar(im) + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if 
"uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() + return avg_img + + +def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): + """Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. If None, this function will extract averages for all + ROIs + + Returns + ------- + mean_intensity : array + The mean intensity of each ROI for all `images` + Dimensions: + len(mean_intensity) == len(index) + len(mean_intensity[0]) == len(images) + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + sx, sy = (FD.rdframe(FD.beg)).shape + if labeled_array.shape != (sx, sy): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (sx, sy, labeled_array.shape[0], labeled_array.shape[1]) + ) + # handle various input for `index` + if index is None: + index = list(np.unique(labeled_array)) + index.remove(0) + else: + try: + len(index) + except TypeError: + index = [index] + + index = np.array(index) + # print ('here') + good_ind = np.zeros(max(qind), dtype=np.int32) + good_ind[index - 1] = np.arange(len(index)) + 1 + w = np.where(good_ind[qind - 1])[0] + qind = good_ind[qind[w] - 1] + pixelist = pixelist[w] + + # pre-allocate an array for performance + # might be able to use list comprehension to make this faster + + mean_intensity = np.zeros([int((FD.end - FD.beg) / sampling), len(index)]) + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + # maxqind = max(qind) + norm = np.bincount(qind)[1:] + n = 0 + # for i in tqdm(range( FD.beg , FD.end )): + if not multi_cor: + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get ROI intensity of each frame"): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mean_intensity[n] = np.bincount(qind[pxlist], weights=v[w], minlength=len(index) + 1)[1:] + n += 1 + else: + ring_masks = [np.array(labeled_array == i, dtype=np.int64) for i in np.unique(labeled_array)[1:]] + inputs = range(len(ring_masks)) + go_through_FD(FD) + pool = Pool(processes=len(inputs)) + print("Starting assign the tasks...") + results = {} + for i in tqdm(inputs): + results[i] = apply_async(pool, _get_mean_intensity_one_q, (FD, sampling, ring_masks[i])) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + # return res + for i in inputs: + mean_intensity[:, i] = res[i] + print("ROI mean_intensit calculation is DONE!") + del results + del res + + mean_intensity /= norm + return mean_intensity, index + + +def _get_mean_intensity_one_q(FD, sampling, labels): + mi = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], 
dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + for i in range(FD.beg, FD.end, sampling): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + mi[n] = np.bincount(qind[pxlist], weights=v[w], minlength=2)[1:] + n += 1 + return mi + + +def get_each_frame_intensityc( + FD, + sampling=1, + bad_pixel_threshold=1e10, + bad_pixel_low_threshold=0, + hot_pixel_threshold=2**30, + plot_=False, + bad_frame_list=None, + save=False, + *argv, + **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + # mask &= img < hot_pixel_threshold + imgsum = np.zeros(int((FD.end - FD.beg) / sampling)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get each frame intensity"): + (p, v) = FD.rdrawframe(i) + if len(p) > 0: + imgsum[n] = np.sum(v) + n += 1 + + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + plt.show() + + bad_frame_list_ = ( + np.where((np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold))[0] + + FD.beg + ) + + if bad_frame_list is not None: + bad_frame_list = np.unique(np.concatenate([bad_frame_list, bad_frame_list_])) + else: + bad_frame_list = bad_frame_list_ + + if len(bad_frame_list): + print("Bad frame list length is: %s" % len(bad_frame_list)) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list diff --git a/pyCHX/backups/pyCHX-backup/chx_compress_analysis.py b/pyCHX/backups/pyCHX-backup/chx_compress_analysis.py new file mode 100644 index 0000000..102ddfa --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_compress_analysis.py @@ -0,0 +1,383 @@ +from __future__ import absolute_import, division, print_function + +import logging +import os +import struct +from collections import namedtuple + +import matplotlib.pyplot as plt +from skbeam.core.roi import extract_label_indices +from skbeam.core.utils import multi_tau_lags +from tqdm import tqdm + +from pyCHX.chx_generic_functions import save_arrays + +# from pyCHX.chx_generic_functions import (get_circular_average) +# from pyCHX.XPCS_SAXS import (get_circular_average) +from pyCHX.chx_libs import ( + RUN_GUI, + Figure, + LogNorm, + colors, + colors_, + datetime, + db, + getpass, + markers, + markers_, + np, + os, + roi, + time, +) + +logger = logging.getLogger(__name__) + +from modest_image import imshow + +from pyCHX.chx_compress import ( + Multifile, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + init_compress_eigerdata, + mean_intensityc, + pass_FD, + read_compressed_eigerdata, +) +from pyCHX.chx_generic_functions import find_bad_pixels_FD + +# from pyCHX.chx_compress import * + + +def get_time_edge_avg_img(FD, frame_edge, show_progress=True, apply_threshold=False, threshold=15): + """YG Dev Nov 14, 
2017@CHX + Update@2019/6/12 with option of apply a threshold for each frame + Get averaged img by giving FD and frame edges + Parameters + ---------- + FD: Multifile class + compressed file + frame_edge: np.array, can be created by create_time_slice( Nimg, slice_num= 3, + slice_width= 1, edges = None ) + e.g., np.array([[ 5, 6], + [2502, 2503], + [4999, 5000]]) + apply_threshold: if True, will mask out all the pixels with intensity above the threshold + threshold: 15 (for Eiger500K burst mode) + Return: + array: (N of frame_edge, averaged image) , i.e., d[0] gives the first averaged image + """ + + Nt = len(frame_edge) + d = np.zeros(Nt, dtype=object) + if apply_threshold: + avg_imgi = FD.rdframe(FD.beg) + for i in tqdm(range(Nt)): + t1, t2 = frame_edge[i] + if not apply_threshold: + d[i] = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=show_progress) + else: + dti = np.zeros([t2 - t1, avg_imgi.shape[0], avg_imgi.shape[1]]) + j = 0 + for ti in range(t1, t2): + # print( j, ti ) + badpi = find_bad_pixels_FD( + np.arange(ti, ti + 1), FD, img_shape=avg_imgi.shape, threshold=threshold, show_progress=False + ) + badpi = np.array(badpi, dtype=float) + badpi[badpi == 0] = np.nan + dti[j] = FD.rdframe(ti) * badpi + j += 1 + # print(dti.shape) + d[i] = np.nanmean(dti, axis=0) # average_array_withNan( dti, axis=0 ) + return d + + +def plot_imgs(imgs, image_name=None, *argv, **kwargs): + # NOT WORKing NOW.... + N = len(imgs) + sx = np.ceil(np.sqrt(N)) + pass + + +def cal_waterfallc( + FD, labeled_array, qindex=1, bin_waterfall=False, waterfall_roi_size=None, save=False, *argv, **kwargs +): + """Compute the mean intensity for each ROI in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + qindex : int, qindex=1, give the first ring in SAXS geometry. NOTE: qindex=0 is non-photon pixels. + The ROI's to use. 
+ + bin_waterfall: if True, will bin the waterfall along y-axis + waterfall_roi_size: the size of waterfall roi, (x-size, y-size), if bin, will bin along y + save: save the waterfall + + Returns + ------- + waterfall : array + The mean intensity of each ROI for all `images` + Dimensions: + len(mean_intensity) == len(index) + len(mean_intensity[0]) == len(images) + index : list + The labels for each element of the `mean_intensity` list + """ + sampling = 1 + + labeled_array_ = np.array(labeled_array == qindex, dtype=np.int64) + + qind, pixelist = roi.extract_label_indices(labeled_array_) + + if labeled_array_.shape != (FD.md["ncols"], FD.md["nrows"]): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (FD.md["ncols"], FD.md["nrows"], labeled_array_.shape[0], labeled_array_.shape[1]) + ) + + # pre-allocate an array for performance + # might be able to use list comprehension to make this faster + + watf = np.zeros([int((FD.end - FD.beg) / sampling), len(qind)]) + + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + + # maxqind = max(qind) + norm = np.bincount(qind)[1:] + n = 0 + # for i in tqdm(range( FD.beg , FD.end )): + for i in tqdm(range(FD.beg, FD.end, sampling), desc="Get waterfall for q index=%s" % qindex): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + + watf[n][pxlist] = v[w] + n += 1 + + if bin_waterfall: + watf_ = watf.copy() + watf = np.zeros([watf_.shape[0], waterfall_roi_size[0]]) + for i in range(waterfall_roi_size[1]): + watf += watf_[:, waterfall_roi_size[0] * i : waterfall_roi_size[0] * (i + 1)] + watf /= waterfall_roi_size[0] + + if save: + path = kwargs["path"] + uid = kwargs["uid"] + np.save(path + "%s_waterfall" % uid, watf) + + return watf + + +def plot_waterfallc( + wat, + qindex=1, + aspect=None, + vmax=None, + vmin=None, + interpolation="none", + save=False, + return_fig=False, + cmap="viridis", + *argv, + **kwargs +): + """plot waterfall for a giving compressed file + + FD: class object, the compressed file handler + labeled_array: np.array, a ROI mask + qindex: the index number of q, will calculate where( labeled_array == qindex) + aspect: the aspect ratio of the plot + + Return waterfall + Plot the waterfall + + """ + # wat = cal_waterfallc( FD, labeled_array, qindex=qindex) + if RUN_GUI: + fig = Figure(figsize=(8, 6)) + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots(figsize=(8, 6)) + + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fig, ax = plt.subplots(figsize=(8,6)) + ax.set_ylabel("Pixel") + ax.set_xlabel("Frame") + ax.set_title("%s_Waterfall_Plot_@qind=%s" % (uid, qindex)) + if "beg" in kwargs: + beg = kwargs["beg"] + else: + beg = 0 + extent = [beg, len(wat) + beg, 0, len(wat.T)] + if vmax is None: + vmax = wat.max() + if vmin is None: + vmin = wat.min() + if aspect is None: + aspect = wat.shape[0] / wat.shape[1] + im = imshow(ax, wat.T, cmap=cmap, vmax=vmax, extent=extent, interpolation=interpolation) + # im = ax.imshow(wat.T, cmap='viridis', vmax=vmax,extent= extent,interpolation = interpolation ) + fig.colorbar(im) + ax.set_aspect(aspect) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "%s_waterfall" % uid + ".png" + plt.savefig(fp, 
dpi=fig.dpi) + + # plt.show() + if return_fig: + return fig, ax, im + + +def get_waterfallc(FD, labeled_array, qindex=1, aspect=1.0, vmax=None, save=False, *argv, **kwargs): + """plot waterfall for a giving compressed file + + FD: class object, the compressed file handler + labeled_array: np.array, a ROI mask + qindex: the index number of q, will calculate where( labeled_array == qindex) + aspect: the aspect ratio of the plot + + Return waterfall + Plot the waterfall + """ + + wat = cal_waterfallc(FD, labeled_array, qindex=qindex) + + fig, ax = plt.subplots(figsize=(8, 6)) + ax.set_ylabel("Pixel") + ax.set_xlabel("Frame") + ax.set_title("Waterfall_Plot_@qind=%s" % qindex) + + im = ax.imshow(wat.T, cmap="viridis", vmax=vmax) + fig.colorbar(im) + ax.set_aspect(aspect) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--Waterfall-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + # plt.show() + return wat + + +def cal_each_ring_mean_intensityc(FD, ring_mask, sampling=1, timeperframe=None, multi_cor=False, *argv, **kwargs): + """ + get time dependent mean intensity of each ring + """ + + mean_int_sets, index_list = mean_intensityc(FD, ring_mask, sampling, index=None, multi_cor=multi_cor) + if timeperframe is None: + times = np.arange(FD.end - FD.beg) + FD.beg # get the time for each frame + else: + times = (FD.beg + np.arange(FD.end - FD.beg)) * timeperframe + num_rings = len(np.unique(ring_mask)[1:]) + return times, mean_int_sets + + +def plot_each_ring_mean_intensityc(times, mean_int_sets, xlabel="Frame", save=False, *argv, **kwargs): + """ + Plot time dependent mean intensity of each ring + """ + num_rings = mean_int_sets.shape[1] + + fig, ax = plt.subplots(figsize=(8, 8)) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + ax.set_title("%s--Mean intensity of each ROI" % uid) + for i in range(num_rings): + # print( markers[i], colors[i] ) + ax.plot(times, mean_int_sets[:, i], label="ROI " + str(i + 1), marker=markers[i], color=colors[i], ls="-") + ax.set_xlabel(xlabel) + ax.set_ylabel("Mean Intensity") + ax.legend(loc="best", fontsize="x-small", fancybox=True, framealpha=0.5) + + if save: + path = kwargs["path"] + fp = path + "%s_t_ROIs" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + save_arrays( + np.hstack([times.reshape(len(times), 1), mean_int_sets]), + label=["frame"] + ["ROI_%d" % i for i in range(num_rings)], + filename="%s_t_ROIs" % uid, + path=path, + ) + # plt.show() + + +def get_each_ring_mean_intensityc( + FD, ring_mask, sampling=1, timeperframe=None, plot_=False, save=False, *argv, **kwargs +): + """ + get time dependent mean intensity of each ring + """ + + mean_int_sets, index_list = mean_intensityc(FD, ring_mask, sampling, index=None) + if timeperframe is None: + times = np.arange(FD.end - FD.beg) + FD.beg # get the time for each frame + else: + times = (FD.beg + np.arange(FD.end - FD.beg)) * timeperframe + num_rings = len(np.unique(ring_mask)[1:]) + + if plot_: + fig, ax = plt.subplots(figsize=(8, 8)) + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + ax.set_title("%s--Mean intensity of each ROI" % uid) + for i in range(num_rings): + ax.plot(times, mean_int_sets[:, i], label="ROI " + str(i + 1), marker="o", ls="-") + if timeperframe is not None: + ax.set_xlabel("Time, sec") + else: + 
ax.set_xlabel("Frame") + ax.set_ylabel("Mean Intensity") + ax.legend(loc="best", fontsize="x-small") + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + # fp = path + "uid= %s--Mean intensity of each ring-"%uid + CurTime + '.png' + fp = path + "%s_Mean_intensity_of_each_ROI" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + save_arrays( + np.hstack([times.reshape(len(times), 1), mean_int_sets]), + label=["frame"] + ["ROI_%d" % i for i in range(num_rings)], + filename="%s_t_ROIs" % uid, + path=path, + ) + + # plt.show() + + return times, mean_int_sets diff --git a/pyCHX/backups/pyCHX-backup/chx_correlation.py b/pyCHX/backups/pyCHX-backup/chx_correlation.py new file mode 100644 index 0000000..d636ae7 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_correlation.py @@ -0,0 +1,1167 @@ +# ###################################################################### +# Developed at the NSLS-II, Brookhaven National Laboratory # + +# # +# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # +# National Laboratory. All rights reserved. # +# # +# Redistribution and use in source and binary forms, with or without # +# modification, are permitted provided that the following conditions # +# are met: # +# # +# * Redistributions of source code must retain the above copyright # +# notice, this list of conditions and the following disclaimer. # +# # +# * Redistributions in binary form must reproduce the above copyright # +# notice this list of conditions and the following disclaimer in # +# the documentation and/or other materials provided with the # +# distribution. # +# # +# * Neither the name of the Brookhaven Science Associates, Brookhaven # +# National Laboratory nor the names of its contributors may be used # +# to endorse or promote products derived from this software without # +# specific prior written permission. # +# # +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # +# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # +# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # +# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # +# POSSIBILITY OF SUCH DAMAGE. 
# +######################################################################## + +""" +This module is for functions specific to time correlation +""" +from __future__ import absolute_import, division, print_function + +from collections import namedtuple + +import numpy as np +from scipy.signal import fftconvolve +from skbeam.core.roi import extract_label_indices +from skbeam.core.utils import multi_tau_lags + +# for a convenient status bar +try: + from tqdm import tqdm +except ImportError: + + def tqdm(iterator): + return iterator + + +import logging + +logger = logging.getLogger(__name__) + + +def _one_time_process( + buf, + G, + past_intensity_norm, + future_intensity_norm, + label_array, + num_bufs, + num_pixels, + img_per_level, + level, + buf_no, + norm, + lev_len, +): + """Reference implementation of the inner loop of multi-tau one time + correlation + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. + Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = level * num_bufs // 2 + i + delay_no = (buf_no - i) % num_bufs + + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level + 1][ind] + + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + for w, arr in zip( + [past_img * future_img, past_img, future_img], [G, past_intensity_norm, future_intensity_norm] + ): + binned = np.bincount(label_array, weights=w)[1:] + arr[t_index] += (binned / num_pixels - arr[t_index]) / normalize + return None # modifies arguments in place! 
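# A minimal sketch of how the three accumulators filled by _one_time_process
# can be combined into g2 once all frames have been processed (illustrative
# only; lazy_one_time below performs the actual normalization and truncates at
# the first zero entry of past_intensity instead of masking). The argument
# names are assumed to match the arrays created in _init_state_one_time, and
# the module-level numpy import is reused.


def _g2_from_accumulators_sketch(G, past_intensity, future_intensity):
    """Illustrative only: symmetric normalization of multi-tau accumulators.

    Each input has shape (num_lag_steps, num_rois); lag steps whose
    normalization is still zero are returned as NaN.
    """
    g2 = np.full_like(G, np.nan, dtype=np.float64)
    valid = (past_intensity != 0) & (future_intensity != 0)
    g2[valid] = G[valid] / (past_intensity[valid] * future_intensity[valid])
    return g2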
+ + +results = namedtuple("correlation_results", ["g2", "lag_steps", "internal_state"]) + +_internal_state = namedtuple( + "correlation_state", + [ + "buf", + "G", + "past_intensity", + "future_intensity", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "norm", + "lev_len", + ], +) + +_two_time_internal_state = namedtuple( + "two_time_correlation_state", + [ + "buf", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "g2", + "count_level", + "current_img_time", + "time_ind", + "norm", + "lev_len", + ], +) + + +def _init_state_one_time(num_levels, num_bufs, labels): + """Initialize a stateful namedtuple for the generator-based multi-tau + for one time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + Returns + ------- + internal_state : namedtuple + The namedtuple that contains all the state information that + `lazy_one_time` requires so that it can be used to pick up + processing after it was interrupted + """ + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # G holds the un normalized auto- correlation result. We + # accumulate computations into G as the algorithm proceeds. + # print(num_rois) + G = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois), dtype=np.float64) + # matrix for normalizing G into g2 + past_intensity = np.zeros_like(G) + # matrix for normalizing G into g2 + future_intensity = np.zeros_like(G) + + return _internal_state( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + ) + + +def lazy_one_time(image_iterable, num_levels, num_bufs, labels, internal_state=None): + """Generator implementation of 1-time multi-tau correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Parameters + ---------- + image_iterable : iterable of 2D arrays + num_levels : int + how many generations of downsampling to perform, i.e., the depth of + the binomial tree of averaged frames + num_bufs : int, must be even + maximum lag step to compute in each generation of downsampling + labels : array + Labeled array of the same shape as the image stack. + Each ROI is represented by sequential integers starting at one. For + example, if you have four ROIs, they must be labeled 1, 2, 3, + 4. Background is labeled as 0 + internal_state : namedtuple, optional + internal_state is a bucket for all of the internal state of the + generator. It is part of the `results` object that is yielded from + this generator + Yields + ------ + namedtuple + A `results` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - `g2`: the normalized correlation + shape is (len(lag_steps), num_rois) + - `lag_steps`: the times at which the correlation was computed + - `_internal_state`: all of the internal state. Can be passed back in + to `lazy_one_time` as the `internal_state` parameter + Notes + ----- + The normalized intensity-intensity time-autocorrelation function + is defined as + .. 
math:: + g_2(q, t') = \\frac{ }{^2} + t' > 0 + Here, ``I(q, t)`` refers to the scattering strength at the momentum + transfer vector ``q`` in reciprocal space at time ``t``, and the brackets + ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes + the delay time + This implementation is based on published work. [1]_ + References + ---------- + .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton, + "Area detector based photon correlation in the regime of + short data batches: Data reduction for dynamic x-ray + scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000. + """ + + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels) + # create a shorthand reference to the results and state named tuple + s = internal_state + + # iterate over the images to compute multi-tau correlation + for image in image_iterable: + # Compute the correlations for all higher levels. + level = 0 + + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = np.ravel(image)[s.pixel_list] + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + + # make the track_level zero once that level is processed + s.track_level[level] = False + + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. + buf_no = s.cur[level] - 1 + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max = np.where(s.past_intensity == 0)[0][0] + else: + g_max = s.past_intensity.shape[0] + + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + yield results(g2, s.lag_steps[:g_max], s) + + +def multi_tau_auto_corr(num_levels, num_bufs, labels, images): + """Wraps generator implementation of multi-tau + Original code(in Yorick) for multi tau auto correlation + author: Mark Sutton + For parameter description, please reference the docstring for + lazy_one_time. Note that there is an API difference between this function + and `lazy_one_time`. The `images` arugment is at the end of this function + signature here for backwards compatibility, but is the first argument in + the `lazy_one_time()` function. The semantics of the variables remain + unchanged. 
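    Examples
    --------
    A minimal usage sketch with synthetic data (illustrative only; the array
    sizes below are arbitrary choices, not values from this module):

    >>> import numpy as np
    >>> images = np.random.poisson(5, size=(8, 4, 4))  # eight 4x4 frames
    >>> labels = np.ones((4, 4), dtype=int)            # one ROI labeled 1
    >>> g2, lag_steps = multi_tau_auto_corr(2, 4, labels, images)
    >>> g2.shape[1] == 1   # one column per ROI, one row per lag step
    True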
+ """ + gen = lazy_one_time(images, num_levels, num_bufs, labels) + for result in gen: + pass + return result.g2, result.lag_steps + + +def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): + """ + This model will provide normalized intensity-intensity time + correlation data to be minimized. + Parameters + ---------- + lags : array + delay time + beta : float + optical contrast (speckle contrast), a sample-independent + beamline parameter + relaxation_rate : float + relaxation time associated with the samples dynamics. + baseline : float, optional + baseline of one time correlation + equal to one for ergodic samples + Returns + ------- + g2 : array + normalized intensity-intensity time autocorreltion + Notes : + ------- + The intensity-intensity autocorrelation g2 is connected to the intermediate + scattering factor(ISF) g1 + .. math:: + g_2(q, \\tau) = \\beta_1[g_1(q, \\tau)]^{2} + g_\infty + For a system undergoing diffusive dynamics, + .. math:: + g_1(q, \\tau) = e^{-\gamma(q) \\tau} + .. math:: + g_2(q, \\tau) = \\beta_1 e^{-2\gamma(q) \\tau} + g_\infty + These implementation are based on published work. [1]_ + References + ---------- + .. [1] L. Li, P. Kwasniewski, D. Orsi, L. Wiegart, L. Cristofolini, + C. Caronna and A. Fluerasu, " Photon statistics and speckle + visibility spectroscopy with partially coherent X-rays," + J. Synchrotron Rad. vol 21, p 1288-1295, 2014 + """ + return beta * np.exp(-2 * relaxation_rate * lags) + baseline + + +def two_time_corr(labels, images, num_frames, num_bufs, num_levels=1): + """Wraps generator implementation of multi-tau two time correlation + This function computes two-time correlation + Original code : author: Yugang Zhang + Returns + ------- + results : namedtuple + For parameter definition, see the docstring for the `lazy_two_time()` + function in this module + """ + gen = lazy_two_time(labels, images, num_frames, num_bufs, num_levels) + for result in gen: + pass + return two_time_state_to_results(result) + + +def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, two_time_internal_state=None): + """Generator implementation of two-time correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Multi-tau correlation uses a scheme to achieve long-time correlations + inexpensively by downsampling the data, iteratively combining successive + frames. + The longest lag time computed is ``num_levels * num_bufs``. + See Also + -------- + comments on `multi_tau_auto_corr` + Parameters + ---------- + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + images : iterable of 2D arrays + dimensions are: (rr, cc), iterable of 2D arrays + num_frames : int + number of images to use + default is number of images + num_bufs : int, must be even + maximum lag step to compute in each generation of + downsampling + num_levels : int, optional + how many generations of downsampling to perform, i.e., + the depth of the binomial tree of averaged frames + default is one + Yields + ------ + namedtuple + A ``results`` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - ``g2``: the normalized correlation + shape is (num_rois, len(lag_steps), len(lag_steps)) + - ``lag_steps``: the times at which the correlation was computed + - ``_internal_state``: all of the internal state. 
Can be passed back in + to ``lazy_one_time`` as the ``internal_state`` parameter + Notes + ----- + The two-time correlation function is defined as + .. math:: + C(q,t_1,t_2) = \\frac{}{} + Here, the ensemble averages are performed over many pixels of detector, + all having the same ``q`` value. The average time or age is equal to + ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal. + The time difference ``t = |t1 - t2|``, with is distance from the + ``t1 = t2`` diagonal in the perpendicular direction. + In the equilibrium system, the two-time correlation functions depend only + on the time difference ``t``, and hence the two-time correlation contour + lines are parallel. + [1]_ + References + ---------- + .. [1] A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, + "Slow dynamics and aging in collodial gels studied by x-ray + photon correlation spectroscopy," Phys. Rev. E., vol 76, p + 010401(1-4), 2007. + """ + if two_time_internal_state is None: + two_time_internal_state = _init_state_two_time(num_levels, num_bufs, labels, num_frames) + # create a shorthand reference to the results and state named tuple + s = two_time_internal_state + + for img in images: + s.cur[0] = (1 + s.cur[0]) % num_bufs # increment buffer + + s.count_level[0] = 1 + s.count_level[0] + + # get the current image time + s = s._replace(current_img_time=(s.current_img_time + 1)) + + # Put the image into the ring buffer. + s.buf[0, s.cur[0] - 1] = (np.ravel(img))[s.pixel_list] + + # print( np.sum( s.buf[0, s.cur[0] - 1] ) ) + + # Compute the two time correlations between the first level + # (undownsampled) frames. two_time and img_per_level in place! + _two_time_process( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + s.current_img_time, + level=0, + buf_no=s.cur[0] - 1, + ) + + # time frame for each level + s.time_ind[0].append(s.current_img_time) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + + # Compute the correlations for all higher levels. + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = 1 + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + s.count_level[level] = 1 + s.count_level[level] + + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + + t1_idx = (s.count_level[level] - 1) * 2 + + current_img_time = ((s.time_ind[level - 1])[t1_idx] + (s.time_ind[level - 1])[t1_idx + 1]) / 2.0 + + # time frame for each level + s.time_ind[level].append(current_img_time) + + # make the track_level zero once that level is processed + s.track_level[level] = 0 + + # call the _two_time_process function for each multi-tau level + # for multi-tau levels greater than one + # Again, this is modifying things in place. See comment + # on previous call above. 
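                # At level > 0 each buffer entry is the average of two frames from the
                # level below, so current_img_time is the midpoint of their frame times
                # and is generally fractional; _two_time_process handles such entries by
                # writing the result into a short run of (t1, t2) indices parallel to the
                # t1 = t2 diagonal (its non-integer branch).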
+ _two_time_process( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + current_img_time, + level=level, + buf_no=s.cur[level] - 1, + ) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + yield s + + +def two_time_state_to_results(state): + """Convert the internal state of the two time generator into usable results + Parameters + ---------- + state : namedtuple + The internal state that is yielded from `lazy_two_time` + Returns + ------- + results : namedtuple + A results object that contains the two time correlation results + and the lag steps + """ + for q in range(np.max(state.label_array)): + x0 = (state.g2)[q, :, :] + (state.g2)[q, :, :] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0)) + return results(state.g2, state.lag_steps, state) + + +def _two_time_process( + buf, g2, label_array, num_bufs, num_pixels, img_per_level, lag_steps, current_img_time, level, buf_no +): + """ + Parameters + ---------- + buf: array + image data array to use for two time correlation + g2: array + two time correlation matrix + shape (number of labels(ROI), number of frames, number of frames) + label_array: array + Elements not inside any ROI are zero; elements inside each + ROI are 1, 2, 3, etc. corresponding to the order they are specified + in edges and segments + num_bufs: int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are len(np.unique(label_array)) + img_per_level: array + to track how many images processed in each level + lag_steps : array + delay or lag steps for the multiple tau analysis + shape num_levels + current_img_time : int + the current image number + level : int + the current multi-tau level + buf_no : int + the current buffer number + """ + img_per_level[level] += 1 + + # in multi-tau correlation other than first level all other levels + # have to do the half of the correlation + if level == 0: + i_min = 0 + else: + i_min = num_bufs // 2 + + for i in range(i_min, min(img_per_level[level], num_bufs)): + t_index = level * num_bufs / 2 + i + delay_no = (buf_no - i) % num_bufs + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + + # get the matrix of correlation function without normalizations + tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] + # get the matrix of past intensity normalizations + pi_binned = np.bincount(label_array, weights=past_img)[1:] + + # get the matrix of future intensity normalizations + fi_binned = np.bincount(label_array, weights=future_img)[1:] + tind1 = current_img_time - 1 + tind2 = current_img_time - lag_steps[t_index] - 1 + + # print( current_img_time ) + + if not isinstance(current_img_time, int): + nshift = 2 ** (level - 1) + for i in range(-nshift + 1, nshift + 1): + g2[:, int(tind1 + i), int(tind2 + i)] = (tmp_binned / (pi_binned * fi_binned)) * num_pixels + else: + g2[:, tind1, tind2] = tmp_binned / (pi_binned * fi_binned) * num_pixels + + +def _init_state_two_time(num_levels, num_bufs, labels, num_frames): + """Initialize a stateful namedtuple for two time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + num_frames : int + number of images to use + default is number of images + Returns + ------- + internal_state : namedtuple + The namedtuple that contains all the state information that + `lazy_two_time` requires so that it can be used to 
pick up processing + after it was interrupted + """ + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # to count images in each level + count_level = np.zeros(num_levels, dtype=np.int64) + + # current image time + current_img_time = 0 + + # generate a time frame for each level + time_ind = {key: [] for key in range(num_levels)} + + # two time correlation results (array) + g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64) + + return _two_time_internal_state( + buf, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + g2, + count_level, + current_img_time, + time_ind, + norm, + lev_len, + ) + + +def _validate_and_transform_inputs(num_bufs, num_levels, labels): + """ + This is a helper function to validate inputs and create initial state + inputs for both one time and two time correlation + Parameters + ---------- + num_bufs : int + num_levels : int + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + Returns + ------- + label_array : array + labels of the required region of interests(ROI's) + pixel_list : array + 1D array of indices into the raveled image for all + foreground pixels (labeled nonzero) + e.g., [5, 6, 7, 8, 14, 15, 21, 22] + num_rois : int + number of region of interests (ROI) + num_pixels : array + number of pixels in each ROI + lag_steps : array + the times at which the correlation was computed + buf : array + image data for correlation + img_per_level : array + to track how many images processed in each level + track_level : array + to track processing each level + cur : array + to increment the buffer + norm : dict + to track bad images + lev_len : array + length of each levels + """ + if num_bufs % 2 != 0: + raise ValueError("There must be an even number of `num_bufs`. You " "provided %s" % num_bufs) + label_array, pixel_list = extract_label_indices(labels) + + # map the indices onto a sequential list of integers starting at 1 + label_mapping = {label: n + 1 for n, label in enumerate(np.unique(label_array))} + # remap the label array to go from 1 -> max(_labels) + for label, n in label_mapping.items(): + label_array[label_array == label] = n + + # number of ROI's + num_rois = len(label_mapping) + + # stash the number of pixels in the mask + num_pixels = np.bincount(label_array)[1:] + + # Convert from num_levels, num_bufs to lag frames. + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + + # these norm and lev_len will help to find the one time correlation + # normalization norm will updated when there is a bad image + norm = {key: [0] * len(dict_lag[key]) for key in (dict_lag.keys())} + lev_len = np.array([len(dict_lag[i]) for i in (dict_lag.keys())]) + + # Ring buffer, a buffer with periodic boundary conditions. + # Images must be keep for up to maximum delay in buf. 
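    # buf has shape (num_levels, num_bufs, len(pixel_list)): one ring of
    # num_bufs frames per multi-tau level, stored only for the labeled
    # (ROI) pixels.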
+ buf = np.zeros((num_levels, num_bufs, len(pixel_list)), dtype=np.float64) + # to track how many images processed in each level + img_per_level = np.zeros(num_levels, dtype=np.int64) + # to track which levels have already been processed + track_level = np.zeros(num_levels, dtype=bool) + # to increment buffer + cur = np.ones(num_levels, dtype=np.int64) + + return ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) + + +def one_time_from_two_time(two_time_corr): + """ + This will provide the one-time correlation data from two-time + correlation data. + Parameters + ---------- + two_time_corr : array + matrix of two time correlation + shape (number of labels(ROI's), number of frames, number of frames) + Returns + ------- + one_time_corr : array + matrix of one time correlation + shape (number of labels(ROI's), number of frames) + """ + + one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2])) + for g in two_time_corr: + for j in range(two_time_corr.shape[2]): + one_time_corr[:, j] = np.trace(g, offset=j) / two_time_corr.shape[2] + return one_time_corr + + +class CrossCorrelator: + """ + Compute a 1D or 2D cross-correlation on data. + This uses a mask, which may be binary (array of 0's and 1's), + or a list of non-negative integer id's to compute cross-correlations + separately on. + The symmetric averaging scheme introduced here is inspired by a paper + from Schätzel, although the implementation is novel in that it + allows for the usage of arbitrary masks. [1]_ + Examples + -------- + >> ccorr = CrossCorrelator(mask.shape, mask=mask) + >> # correlated image + >> cimg = cc(img1) + or, mask may m + >> cc = CrossCorrelator(ids) + #(where ids is same shape as img1) + >> cc1 = cc(img1) + >> cc12 = cc(img1, img2) + # if img2 shifts right of img1, point of maximum correlation is shifted + # right from correlation center + References + ---------- + .. [1] Schätzel, Klaus, Martin Drewel, and Sven Stimac. “Photon + correlation measurements at large lag times: improving + statistical accuracy.” Journal of Modern Optics 35.4 (1988): + 711-718. + """ + + # TODO : when mask is None, don't compute a mask, submasks + def __init__(self, shape, mask=None, normalization=None, wrap=False): + """ + Prepare the spatial correlator for various regions specified by the + id's in the image. + Parameters + ---------- + shape : 1 or 2-tuple + The shape of the incoming images or curves. May specify 1D or + 2D shapes by inputting a 1 or 2-tuple + mask : 1D or 2D np.ndarray of int, optional + Each non-zero integer represents unique bin. Zero integers are + assumed to be ignored regions. If None, creates a mask with + all points set to 1 + normalization: string or list of strings, optional + These specify the normalization and may be any of the + following: + 'regular' : divide by pixel number + 'symavg' : use symmetric averaging + Defaults to ['regular'] normalization + wrap : bool, optional + If False, assume dimensions don't wrap around. If True + assume they do. The latter is useful for circular + dimensions such as angle. 
+ """ + if normalization is None: + normalization = ["regular"] + elif not isinstance(normalization, list): + normalization = list([normalization]) + + self.wrap = wrap + self.normalization = normalization + + if mask is None: + mask = np.ones(shape) + + # the IDs for the image, called mask + self.mask = mask + # initialize all the masks for the correlation + + # Making a list of arrays holding the masks for each id. Ideally, mask + # is binary so this is one element to quickly index original images + self.pxlsts = list() + self.submasks = list() + # to quickly index the sub images + self.subpxlsts = list() + # the temporary images (double the size for the cross correlation) + self.tmpimgs = list() + self.tmpimgs2 = list() + self.centers = list() + self.shapes = list() # the shapes of each correlation + # the positions of each axes of each correlation + self.positions = list() + + self.ids = np.sort(np.unique(mask)) + # remove the zero since we ignore, but only if it is there (sometimes + # may not be) + if self.ids[0] == 0: + self.ids = self.ids[1:] + + self.nids = len(self.ids) + self.maskcorrs = list() + # regions where the correlations are not zero + self.pxlst_maskcorrs = list() + + # basically saving bunch of mask related stuff like indexing etc, just + # to save some time when actually computing the cross correlations + for idno in self.ids: + masktmp = mask == idno + self.pxlsts.append(np.where(masktmp.ravel() == 1)[0]) + + # this could be replaced by skimage cropping and padding + submasktmp = _crop_from_mask(masktmp) + + if self.wrap is False: + submask = _expand_image(submasktmp) + + tmpimg = np.zeros_like(submask) + + self.submasks.append(submask) + self.subpxlsts.append(np.where(submask.ravel() == 1)[0]) + self.tmpimgs.append(tmpimg) + # make sure it's a copy and not a ref + self.tmpimgs2.append(tmpimg.copy()) + maskcorr = _cross_corr(submask) + # quick fix for finite numbers should be integer so + # choose some small value to threshold + maskcorr *= maskcorr > 0.5 + self.maskcorrs.append(maskcorr) + self.pxlst_maskcorrs.append(maskcorr > 0) + # centers are shape//2 as performed by fftshift + center = np.array(maskcorr.shape) // 2 + self.centers.append(np.array(maskcorr.shape) // 2) + self.shapes.append(np.array(maskcorr.shape)) + if mask.ndim == 1: + self.positions.append(np.arange(maskcorr.shape[0]) - center[0]) + elif mask.ndim == 2: + self.positions.append( + [np.arange(maskcorr.shape[0]) - center[0], np.arange(maskcorr.shape[1]) - center[1]] + ) + + if len(self.ids) == 1: + self.positions = self.positions[0] + self.centers = self.centers[0] + self.shapes = self.shapes[0] + + def __call__(self, img1, img2=None, normalization=None): + """Run the cross correlation on an image/curve or against two + images/curves + Parameters + ---------- + img1 : 1D or 2D np.ndarray + The image (or curve) to run the cross correlation on + img2 : 1D or 2D np.ndarray + If not set to None, run cross correlation of this image (or + curve) against img1. Default is None. + normalization : string or list of strings + normalization types. If not set, use internally saved + normalization parameters + Returns + ------- + ccorrs : 1d or 2d np.ndarray + An image of the correlation. 
The zero correlation is + located at shape//2 where shape is the 1 or 2-tuple + shape of the array + """ + if normalization is None: + normalization = self.normalization + + if img2 is None: + self_correlation = True + img2 = img1 + else: + self_correlation = False + + ccorrs = list() + rngiter = tqdm(range(self.nids)) + + for i in rngiter: + self.tmpimgs[i] *= 0 + self.tmpimgs[i].ravel()[self.subpxlsts[i]] = img1.ravel()[self.pxlsts[i]] + if not self_correlation: + self.tmpimgs2[i] *= 0 + self.tmpimgs2[i].ravel()[self.subpxlsts[i]] = img2.ravel()[self.pxlsts[i]] + + # multiply by maskcorrs > 0 to ignore invalid regions + if self_correlation: + ccorr = _cross_corr(self.tmpimgs[i]) * (self.maskcorrs[i] > 0) + else: + ccorr = _cross_corr(self.tmpimgs[i], self.tmpimgs2[i]) * (self.maskcorrs[i] > 0) + + # now handle the normalizations + if "symavg" in normalization: + # do symmetric averaging + Icorr = _cross_corr(self.tmpimgs[i] * self.submasks[i], self.submasks[i]) + if self_correlation: + Icorr2 = _cross_corr(self.submasks[i], self.tmpimgs[i] * self.submasks[i]) + else: + Icorr2 = _cross_corr(self.submasks[i], self.tmpimgs2[i] * self.submasks[i]) + # there is an extra condition that Icorr*Icorr2 != 0 + w = np.where(np.abs(Icorr * Icorr2) > 0) + ccorr[w] *= self.maskcorrs[i][w] / Icorr[w] / Icorr2[w] + + if "regular" in normalization: + # only run on overlapping regions for correlation + w = self.pxlst_maskcorrs[i] + ccorr[w] /= self.maskcorrs[i][w] * np.average(self.tmpimgs[i].ravel()[self.subpxlsts[i]]) ** 2 + + ccorrs.append(ccorr) + + if len(ccorrs) == 1: + ccorrs = ccorrs[0] + + return ccorrs + + +def _cross_corr(img1, img2=None): + """Compute the cross correlation of one (or two) images. + Parameters + ---------- + img1 : np.ndarray + the image or curve to cross correlate + img2 : 1d or 2d np.ndarray, optional + If set, cross correlate img1 against img2. A shift of img2 + to the right of img1 will lead to a shift of the point of + highest correlation to the right. + Default is set to None + """ + ndim = img1.ndim + + if img2 is None: + img2 = img1 + + if img1.shape != img2.shape: + errorstr = "Image shapes don't match. " + errorstr += "(img1 : {},{}; img2 : {},{})".format(*img1.shape, *img2.shape) + raise ValueError(errorstr) + + # need to reverse indices for second image + # fftconvolve(A,B) = FFT^(-1)(FFT(A)*FFT(B)) + # but need FFT^(-1)(FFT(A(x))*conj(FFT(B(x)))) = FFT^(-1)(A(x)*B(-x)) + reverse_index = [slice(None, None, -1) for i in range(ndim)] + imgc = fftconvolve(img1, img2[reverse_index], mode="same") + + return imgc + + +def _crop_from_mask(mask): + """ + Crop an image from a given mask + Parameters + ---------- + mask : 1d or 2d np.ndarray + The data to be cropped. This consists of integers >=0. + Regions with 0 are masked and regions > 1 are kept. + Returns + ------- + mask : 1d or 2d np.ndarray + The cropped image. This image is cropped as much as possible + without losing unmasked data. 
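    Examples
    --------
    A small synthetic sketch (illustrative only):

    >>> import numpy as np
    >>> m = np.zeros((5, 7), dtype=int)
    >>> m[1:3, 2:5] = 1
    >>> _crop_from_mask(m).shape   # bounding box of the nonzero region
    (2, 3)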
+ """ + dims = mask.shape + pxlst = np.where(mask.ravel() != 0)[0] + # this is the assumed width along the fastest-varying dimension + if len(dims) > 1: + imgwidth = dims[1] + else: + imgwidth = 1 + # A[row,col] where row is y and col is x + # (matrix notation) + pixely = pxlst % imgwidth + pixelx = pxlst // imgwidth + + minpixelx = np.min(pixelx) + minpixely = np.min(pixely) + maxpixelx = np.max(pixelx) + maxpixely = np.max(pixely) + + oldimg = np.zeros(dims) + oldimg.ravel()[pxlst] = 1 + + if len(dims) > 1: + mask = np.copy(oldimg[minpixelx : maxpixelx + 1, minpixely : maxpixely + 1]) + else: + mask = np.copy(oldimg[minpixelx : maxpixelx + 1]) + + return mask + + +def _expand_image(img): + """Convenience routine to make an image with twice the size, plus one. + Parameters + ---------- + img : 1d or 2d np.ndarray + The image (or curve) to expand + Returns + ------- + img : 1d or 2d np.ndarray + The expanded image + """ + imgold = img + dims = imgold.shape + if len(dims) > 1: + img = np.zeros((dims[0] * 2 + 1, dims[1] * 2 + 1)) + img[: dims[0], : dims[1]] = imgold + else: + img = np.zeros((dims[0] * 2 + 1)) + img[: dims[0]] = imgold + + return img diff --git a/pyCHX/backups/pyCHX-backup/chx_correlationc.py b/pyCHX/backups/pyCHX-backup/chx_correlationc.py new file mode 100644 index 0000000..02bc754 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_correlationc.py @@ -0,0 +1,1873 @@ +""" +June 10, Developed by Y.G.@CHX with the assistance of Mark Sutton +yuzhang@bnl.gov +This module is for computation of time correlation by using compressing algorithm +""" + +from __future__ import absolute_import, division, print_function + +import logging +from collections import namedtuple + +import numpy as np +import skbeam.core.roi as roi +from skbeam.core.roi import extract_label_indices +from skbeam.core.utils import multi_tau_lags + +logger = logging.getLogger(__name__) +from tqdm import tqdm + + +def _one_time_process( + buf, + G, + past_intensity_norm, + future_intensity_norm, + label_array, + num_bufs, + num_pixels, + img_per_level, + level, + buf_no, + norm, + lev_len, +): + """Reference implementation of the inner loop of multi-tau one time + correlation + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. + Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. 
math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + # maxqind=G.shape[1] + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = int(level * num_bufs / 2 + i) + delay_no = (buf_no - i) % num_bufs + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level + 1][ind] + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + for w, arr in zip( + [past_img * future_img, past_img, future_img], [G, past_intensity_norm, future_intensity_norm] + ): + binned = np.bincount(label_array, weights=w)[1:] + # nonz = np.where(w)[0] + # binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + arr[t_index] += (binned / num_pixels - arr[t_index]) / normalize + return None # modifies arguments in place! + + +def _one_time_process_error( + buf, + G, + past_intensity_norm, + future_intensity_norm, + label_array, + num_bufs, + num_pixels, + img_per_level, + level, + buf_no, + norm, + lev_len, + G_err, + past_intensity_norm_err, + future_intensity_norm_err, +): + """Reference implementation of the inner loop of multi-tau one time + correlation with the calculation of errorbar (statistical error due to multipixel measurements ) + The statistical error: var( g2(Q) ) = sum( [g2(Qi)- g2(Q)]^2 )/N(N-1), Lumma, RSI, 2000 + This helper function calculates G, past_intensity_norm and + future_intensity_norm at each level, symmetric normalization is used. + .. warning :: This modifies inputs in place. + Parameters + ---------- + buf : array + image data array to use for correlation + G : array + matrix of auto-correlation function without normalizations + past_intensity_norm : array + matrix of past intensity normalizations + future_intensity_norm : array + matrix of future intensity normalizations + label_array : array + labeled array where all nonzero values are ROIs + num_bufs : int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are : [number of ROI's]X1 + img_per_level : array + to track how many images processed in each level + level : int + the current multi-tau level + buf_no : int + the current buffer number + norm : dict + to track bad images + lev_len : array + length of each level + Notes + ----- + .. math:: + G = + .. math:: + past_intensity_norm = + .. 
math:: + future_intensity_norm = + """ + img_per_level[level] += 1 + # in multi-tau correlation, the subsequent levels have half as many + # buffers as the first + i_min = num_bufs // 2 if level else 0 + # maxqind=G.shape[1] + for i in range(i_min, min(img_per_level[level], num_bufs)): + # compute the index into the autocorrelation matrix + t_index = int(level * num_bufs / 2 + i) + delay_no = (buf_no - i) % num_bufs + # get the images for correlating + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + # find the normalization that can work both for bad_images + # and good_images + ind = int(t_index - lev_len[:level].sum()) + normalize = img_per_level[level] - i - norm[level + 1][ind] + # take out the past_ing and future_img created using bad images + # (bad images are converted to np.nan array) + if np.isnan(past_img).any() or np.isnan(future_img).any(): + norm[level + 1][ind] += 1 + else: + + # for w, arr in zip([past_img*future_img, past_img, future_img], + # [G, past_intensity_norm, future_intensity_norm, + # ]): + # binned = np.bincount(label_array, weights=w)[1:] + # #nonz = np.where(w)[0] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # arr[t_index] += ((binned / num_pixels - + # arr[t_index]) / normalize) + for w, arr in zip( + [past_img * future_img, past_img, future_img], + [ + G_err, + past_intensity_norm_err, + future_intensity_norm_err, + ], + ): + arr[t_index] += (w - arr[t_index]) / normalize + return None # modifies arguments in place! + + +results = namedtuple("correlation_results", ["g2", "lag_steps", "internal_state"]) + +_internal_state = namedtuple( + "correlation_state", + [ + "buf", + "G", + "past_intensity", + "future_intensity", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "norm", + "lev_len", + ], +) + +_internal_state_err = namedtuple( + "correlation_state", + [ + "buf", + "G", + "past_intensity", + "future_intensity", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "norm", + "lev_len", + "G_all", + "past_intensity_all", + "future_intensity_all", + ], +) + + +_two_time_internal_state = namedtuple( + "two_time_correlation_state", + [ + "buf", + "img_per_level", + "label_array", + "track_level", + "cur", + "pixel_list", + "num_pixels", + "lag_steps", + "g2", + "count_level", + "current_img_time", + "time_ind", + "norm", + "lev_len", + ], +) + + +def _validate_and_transform_inputs(num_bufs, num_levels, labels): + """ + This is a helper function to validate inputs and create initial state + inputs for both one time and two time correlation + Parameters + ---------- + num_bufs : int + num_levels : int + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + Returns + ------- + label_array : array + labels of the required region of interests(ROI's) + pixel_list : array + 1D array of indices into the raveled image for all + foreground pixels (labeled nonzero) + e.g., [5, 6, 7, 8, 14, 15, 21, 22] + num_rois : int + number of region of interests (ROI) + num_pixels : array + number of pixels in each ROI + lag_steps : array + the times at which the correlation was computed + buf : array + image data for correlation + img_per_level : array + to track how many images processed in each level + track_level : array + to track processing each level + cur : array + to increment the buffer + norm : dict + to track bad 
images + lev_len : array + length of each levels + """ + if num_bufs % 2 != 0: + raise ValueError("There must be an even number of `num_bufs`. You " "provided %s" % num_bufs) + label_array, pixel_list = extract_label_indices(labels) + + # map the indices onto a sequential list of integers starting at 1 + label_mapping = {label: n + 1 for n, label in enumerate(np.unique(label_array))} + # remap the label array to go from 1 -> max(_labels) + for label, n in label_mapping.items(): + label_array[label_array == label] = n + + # number of ROI's + num_rois = len(label_mapping) + + # stash the number of pixels in the mask + num_pixels = np.bincount(label_array)[1:] + + # Convert from num_levels, num_bufs to lag frames. + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + + # these norm and lev_len will help to find the one time correlation + # normalization norm will updated when there is a bad image + norm = {key: [0] * len(dict_lag[key]) for key in (dict_lag.keys())} + lev_len = np.array([len(dict_lag[i]) for i in (dict_lag.keys())]) + + # Ring buffer, a buffer with periodic boundary conditions. + # Images must be keep for up to maximum delay in buf. + buf = np.zeros((num_levels, num_bufs, len(pixel_list)), dtype=np.float64) + # to track how many images processed in each level + img_per_level = np.zeros(num_levels, dtype=np.int64) + # to track which levels have already been processed + track_level = np.zeros(num_levels, dtype=bool) + # to increment buffer + cur = np.ones(num_levels, dtype=np.int64) + + return ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) + + +def _init_state_one_time(num_levels, num_bufs, labels, cal_error=False): + """Initialize a stateful namedtuple for the generator-based multi-tau + for one time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + Returns + ------- + internal_state : namedtuple + The namedtuple that contains all the state information that + `lazy_one_time` requires so that it can be used to pick up + processing after it was interrupted + """ + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # G holds the un normalized auto- correlation result. We + # accumulate computations into G as the algorithm proceeds. 
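# A minimal, illustrative sketch (plain numpy, hypothetical names) of the
# multi-tau lag-step layout that the accumulators below assume: level 0 keeps
# `num_bufs` unit-spaced lags and every higher level keeps `num_bufs // 2`
# lags at double the previous spacing, so G / past_intensity / future_intensity
# need (num_levels + 1) * num_bufs // 2 rows. The module itself obtains the
# real schedule from skbeam.core.utils.multi_tau_lags.
import numpy as np

def sketch_multi_tau_lags(num_levels, num_bufs):
    """Lag steps (in frames) of a multi-tau scheme; illustrative only."""
    lags = list(range(num_bufs))                                   # level 0
    for level in range(1, num_levels):
        step = 2 ** level                                          # frames averaged per buffer
        lags += [(num_bufs // 2 + k) * step for k in range(num_bufs // 2)]
    return np.array(lags)

# sketch_multi_tau_lags(3, 8) -> 16 lags: [0..7, 8, 10, 12, 14, 16, 20, 24, 28]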
+ + G = np.zeros((int((num_levels + 1) * num_bufs / 2), num_rois), dtype=np.float64) + + # matrix for normalizing G into g2 + past_intensity = np.zeros_like(G) + # matrix for normalizing G into g2 + future_intensity = np.zeros_like(G) + if cal_error: + G_all = np.zeros((int((num_levels + 1) * num_bufs / 2), len(pixel_list)), dtype=np.float64) + + # matrix for normalizing G into g2 + past_intensity_all = np.zeros_like(G_all) + # matrix for normalizing G into g2 + future_intensity_all = np.zeros_like(G_all) + return _internal_state_err( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + G_all, + past_intensity_all, + future_intensity_all, + ) + else: + return _internal_state( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + ) + + +def fill_pixel(p, v, pixelist): + fra_pix = np.zeros_like(pixelist) + fra_pix[np.in1d(pixelist, p)] = v[np.in1d(p, pixelist)] + return fra_pix + + +def lazy_one_time( + FD, + num_levels, + num_bufs, + labels, + internal_state=None, + bad_frame_list=None, + imgsum=None, + norm=None, + cal_error=False, +): + """Generator implementation of 1-time multi-tau correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + The number of bins (of size 1) is one larger than the largest value in + `x`. If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Jan 2, 2018 YG. Add error bar calculation + + Parameters + ---------- + image_iterable : FD, a compressed eiger file by Multifile class + num_levels : int + how many generations of downsampling to perform, i.e., the depth of + the binomial tree of averaged frames + num_bufs : int, must be even + maximum lag step to compute in each generation of downsampling + labels : array + Labeled array of the same shape as the image stack. + Each ROI is represented by sequential integers starting at one. For + example, if you have four ROIs, they must be labeled 1, 2, 3, + 4. Background is labeled as 0 + internal_state : namedtuple, optional + internal_state is a bucket for all of the internal state of the + generator. It is part of the `results` object that is yielded from + this generator + + For the sake of normalization: + + imgsum: a list with the same length as FD, sum of each frame + qp, iq: the circular average radius (in pixel) and intensity + center: beam center + + Yields + ------ + + Returns + ------- + + A `results` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - `g2`: the normalized correlation + shape is (len(lag_steps), num_rois) + - `lag_steps`: the times at which the correlation was computed + - `_internal_state`: all of the internal state. Can be passed back in + to `lazy_one_time` as the `internal_state` parameter + Notes + ----- + The normalized intensity-intensity time-autocorrelation function + is defined as + .. 
math:: + g_2(q, t') = \\frac{ }{^2} + t' > 0 + Here, ``I(q, t)`` refers to the scattering strength at the momentum + transfer vector ``q`` in reciprocal space at time ``t``, and the brackets + ``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes + the delay time + This implementation is based on published work. [1]_ + References + ---------- + .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton, + "Area detector based photon correlation in the regime of + short data batches: Data reduction for dynamic x-ray + scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000. + """ + + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + + if bad_frame_list is None: + bad_frame_list = [] + for i in tqdm(range(FD.beg, FD.end)): + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + + if imgsum is None: + if norm is None: + fra_pix[pxlist] = v[w] + else: + S = norm.shape + if len(S) > 1: + fra_pix[pxlist] = v[w] / norm[i, pxlist] # -1.0 + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + S = norm.shape + if len(S) > 1: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[i, pxlist] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + + # print( i, len(p), len(w), len( pixelist)) + + # print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + if cal_error: + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + + # make the track_level zero once that level is processed + s.track_level[level] = False + + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. 
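# A minimal sketch (toy numbers, hypothetical names) of the `timg` lookup built
# above: the compressed reader returns each frame as sparse (raveled pixel
# indices p, counts v); `timg` maps a raveled detector index to 1 + its
# position in `pixelist`, so ROI hits can be scattered into `fra_pix` without
# any searching.
import numpy as np

pixelist = np.array([2, 3, 7, 10])                 # raveled indices inside the ROIs
timg = np.zeros(12, dtype=np.int32)                # pretend the detector has 12 pixels
timg[pixelist] = np.arange(1, len(pixelist) + 1)   # 0 marks "not in any ROI"

p = np.array([0, 3, 7, 11])                        # nonzero pixels of one frame
v = np.array([5.0, 2.0, 1.0, 9.0])                 # their photon counts
w = np.where(timg[p])[0]                           # hits that fall inside an ROI
pxlist = timg[p[w]] - 1                            # positions within `pixelist`

fra_pix = np.zeros(len(pixelist))
fra_pix[pxlist] = v[w]                             # -> [0., 2., 1., 0.]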
+ buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. + if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min(g_max1, g_max2) + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + yield results(g2, s.lag_steps[:g_max], s) + else: + yield results(None, s.lag_steps, s) + + +def lazy_one_time_debug( + FD, + num_levels, + num_bufs, + labels, + internal_state=None, + bad_frame_list=None, + imgsum=None, + norm=None, + cal_error=False, +): + if internal_state is None: + internal_state = _init_state_one_time(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: + bad_frame_list = [] + for i in range(FD.beg, FD.end): + print(i) + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + # print( i, len(p), len(w), len( pixelist)) + # print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! 
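# A minimal sketch (toy arrays) of the symmetric normalization and the g_max
# guard applied when these generators yield: g2 is only formed up to the first
# lag channel whose past/future normalization is still zero, i.e. a channel
# that never received enough frames.
import numpy as np

G = np.array([[4.2], [4.0], [3.8], [0.0]])                # accumulated <I(t) I(t+tau)>
past_intensity = np.array([[2.0], [2.0], [1.9], [0.0]])   # last channel never filled
future_intensity = np.array([[2.0], [1.9], [2.0], [0.0]])

zp = np.where(past_intensity == 0)[0]
zf = np.where(future_intensity == 0)[0]
g_max1 = zp[0] if len(zp) else past_intensity.shape[0]
g_max2 = zf[0] if len(zf) else future_intensity.shape[0]
g_max = min(g_max1, g_max2)                               # -> 3 usable channels

g2 = G[:g_max] / (past_intensity[:g_max] * future_intensity[:g_max])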
+ if cal_error: + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + # make the track_level zero once that level is processed + s.track_level[level] = False + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. + buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_error( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_process( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + level += 1 + # Checking whether there is next level for processing + processing = level < num_levels + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. + if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min(g_max1, g_max2) + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + yield results(g2, s.lag_steps[:g_max], s) + # yield( i ) + + else: + yield results(None, s.lag_steps, s) + + +def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): + """ + This model will provide normalized intensity-intensity time + correlation data to be minimized. + Parameters + ---------- + lags : array + delay time + beta : float + optical contrast (speckle contrast), a sample-independent + beamline parameter + relaxation_rate : float + relaxation time associated with the samples dynamics. + baseline : float, optional + baseline of one time correlation + equal to one for ergodic samples + Returns + ------- + g2 : array + normalized intensity-intensity time autocorreltion + Notes : + ------- + The intensity-intensity autocorrelation g2 is connected to the intermediate + scattering factor(ISF) g1 + .. math:: + g_2(q, \\tau) = \\beta_1[g_1(q, \\tau)]^{2} + g_\infty + For a system undergoing diffusive dynamics, + .. math:: + g_1(q, \\tau) = e^{-\gamma(q) \\tau} + .. math:: + g_2(q, \\tau) = \\beta_1 e^{-2\gamma(q) \\tau} + g_\infty + These implementation are based on published work. [1]_ + References + ---------- + .. [1] L. Li, P. Kwasniewski, D. Orsi, L. 
Wiegart, L. Cristofolini, + C. Caronna and A. Fluerasu, " Photon statistics and speckle + visibility spectroscopy with partially coherent X-rays," + J. Synchrotron Rad. vol 21, p 1288-1295, 2014 + """ + return beta * np.exp(-2 * relaxation_rate * lags) + baseline + + +def multi_tau_auto_corr( + num_levels, num_bufs, labels, images, bad_frame_list=None, imgsum=None, norm=None, cal_error=False +): + """Wraps generator implementation of multi-tau + Original code(in Yorick) for multi tau auto correlation + author: Mark Sutton + For parameter description, please reference the docstring for + lazy_one_time. Note that there is an API difference between this function + and `lazy_one_time`. The `images` arugment is at the end of this function + signature here for backwards compatibility, but is the first argument in + the `lazy_one_time()` function. The semantics of the variables remain + unchanged. + """ + gen = lazy_one_time( + images, + num_levels, + num_bufs, + labels, + bad_frame_list=bad_frame_list, + imgsum=imgsum, + norm=norm, + cal_error=cal_error, + ) + for result in gen: + pass + if cal_error: + return result.g2, result.lag_steps, result.internal_state + else: + return result.g2, result.lag_steps + + +def multi_tau_two_time_auto_corr(num_lev, num_buf, ring_mask, FD, bad_frame_list=None, imgsum=None, norm=None): + """Wraps generator implementation of multi-tau two time correlation + This function computes two-time correlation + Original code : author: Yugang Zhang + Returns + ------- + results : namedtuple + For parameter definition, see the docstring for the `lazy_two_time()` + function in this module + """ + gen = lazy_two_time( + FD, + num_lev, + num_buf, + ring_mask, + two_time_internal_state=None, + bad_frame_list=bad_frame_list, + imgsum=imgsum, + norm=norm, + ) + for result in gen: + pass + return two_time_state_to_results(result) + + +def lazy_two_time( + FD, num_levels, num_bufs, labels, two_time_internal_state=None, bad_frame_list=None, imgsum=None, norm=None +): + + # def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, + # two_time_internal_state=None): + """Generator implementation of two-time correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Multi-tau correlation uses a scheme to achieve long-time correlations + inexpensively by downsampling the data, iteratively combining successive + frames. + The longest lag time computed is num_levels * num_bufs. + ** see comments on multi_tau_auto_corr + Parameters + ---------- + FD: the handler of compressed data + num_levels : int, optional + how many generations of downsampling to perform, i.e., + the depth of the binomial tree of averaged frames + default is one + num_bufs : int, must be even + maximum lag step to compute in each generation of + downsampling + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + two_time_internal_state: None + + + Yields + ------ + namedtuple + A ``results`` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - ``g2``: the normalized correlation + shape is (num_rois, len(lag_steps), len(lag_steps)) + - ``lag_steps``: the times at which the correlation was computed + - ``_internal_state``: all of the internal state. Can be passed back in + to ``lazy_one_time`` as the ``internal_state`` parameter + Notes + ----- + The two-time correlation function is defined as + .. 
math:: + C(q,t_1,t_2) = \\frac{}{} + Here, the ensemble averages are performed over many pixels of detector, + all having the same ``q`` value. The average time or age is equal to + ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal. + The time difference ``t = |t1 - t2|``, with is distance from the + ``t1 = t2`` diagonal in the perpendicular direction. + In the equilibrium system, the two-time correlation functions depend only + on the time difference ``t``, and hence the two-time correlation contour + lines are parallel. + References + ---------- + .. [1] + A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, "Slow dynamics + and aging in collodial gels studied by x-ray photon correlation + spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. + """ + + num_frames = FD.end - FD.beg + if two_time_internal_state is None: + two_time_internal_state = _init_state_two_time(num_levels, num_bufs, labels, num_frames) + # create a shorthand reference to the results and state named tuple + s = two_time_internal_state + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: + bad_frame_list = [] + + for i in tqdm(range(FD.beg, FD.end)): + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + s.count_level[0] = 1 + s.count_level[0] + # get the current image time + s = s._replace(current_img_time=(s.current_img_time + 1)) + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + _two_time_process( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + s.current_img_time, + level=0, + buf_no=s.cur[0] - 1, + ) + # time frame for each level + s.time_ind[0].append(s.current_img_time) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + # Compute the correlations for all higher levels. + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = 1 + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + + t1_idx = (s.count_level[level] - 1) * 2 + + current_img_time = ((s.time_ind[level - 1])[t1_idx] + (s.time_ind[level - 1])[t1_idx + 1]) / 2.0 + # time frame for each level + s.time_ind[level].append(current_img_time) + # make the track_level zero once that level is processed + s.track_level[level] = 0 + # call the _two_time_process function for each multi-tau level + # for multi-tau levels greater than one + # Again, this is modifying things in place. See comment + # on previous call above. 
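# A minimal sketch (random toy data, single ROI, no multi-tau buffering) of the
# quantity this loop fills in:
#   C(t1, t2) = <I(p, t1) I(p, t2)>_p / ( <I(p, t1)>_p <I(p, t2)>_p ),
# with the averages taken over the pixels p of one ROI.
import numpy as np

rng = np.random.default_rng(0)
n_frames, n_pix = 6, 50
I = rng.poisson(5.0, size=(n_frames, n_pix)).astype(float)   # intensity per frame, pixel

num = I @ I.T / n_pix                        # <I(t1) I(t2)> over the ROI pixels
mean_t = I.mean(axis=1)                      # <I(t)> over the ROI pixels
C = num / np.outer(mean_t, mean_t)           # two-time matrix, shape (n_frames, n_frames)
# Cuts parallel to the t1 = t2 diagonal correspond to a fixed delay tau.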
+ _two_time_process( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + current_img_time, + level=level, + buf_no=s.cur[level] - 1, + ) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + # print (s.g2[1,:,1] ) + yield s + + +def two_time_state_to_results(state): + """Convert the internal state of the two time generator into usable results + Parameters + ---------- + state : namedtuple + The internal state that is yielded from `lazy_two_time` + Returns + ------- + results : namedtuple + A results object that contains the two time correlation results + and the lag steps + """ + for q in range(np.max(state.label_array)): + x0 = (state.g2)[q, :, :] + (state.g2)[q, :, :] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0)) + return results(state.g2, state.lag_steps, state) + + +def _two_time_process( + buf, g2, label_array, num_bufs, num_pixels, img_per_level, lag_steps, current_img_time, level, buf_no +): + """ + Parameters + ---------- + buf: array + image data array to use for two time correlation + g2: array + two time correlation matrix + shape (number of labels(ROI), number of frames, number of frames) + label_array: array + Elements not inside any ROI are zero; elements inside each + ROI are 1, 2, 3, etc. corresponding to the order they are specified + in edges and segments + num_bufs: int, even + number of buffers(channels) + num_pixels : array + number of pixels in certain ROI's + ROI's, dimensions are len(np.unique(label_array)) + img_per_level: array + to track how many images processed in each level + lag_steps : array + delay or lag steps for the multiple tau analysis + shape num_levels + current_img_time : int + the current image number + level : int + the current multi-tau level + buf_no : int + the current buffer number + """ + img_per_level[level] += 1 + + # in multi-tau correlation other than first level all other levels + # have to do the half of the correlation + if level == 0: + i_min = 0 + else: + i_min = num_bufs // 2 + + for i in range(i_min, min(img_per_level[level], num_bufs)): + t_index = level * num_bufs / 2 + i + delay_no = (buf_no - i) % num_bufs + past_img = buf[level, delay_no] + future_img = buf[level, buf_no] + + # print( np.sum( past_img ), np.sum( future_img )) + + # get the matrix of correlation function without normalizations + tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] + # get the matrix of past intensity normalizations + pi_binned = np.bincount(label_array, weights=past_img)[1:] + + # get the matrix of future intensity normalizations + fi_binned = np.bincount(label_array, weights=future_img)[1:] + + tind1 = current_img_time - 1 + tind2 = current_img_time - lag_steps[int(t_index)] - 1 + # print( current_img_time ) + + if not isinstance(current_img_time, int): + nshift = 2 ** (level - 1) + for i in range(-nshift + 1, nshift + 1): + g2[:, int(tind1 + i), int(tind2 + i)] = (tmp_binned / (pi_binned * fi_binned)) * num_pixels + else: + g2[:, int(tind1), int(tind2)] = tmp_binned / (pi_binned * fi_binned) * num_pixels + + # print( num_pixels ) + + +def _init_state_two_time(num_levels, num_bufs, labels, num_frames): + """Initialize a stateful namedtuple for two time correlation + Parameters + ---------- + num_levels : int + num_bufs : int + labels : array + Two dimensional labeled array that contains ROI information + num_frames : int + number of images to use + default is number of images + Returns + ------- + internal_state : 
namedtuple + The namedtuple that contains all the state information that + `lazy_two_time` requires so that it can be used to pick up processing + after it was interrupted + """ + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + # to count images in each level + count_level = np.zeros(num_levels, dtype=np.int64) + + # current image time + current_img_time = 0 + + # generate a time frame for each level + time_ind = {key: [] for key in range(num_levels)} + + # two time correlation results (array) + g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64) + + return _two_time_internal_state( + buf, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + g2, + count_level, + current_img_time, + time_ind, + norm, + lev_len, + ) + + +def one_time_from_two_time(two_time_corr): + """ + This will provide the one-time correlation data from two-time + correlation data. + Parameters + ---------- + two_time_corr : array + matrix of two time correlation + shape (number of labels(ROI's), number of frames, number of frames) + Returns + ------- + one_time_corr : array + matrix of one time correlation + shape (number of labels(ROI's), number of frames) + """ + + one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2])) + for g in two_time_corr: + for j in range(two_time_corr.shape[2]): + one_time_corr[:, j] = np.trace(g, offset=j) / two_time_corr.shape[2] + return one_time_corr + + +def cal_c12c(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None, imgsum=None, norm=None): + """calculation two_time correlation by using a multi-tau algorithm""" + + # noframes = FD.end - good_start # number of frames, not "no frames" + + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # number of frames, not "no frames" + # num_buf = 8 # number of buffers + + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("Bad frame involved and will be precessed!") + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + print("%s frames will be processed..." 
% (noframes)) + + c12, lag_steps, state = multi_tau_two_time_auto_corr( + num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm + ) + + print("Two Time Calculation is DONE!") + m, n, n = c12.shape + # print( m,n,n) + c12_ = np.zeros([n, n, m]) + for i in range(m): + c12_[:, :, i] = c12[i] + return c12_, lag_steps + + +def cal_g2c( + FD, + ring_mask, + bad_frame_list=None, + good_start=0, + num_buf=8, + num_lev=None, + imgsum=None, + norm=None, + cal_error=False, +): + """calculation g2 by using a multi-tau algorithm""" + + # noframes = FD.end - good_start # number of frames, not "no frames" + + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # number of frames, not "no frames" + # num_buf = 8 # number of buffers + + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("Bad frame involved and will be precessed!") + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + + print("%s frames will be processed..." % (noframes)) + if cal_error: + g2, lag_steps, s = multi_tau_auto_corr( + num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm, cal_error=cal_error + ) + + g2 = np.zeros_like(s.G) + g2_err = np.zeros_like(g2) + qind, pixelist = extract_label_indices(ring_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + Ntau, Nq = s.G.shape + g_max = 1e30 + for qi in range(1, 1 + Nq): + pixelist_qi = np.where(qind == qi)[0] + s_Gall_qi = s.G_all[:, pixelist_qi] + s_Pall_qi = s.past_intensity_all[:, pixelist_qi] + s_Fall_qi = s.future_intensity_all[:, pixelist_qi] + avgGi = np.average(s_Gall_qi, axis=1) + devGi = np.std(s_Gall_qi, axis=1) + avgPi = np.average(s_Pall_qi, axis=1) + devPi = np.std(s_Pall_qi, axis=1) + avgFi = np.average(s_Fall_qi, axis=1) + devFi = np.std(s_Fall_qi, axis=1) + + if len(np.where(avgPi == 0)[0]) != 0: + g_max1 = np.where(avgPi == 0)[0][0] + else: + g_max1 = avgPi.shape[0] + if len(np.where(avgFi == 0)[0]) != 0: + g_max2 = np.where(avgFi == 0)[0][0] + else: + g_max2 = avgFi.shape[0] + g_max = min(g_max1, g_max2) + # print(g_max) + # g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * + # s.future_intensity[:g_max])) + g2[:g_max, qi - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) + g2_err[:g_max, qi - 1] = np.sqrt( + (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] ** 2 * avgPi[:g_max])) ** 2 * devFi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] * avgPi[:g_max] ** 2)) ** 2 * devPi[:g_max] ** 2 + ) + + print("G2 with error bar calculation DONE!") + return g2[:g_max, :], lag_steps[:g_max], g2_err[:g_max, :] / np.sqrt(nopr), s + else: + g2, lag_steps = multi_tau_auto_corr( + num_lev, num_buf, ring_mask, FD, bad_frame_list, imgsum=imgsum, norm=norm, cal_error=cal_error + ) + + print("G2 calculation DONE!") + return g2, lag_steps + + +def get_pixelist_interp_iq(qp, iq, ring_mask, center): + + qind, pixelist = roi.extract_label_indices(ring_mask) + # pixely = pixelist%FD.md['nrows'] -center[1] + # pixelx = pixelist//FD.md['nrows'] - center[0] + + pixely = pixelist % ring_mask.shape[1] - center[1] + pixelx = pixelist // ring_mask.shape[1] - center[0] + + r = np.hypot(pixelx, pixely) # leave as float. 
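# A minimal sketch (toy values) of the interpolation done in the return just
# below: each ROI pixel's radius r is mapped onto the circular average (qp in
# pixels, iq in intensity) with np.interp, giving a per-pixel normalization.
import numpy as np

qp = np.array([0.0, 10.0, 20.0, 30.0])     # radii (pixels) of the circular average
iq = np.array([100.0, 40.0, 10.0, 2.0])    # averaged intensity at those radii
r_demo = np.array([5.0, 12.0, 25.0])       # radii of three hypothetical ROI pixels
np.interp(r_demo, qp, iq)                  # -> [70., 34., 6.]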
+ # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return np.interp(r, qp, iq) + + +class Get_Pixel_Arrayc_todo(object): + """ + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + """ + + def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, norm_inten=None, qind=None): + """ + indexable: a images sequences + pixelist: 1-D array, interest pixel list + norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity + imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] + norm_inten: if True, each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame + qind: the index of each ROI in one frame, i.e., q + if norm_inten is True: qind has to be given + + """ + if beg is None: + self.beg = FD.beg + if end is None: + self.end = FD.end + # if self.beg ==0: + # self.length = self.end - self.beg + # else: + # self.length = self.end - self.beg + 1 + + self.length = self.end - self.beg + + self.FD = FD + self.pixelist = pixelist + self.norm = norm + self.imgsum = imgsum + self.norm_inten = norm_inten + self.qind = qind + if self.norm_inten is not None: + if self.qind is None: + print("Please give qind.") + + def get_data(self): + """ + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + """ + + data_array = np.zeros( + [self.length, len(self.pixelist)], dtype=np.float64 + ) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(self.FD.md["ncols"] * self.FD.md["nrows"], dtype=np.int32) + timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) + + if self.norm_inten is not None: + # Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones( + len(self.qind), dtype=np.float64 + ) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 + noqs = len(np.unique(self.qind)) + nopr = np.bincount(self.qind - 1) + noprs = np.concatenate([np.array([0]), np.cumsum(nopr)]) + qind_ = np.zeros_like(self.qind) + for j in range(noqs): + qind_[noprs[j] : noprs[j + 1]] = np.where(self.qind == j + 1)[0] + + n = 0 + for i in tqdm(range(self.beg, self.end)): + (p, v) = self.FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + # np.bincount( qind[pxlist], weight= + + if self.mean_int_sets is not None: # for each frame will normalize each ROI by it's averaged value + for j in range(noqs): + # if i ==100: + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) + Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] + + # if i==100: + # print( i, Mean_Int_Qind[ self.qind== 11 ]) + + # print('Do norm_mean_int here') + # if i ==10: + # print( norm_Mean_Int_Qind ) + else: + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] + else: + norm_imgsum = 1.0 + if self.norm is not None: + norm_avgimg_roi = self.norm[pxlist] + else: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + # if i==100: + 
# print(norm_Mean_Int_Qind[:100]) + data_array[n][pxlist] = v[w] / norms + n += 1 + + return data_array + + +class Get_Pixel_Arrayc(object): + """ + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + """ + + def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, mean_int_sets=None, qind=None): + """ + indexable: a images sequences + pixelist: 1-D array, interest pixel list + norm: each q-ROI of each frame is normalized by the corresponding q-ROI of time averaged intensity + imgsum: each q-ROI of each frame is normalized by the total intensity of the corresponding frame, should have the same time sequences as FD, e.g., imgsum[10] corresponding to FD[10] + mean_int_sets: each q-ROI of each frame is normlized by total intensity of the correponding q-ROI of the corresponding frame + qind: the index of each ROI in one frame, i.e., q + if mean_int_sets is not None: qind has to be not None + + """ + if beg is None: + self.beg = FD.beg + if end is None: + self.end = FD.end + # if self.beg ==0: + # self.length = self.end - self.beg + # else: + # self.length = self.end - self.beg + 1 + + self.length = self.end - self.beg + + self.FD = FD + self.pixelist = pixelist + self.norm = norm + self.imgsum = imgsum + self.mean_int_sets = mean_int_sets + self.qind = qind + if self.mean_int_sets is not None: + if self.qind is None: + print("Please give qind.") + + def get_data(self): + """ + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + """ + + data_array = np.zeros([self.length, len(self.pixelist)], dtype=np.float64) + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros(self.FD.md["ncols"] * self.FD.md["nrows"], dtype=np.int32) + timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) + + if self.mean_int_sets is not None: + # Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) + Mean_Int_Qind = np.ones(len(self.qind), dtype=np.float64) + noqs = len(np.unique(self.qind)) + nopr = np.bincount(self.qind - 1) + noprs = np.concatenate([np.array([0]), np.cumsum(nopr)]) + qind_ = np.zeros_like(self.qind) + for j in range(noqs): + qind_[noprs[j] : noprs[j + 1]] = np.where(self.qind == j + 1)[0] + + n = 0 + for i in tqdm(range(self.beg, self.end)): + (p, v) = self.FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + + if self.mean_int_sets is not None: # for normalization of each averaged ROI of each frame + for j in range(noqs): + # if i ==100: + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) + Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] + norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] + + # if i==100: + # print( i, Mean_Int_Qind[ self.qind== 11 ]) + + # print('Do norm_mean_int here') + # if i ==10: + # print( norm_Mean_Int_Qind ) + else: + norm_Mean_Int_Qind = 1.0 + if self.imgsum is not None: + norm_imgsum = self.imgsum[i] + else: + norm_imgsum = 1.0 + if self.norm is not None: + if len((self.norm).shape) > 1: + norm_avgimg_roi = self.norm[i][pxlist] + # print('here') + + else: + norm_avgimg_roi = self.norm[pxlist] + else: + norm_avgimg_roi = 1.0 + + norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi + # if i==100: + # print(norm_Mean_Int_Qind[:100]) + data_array[n][pxlist] = v[w] / norms + 
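# A minimal sketch (toy numbers, stand-in names) of how the three optional
# normalizations documented above combine for one frame: a per-ROI mean
# (mean_int_sets), a per-frame total (imgsum) and a per-pixel time average
# (norm) are simply multiplied into a single divisor.
import numpy as np

v_roi = np.array([4.0, 6.0, 2.0])            # counts on three ROI pixels in one frame
per_roi_mean = 2.0                           # stand-in for mean_int_sets[i][j]
per_frame_sum = 100.0                        # stand-in for imgsum[i]
per_pixel_avg = np.array([1.0, 2.0, 0.5])    # stand-in for norm[pxlist]
normalized = v_roi / (per_roi_mean * per_frame_sum * per_pixel_avg)
# -> [0.02, 0.015, 0.02]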
n += 1 + + return data_array + + +def auto_two_Arrayc(data_pixel, rois, index=None): + """ + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + """ + + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange(1, noqs + 1) + else: + try: + len(index) + index = np.array(index) + except TypeError: + index = np.array([index]) + # print( index ) + qlist = np.arange(1, noqs + 1)[index - 1] + # print( qlist ) + try: + g12b = np.zeros([noframes, noframes, len(qlist)]) + DO = True + except: + print( + "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" + ) + """TO be done here """ + DO = False + + if DO: + i = 0 + for qi in tqdm(qlist): + # print (qi-1) + pixelist_qi = np.where(qind == qi)[0] + # print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:, pixelist_qi] + sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes) + sum2 = sum1.T + # print( qi, qlist, ) + # print( g12b[:,:,qi -1 ] ) + g12b[:, :, i] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] + i += 1 + return g12b + + +def auto_two_Arrayc_ExplicitNorm(data_pixel, rois, norm=None, index=None): + """ + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function by giving explict normalization + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + norm: if not None, shoud be the shape as data_pixel, will normalize two time by this norm + if None, will return two time without normalization + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + """ + + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange(1, noqs + 1) + else: + try: + len(index) + index = np.array(index) + except TypeError: + index = np.array([index]) + # print( index ) + qlist = np.arange(1, noqs + 1)[index - 1] + # print( qlist ) + try: + g12b = np.zeros([noframes, noframes, len(qlist)]) + DO = True + except: + print( + "The array is too large. The Sever can't handle such big array. 
Will calulate different Q sequencely" + ) + """TO be done here """ + DO = False + if DO: + i = 0 + for qi in tqdm(qlist): + pixelist_qi = np.where(qind == qi)[0] + data_pixel_qi = data_pixel[:, pixelist_qi] + if norm is not None: + norm1 = norm[:, pixelist_qi] + sum1 = (np.average(norm1, axis=1)).reshape(1, noframes) + sum2 = sum1.T + else: + sum1 = 1 + sum2 = 1 + g12b[:, :, i] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] + i += 1 + return g12b + + +def two_time_norm(data_pixel, rois, index=None): + """ + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + """ + + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + noframes = data_pixel.shape[0] + + if index is None: + index = np.arange(1, noqs + 1) + else: + try: + len(index) + index = np.array(index) + except TypeError: + index = np.array([index]) + # print( index ) + qlist = np.arange(1, noqs + 1)[index - 1] + # print( qlist ) + try: + norm = np.zeros(len(qlist)) + DO = True + except: + print( + "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" + ) + """TO be done here """ + DO = False + + if DO: + i = 0 + for qi in tqdm(qlist): + # print (qi-1) + pixelist_qi = np.where(qind == qi)[0] + # print (pixelist_qi.shape, data_pixel[qi].shape) + data_pixel_qi = data_pixel[:, pixelist_qi] + sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes) + norm[i] = np.average(sum1) + # sum2 = sum1.T + # print( qi, qlist, ) + # print( g12b[:,:,qi -1 ] ) + # g12b[:,:, i ] = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1] + i += 1 + return norm + + +def check_normalization(frame_num, q_list, imgsa, data_pixel): + """check the ROI intensity before and after normalization + Input: + frame_num: integer, the number of frame to be checked + q_list: list of integer, the list of q to be checked + imgsa: the raw data + data_pixel: the normalized data, caculated by fucntion Get_Pixel_Arrayc + Plot the intensities + """ + fig, ax = plt.subplots(2) + n = 0 + for q in q_list: + norm_data = data_pixel[frame_num][qind == q] + raw_data = np.ravel(np.array(imgsa[frame_num]))[pixelist[qind == q]] + # print(raw_data.mean()) + plot1D(raw_data, ax=ax[0], legend="q=%s" % (q), m=markers[n], title="fra=%s_raw_data" % (frame_num)) + + # plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], + # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + # print( mean_int_sets_[frame_num][q-1] ) + plot1D( + norm_data, + ax=ax[1], + legend="q=%s" % (q), + m=markers[n], + xlabel="pixel", + title="fra=%s_norm_data" % (frame_num), + ) + n += 1 diff --git a/pyCHX/backups/pyCHX-backup/chx_correlationp.py b/pyCHX/backups/pyCHX-backup/chx_correlationp.py new file mode 100644 index 0000000..496ec67 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_correlationp.py @@ -0,0 +1,957 @@ +""" +Aug 10, Developed by Y.G.@CHX +yuzhang@bnl.gov +This module is for 
parallel computation of time correlation +""" + +from __future__ import absolute_import, division, print_function + +import logging +import sys +from collections import namedtuple +from multiprocessing import Pool + +import dill +import numpy as np +import skbeam.core.roi as roi +from skbeam.core.roi import extract_label_indices +from skbeam.core.utils import multi_tau_lags + +from pyCHX.chx_compress import apply_async, go_through_FD, map_async, pass_FD, run_dill_encoded +from pyCHX.chx_correlationc import _one_time_process as _one_time_processp +from pyCHX.chx_correlationc import _one_time_process_error as _one_time_process_errorp +from pyCHX.chx_correlationc import _two_time_process as _two_time_processp +from pyCHX.chx_correlationc import _validate_and_transform_inputs, get_pixelist_interp_iq +from pyCHX.chx_libs import tqdm + +logger = logging.getLogger(__name__) + + +class _init_state_two_timep: + def __init__(self, num_levels, num_bufs, labels, num_frames): + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + count_level = np.zeros(num_levels, dtype=np.int64) + # current image time + current_img_time = 0 + # generate a time frame for each level + time_ind = {key: [] for key in range(num_levels)} + # two time correlation results (array) + g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64) + + ( + self.buf, + self.img_per_level, + self.label_array, + self.track_level, + self.cur, + self.pixel_list, + self.num_pixels, + self.lag_steps, + self.g2, + self.count_level, + self.current_img_time, + self.time_ind, + self.norm, + self.lev_len, + ) = ( + buf, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + g2, + count_level, + current_img_time, + time_ind, + norm, + lev_len, + ) + + def __getstate__(self): + """This is called before pickling.""" + state = self.__dict__.copy() + return state + + def __setstate__(self, state): + """This is called while unpickling.""" + self.__dict__.update(state) + + +def lazy_two_timep( + FD, num_levels, num_bufs, labels, internal_state=None, bad_frame_list=None, imgsum=None, norm=None +): + """Generator implementation of two-time correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Multi-tau correlation uses a scheme to achieve long-time correlations + inexpensively by downsampling the data, iteratively combining successive + frames. + The longest lag time computed is num_levels * num_bufs. + ** see comments on multi_tau_auto_corr + Parameters + ---------- + FD: the handler of compressed data + num_levels : int, optional + how many generations of downsampling to perform, i.e., + the depth of the binomial tree of averaged frames + default is one + num_bufs : int, must be even + maximum lag step to compute in each generation of + downsampling + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + two_time_internal_state: None + + + Yields + ------ + namedtuple + A ``results`` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - ``g2``: the normalized correlation + shape is (num_rois, len(lag_steps), len(lag_steps)) + - ``lag_steps``: the times at which the correlation was computed + - ``_internal_state``: all of the internal state. 
Can be passed back in + to ``lazy_one_time`` as the ``internal_state`` parameter + Notes + ----- + The two-time correlation function is defined as + .. math:: + C(q,t_1,t_2) = \\frac{}{} + Here, the ensemble averages are performed over many pixels of detector, + all having the same ``q`` value. The average time or age is equal to + ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal. + The time difference ``t = |t1 - t2|``, with is distance from the + ``t1 = t2`` diagonal in the perpendicular direction. + In the equilibrium system, the two-time correlation functions depend only + on the time difference ``t``, and hence the two-time correlation contour + lines are parallel. + References + ---------- + .. [1] + A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, "Slow dynamics + and aging in collodial gels studied by x-ray photon correlation + spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. + """ + num_frames = FD.end - FD.beg + if internal_state is None: + internal_state = _init_state_two_timep(num_levels, num_bufs, labels, num_frames) + # create a shorthand reference to the results and state named tuple + s = internal_state + + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: + bad_frame_list = [] + + for i in range(FD.beg, FD.end): + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + fra_pix[pxlist] = v[w] + else: + S = norm.shape + if len(S) > 1: + fra_pix[pxlist] = v[w] / norm[i, pxlist] # -1.0 + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + S = norm.shape + if len(S) > 1: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[i, pxlist] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + s.count_level[0] = 1 + s.count_level[0] + # get the current image time + # s = s._replace(current_img_time=(s.current_img_time + 1)) + s.current_img_time += 1 + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + _two_time_processp( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + s.current_img_time, + level=0, + buf_no=s.cur[0] - 1, + ) + # time frame for each level + s.time_ind[0].append(s.current_img_time) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + # Compute the correlations for all higher levels. 
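# A minimal sketch (toy numbers) of the level-doubling performed just below:
# each higher multi-tau level averages successive pairs of frames from the
# level beneath it, and the effective frame time becomes the mean of the two
# parent times.
import numpy as np

level0 = np.array([[1.0, 3.0], [2.0, 4.0], [5.0, 1.0], [3.0, 3.0]])  # 4 frames x 2 ROI pixels
level1 = 0.5 * (level0[0::2] + level0[1::2])       # -> [[1.5, 3.5], [4.0, 2.0]]
times0 = np.array([1.0, 2.0, 3.0, 4.0])            # frame times at level 0
times1 = 0.5 * (times0[0::2] + times0[1::2])       # -> [1.5, 3.5]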
+ level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = 1 + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + t1_idx = (s.count_level[level] - 1) * 2 + current_img_time = ((s.time_ind[level - 1])[t1_idx] + (s.time_ind[level - 1])[t1_idx + 1]) / 2.0 + # time frame for each level + s.time_ind[level].append(current_img_time) + # make the track_level zero once that level is processed + s.track_level[level] = 0 + # call the _two_time_process function for each multi-tau level + # for multi-tau levels greater than one + # Again, this is modifying things in place. See comment + # on previous call above. + _two_time_processp( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + current_img_time, + level=level, + buf_no=s.cur[level] - 1, + ) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + # print (s.g2[1,:,1] ) + # yield s + for q in range(np.max(s.label_array)): + x0 = (s.g2)[q, :, :] + (s.g2)[q, :, :] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0)) + return s.g2, s.lag_steps + + +def cal_c12p(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None, imgsum=None, norm=None): + """calculation g2 by using a multi-tau algorithm + for a compressed file with parallel calculation + """ + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # +1 # number of frames, not "no frames" + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("Bad frame involved and will be precessed!") + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + print("%s frames will be processed..." 
% (noframes)) + ring_masks = [np.array(ring_mask == i, dtype=np.int64) for i in np.unique(ring_mask)[1:]] + qind, pixelist = roi.extract_label_indices(ring_mask) + if norm is not None: + S = norm.shape + if len(S) > 1: + norms = [ + norm[:, np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[1:] + ] + else: + norms = [ + norm[np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[1:] + ] + inputs = range(len(ring_masks)) + pool = Pool(processes=len(inputs)) + internal_state = None + print("Starting assign the tasks...") + results = {} + if norm is not None: + for i in tqdm(inputs): + # for i in inputs: + results[i] = apply_async( + pool, + lazy_two_timep, + ( + FD, + num_lev, + num_buf, + ring_masks[i], + internal_state, + bad_frame_list, + imgsum, + norms[i], + ), + ) + else: + # print ('for norm is None') + for i in tqdm(inputs): + # for i in inputs: + results[i] = apply_async( + pool, + lazy_two_timep, + ( + FD, + num_lev, + num_buf, + ring_masks[i], + internal_state, + bad_frame_list, + imgsum, + None, + ), + ) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + + c12 = np.zeros([noframes, noframes, len(ring_masks)]) + for i in inputs: + # print( res[i][0][:,0].shape, g2.shape ) + c12[:, :, i] = res[i][0][0] # [:len_lag, :len_lag] + if i == 0: + lag_steps = res[0][1] + + print("G2 calculation DONE!") + del results + del res + return c12, lag_steps[lag_steps < noframes] + + +class _internal_statep: + def __init__(self, num_levels, num_bufs, labels, cal_error=False): + """YG. DEV Nov, 2016, Initialize class for the generator-based multi-tau + for one time correlation + + Jan 1, 2018, Add cal_error option to calculate signal to noise to one time correaltion + + """ + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + G = np.zeros((int((num_levels + 1) * num_bufs / 2), num_rois), dtype=np.float64) + # matrix for normalizing G into g2 + past_intensity = np.zeros_like(G) + # matrix for normalizing G into g2 + future_intensity = np.zeros_like(G) + ( + self.buf, + self.G, + self.past_intensity, + self.future_intensity, + self.img_per_level, + self.label_array, + self.track_level, + self.cur, + self.pixel_list, + self.num_pixels, + self.lag_steps, + self.norm, + self.lev_len, + ) = ( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + ) + if cal_error: + self.G_all = np.zeros((int((num_levels + 1) * num_bufs / 2), len(pixel_list)), dtype=np.float64) + # matrix for normalizing G into g2 + self.past_intensity_all = np.zeros_like(self.G_all) + # matrix for normalizing G into g2 + self.future_intensity_all = np.zeros_like(self.G_all) + + def __getstate__(self): + """This is called before pickling.""" + state = self.__dict__.copy() + return state + + def __setstate__(self, state): + """This is called while unpickling.""" + self.__dict__.update(state) + + +def lazy_one_timep( + FD, + num_levels, + num_bufs, + labels, + internal_state=None, + bad_frame_list=None, + imgsum=None, + norm=None, + cal_error=False, +): + if internal_state is None: + internal_state = _internal_statep(num_levels, num_bufs, labels, cal_error) + # create a 
shorthand reference to the results and state named tuple + s = internal_state + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: + bad_frame_list = [] + # for i in tqdm(range( FD.beg , FD.end )): + for i in range(FD.beg, FD.end): + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + # print ('here') + fra_pix[pxlist] = v[w] + else: + S = norm.shape + if len(S) > 1: + fra_pix[pxlist] = v[w] / norm[i, pxlist] # -1.0 + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + S = norm.shape + if len(S) > 1: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[i, pxlist] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + # print( i, len(p), len(w), len( pixelist)) + + # print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! + # print (s.G) + if cal_error: + _one_time_process_errorp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_processp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + # print (s.G) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + + # make the track_level zero once that level is processed + s.track_level[level] = False + + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. + buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_errorp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_processp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + level += 1 + # Checking whether there is next level for processing + processing = level < num_levels + + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. 
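+ # Normalization below: per ROI and lag channel, g2(q, tau) =
+ # <I(t) I(t+tau)> / (<I(t)> <I(t+tau)>), i.e. s.G divided by the product of
+ # s.past_intensity and s.future_intensity, truncated at the first lag
+ # (g_max) for which either average is still zero.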
+ if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min(g_max1, g_max2) + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + # sys.stdout.write('#') + # del FD + # sys.stdout.flush() + # print (g2) + # return results(g2, s.lag_steps[:g_max], s) + if cal_error: + # return g2, s.lag_steps[:g_max], s.G[:g_max],s.past_intensity[:g_max], s.future_intensity[:g_max] #, s + return (None, s.lag_steps, s.G_all, s.past_intensity_all, s.future_intensity_all) # , s ) + else: + return g2, s.lag_steps[:g_max] # , s + + +def cal_g2p( + FD, + ring_mask, + bad_frame_list=None, + good_start=0, + num_buf=8, + num_lev=None, + imgsum=None, + norm=None, + cal_error=False, +): + """calculation g2 by using a multi-tau algorithm + for a compressed file with parallel calculation + if return_g2_details: return g2 with g2_denomitor, g2_past, g2_future + """ + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg + 1 # number of frames, not "no frames" + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("%s Bad frames involved and will be discarded!" % len(bad_frame_list)) + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + print("%s frames will be processed..." % (noframes - 1)) + ring_masks = [np.array(ring_mask == i, dtype=np.int64) for i in np.unique(ring_mask)[1:]] + qind, pixelist = roi.extract_label_indices(ring_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + if norm is not None: + S = norm.shape + if len(S) > 1: + norms = [ + norm[:, np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[1:] + ] + else: + norms = [ + norm[np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[1:] + ] + inputs = range(len(ring_masks)) + pool = Pool(processes=len(inputs)) + internal_state = None + print("Starting assign the tasks...") + results = {} + if norm is not None: + for i in tqdm(inputs): + results[i] = apply_async( + pool, + lazy_one_timep, + (FD, num_lev, num_buf, ring_masks[i], internal_state, bad_frame_list, imgsum, norms[i], cal_error), + ) + else: + # print ('for norm is None') + for i in tqdm(inputs): + results[i] = apply_async( + pool, + lazy_one_timep, + (FD, num_lev, num_buf, ring_masks[i], internal_state, bad_frame_list, imgsum, None, cal_error), + ) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + len_lag = 10**10 + for i in inputs: # to get the smallest length of lag_step, + ##***************************** + ##Here could result in problem for significantly cut useful data if some Q have very short tau list + ##**************************** + if len_lag > len(res[i][1]): + lag_steps = res[i][1] + len_lag = len(lag_steps) + + # lag_steps = res[0][1] + if not cal_error: + g2 = np.zeros([len(lag_steps), len(ring_masks)]) + else: + g2 = np.zeros([int((num_lev + 1) * num_buf / 2), 
len(ring_masks)]) + g2_err = np.zeros_like(g2) + # g2_G = np.zeros(( int( (num_lev + 1) * num_buf / 2), len(pixelist)) ) + # g2_P = np.zeros_like( g2_G ) + # g2_F = np.zeros_like( g2_G ) + Gmax = 0 + lag_steps_err = res[0][1] + for i in inputs: + # print( res[i][0][:,0].shape, g2.shape ) + if not cal_error: + g2[:, i] = res[i][0][:, 0][:len_lag] + else: + s_Gall_qi = res[i][2] # [:len_lag] + s_Pall_qi = res[i][3] # [:len_lag] + s_Fall_qi = res[i][4] # [:len_lag] + # print( s_Gall_qi.shape,s_Pall_qi.shape,s_Fall_qi.shape ) + avgGi = np.average(s_Gall_qi, axis=1) + devGi = np.std(s_Gall_qi, axis=1) + avgPi = np.average(s_Pall_qi, axis=1) + devPi = np.std(s_Pall_qi, axis=1) + avgFi = np.average(s_Fall_qi, axis=1) + devFi = np.std(s_Fall_qi, axis=1) + + if len(np.where(avgPi == 0)[0]) != 0: + g_max1 = np.where(avgPi == 0)[0][0] + else: + g_max1 = avgPi.shape[0] + if len(np.where(avgFi == 0)[0]) != 0: + g_max2 = np.where(avgFi == 0)[0][0] + else: + g_max2 = avgFi.shape[0] + g_max = min(g_max1, g_max2) + g2[:g_max, i] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) + g2_err[:g_max, i] = np.sqrt( + (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] ** 2 * avgPi[:g_max])) ** 2 * devFi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] * avgPi[:g_max] ** 2)) ** 2 * devPi[:g_max] ** 2 + ) + Gmax = max(g_max, Gmax) + lag_stepsi = res[i][1] + if len(lag_steps_err) < len(lag_stepsi): + lag_steps_err = lag_stepsi + + del results + del res + if cal_error: + print("G2 with error bar calculation DONE!") + return g2[:Gmax, :], lag_steps_err[:Gmax], g2_err[:Gmax, :] / np.sqrt(nopr) + else: + print("G2 calculation DONE!") + return g2, lag_steps + + +def cal_GPF( + FD, + ring_mask, + bad_frame_list=None, + good_start=0, + num_buf=8, + num_lev=None, + imgsum=None, + norm=None, + cal_error=True, +): + """calculation G,P,D by using a multi-tau algorithm + for a compressed file with parallel calculation + if return_g2_details: return g2 with g2_denomitor, g2_past, g2_future + """ + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg + 1 # number of frames, not "no frames" + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("%s Bad frames involved and will be discarded!" % len(bad_frame_list)) + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + print("%s frames will be processed..." 
% (noframes - 1)) + if np.min(ring_mask) == 0: + qstart = 1 + else: + qstart = 0 + ring_masks = [np.array(ring_mask == i, dtype=np.int64) for i in np.unique(ring_mask)[qstart:]] + qind, pixelist = roi.extract_label_indices(ring_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[qstart:] + if norm is not None: + norms = [ + norm[np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[qstart:] + ] + + inputs = range(len(ring_masks)) + pool = Pool(processes=len(inputs)) + internal_state = None + print("Starting assign the tasks...") + results = {} + if norm is not None: + for i in tqdm(inputs): + results[i] = apply_async( + pool, + lazy_one_timep, + (FD, num_lev, num_buf, ring_masks[i], internal_state, bad_frame_list, imgsum, norms[i], cal_error), + ) + else: + # print ('for norm is None') + for i in tqdm(inputs): + results[i] = apply_async( + pool, + lazy_one_timep, + (FD, num_lev, num_buf, ring_masks[i], internal_state, bad_frame_list, imgsum, None, cal_error), + ) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + + # lag_steps = res[0][1] + g2_G = np.zeros((int((num_lev + 1) * num_buf / 2), len(pixelist))) + g2_P = np.zeros_like(g2_G) + g2_F = np.zeros_like(g2_G) + Gmax = 0 + lag_steps_err = res[0][1] + # print('Here') + for i in inputs: + g2_G[:, qind == 1 + i] = res[i][2] # [:len_lag] + g2_P[:, qind == 1 + i] = res[i][3] # [:len_lag] + g2_F[:, qind == 1 + i] = res[i][4] # [:len_lag] + del results + del res + return g2_G, g2_P, g2_F + + +def get_g2_from_ROI_GPF(G, P, F, roi_mask): + """YG. 2018.10.26. Get g2 from G, P, F by giving bins (roi_mask) + Input: + G: t + P: < I(t) >t + F: < I(t+tau) >t + roi_mask: the roi mask + + Output: + g2 and g2_err + + """ + + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + g2 = np.zeros([G.shape[0], noqs]) + g2_err = np.zeros([G.shape[0], noqs]) + for i in range(1, 1 + noqs): + ## G[0].shape is the same as roi_mask shape + if len(G.shape) > 2: + s_Gall_qi = G[:, roi_mask == i] + s_Pall_qi = P[:, roi_mask == i] + s_Fall_qi = F[:, roi_mask == i] + ## G[0].shape is the same length as pixelist + else: + s_Gall_qi = G[:, qind == i] + s_Pall_qi = P[:, qind == i] + s_Fall_qi = F[:, qind == i] + + # print( s_Gall_qi.shape,s_Pall_qi.shape,s_Fall_qi.shape ) + avgGi = np.average(s_Gall_qi, axis=1) + devGi = np.std(s_Gall_qi, axis=1) + avgPi = np.average(s_Pall_qi, axis=1) + devPi = np.std(s_Pall_qi, axis=1) + avgFi = np.average(s_Fall_qi, axis=1) + devFi = np.std(s_Fall_qi, axis=1) + if len(np.where(avgPi == 0)[0]) != 0: + g_max1 = np.where(avgPi == 0)[0][0] + else: + g_max1 = avgPi.shape[0] + if len(np.where(avgFi == 0)[0]) != 0: + g_max2 = np.where(avgFi == 0)[0][0] + else: + g_max2 = avgFi.shape[0] + g_max = min(g_max1, g_max2) + # print() + g2[:g_max, i - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) + g2_err[:g_max, i - 1] = np.sqrt( + (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] ** 2 * avgPi[:g_max])) ** 2 * devFi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] * avgPi[:g_max] ** 2)) ** 2 * devPi[:g_max] ** 2 + ) + + return g2, g2_err + + +def auto_two_Arrayp(data_pixel, rois, index=None): + """ + TODO list + will try to use dask + + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function using parallel computation + + Parameters: + data: images sequence, 
shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + """ + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + noframes = data_pixel.shape[0] + g12b = np.zeros([noframes, noframes, noqs]) + + if index is None: + index = np.arange(1, noqs + 1) + else: + try: + len(index) + index = np.array(index) + except TypeError: + index = np.array([index]) + qlist = np.arange(1, noqs + 1)[index - 1] + + inputs = range(len(qlist)) + + data_pixel_qis = [0] * len(qlist) + for i in inputs: + pixelist_qi = np.where(qind == qlist[i])[0] + data_pixel_qis[i] = data_pixel[:, pixelist_qi] + + # pool = Pool(processes= len(inputs) ) + # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # res = [r.get() for r in results] + + pool = Pool(processes=len(inputs)) + results = {} + for i in inputs: + results[i] = pool.apply_async(_get_two_time_for_one_q, [qlist[i], data_pixel_qis[i], nopr, noframes]) + pool.close() + pool.join() + res = np.array([results[k].get() for k in list(sorted(results.keys()))]) + + # print('here') + + for i in inputs: + qi = qlist[i] + g12b[:, :, qi - 1] = res[i] + print("G12 calculation DONE!") + return g12b # g12b + + +def _get_two_time_for_one_q(qi, data_pixel_qi, nopr, noframes): + # print( data_pixel_qi.shape) + + sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes) + sum2 = sum1.T + two_time_qi = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] + return two_time_qi diff --git a/pyCHX/backups/pyCHX-backup/chx_correlationp2.py b/pyCHX/backups/pyCHX-backup/chx_correlationp2.py new file mode 100644 index 0000000..8ddbc19 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_correlationp2.py @@ -0,0 +1,804 @@ +""" +Aug 10, Developed by Y.G.@CHX +yuzhang@bnl.gov +This module is for parallel computation of time correlation +Feb 20, 2018 +The chx_correlationp2 is for dedug g2 +""" + +from __future__ import absolute_import, division, print_function + +import logging +import sys +from collections import namedtuple +from multiprocessing import Pool + +import dill +import numpy as np +import skbeam.core.roi as roi +from skbeam.core.roi import extract_label_indices +from skbeam.core.utils import multi_tau_lags + +from pyCHX.chx_compress import apply_async, go_through_FD, map_async, pass_FD, run_dill_encoded +from pyCHX.chx_correlationc import _one_time_process as _one_time_processp +from pyCHX.chx_correlationc import _one_time_process_error as _one_time_process_errorp +from pyCHX.chx_correlationc import _two_time_process as _two_time_processp +from pyCHX.chx_correlationc import _validate_and_transform_inputs, get_pixelist_interp_iq +from pyCHX.chx_libs import tqdm + +logger = logging.getLogger(__name__) + + +class _init_state_two_timep: + def __init__(self, num_levels, num_bufs, labels, num_frames): + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + count_level = 
np.zeros(num_levels, dtype=np.int64) + # current image time + current_img_time = 0 + # generate a time frame for each level + time_ind = {key: [] for key in range(num_levels)} + # two time correlation results (array) + g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64) + + ( + self.buf, + self.img_per_level, + self.label_array, + self.track_level, + self.cur, + self.pixel_list, + self.num_pixels, + self.lag_steps, + self.g2, + self.count_level, + self.current_img_time, + self.time_ind, + self.norm, + self.lev_len, + ) = ( + buf, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + g2, + count_level, + current_img_time, + time_ind, + norm, + lev_len, + ) + + def __getstate__(self): + """This is called before pickling.""" + state = self.__dict__.copy() + return state + + def __setstate__(self, state): + """This is called while unpickling.""" + self.__dict__.update(state) + + +def lazy_two_timep( + FD, num_levels, num_bufs, labels, internal_state=None, bad_frame_list=None, imgsum=None, norm=None +): + """Generator implementation of two-time correlation + If you do not want multi-tau correlation, set num_levels to 1 and + num_bufs to the number of images you wish to correlate + Multi-tau correlation uses a scheme to achieve long-time correlations + inexpensively by downsampling the data, iteratively combining successive + frames. + The longest lag time computed is num_levels * num_bufs. + ** see comments on multi_tau_auto_corr + Parameters + ---------- + FD: the handler of compressed data + num_levels : int, optional + how many generations of downsampling to perform, i.e., + the depth of the binomial tree of averaged frames + default is one + num_bufs : int, must be even + maximum lag step to compute in each generation of + downsampling + labels : array + labeled array of the same shape as the image stack; + each ROI is represented by a distinct label (i.e., integer) + two_time_internal_state: None + + + Yields + ------ + namedtuple + A ``results`` object is yielded after every image has been processed. + This `reults` object contains, in this order: + - ``g2``: the normalized correlation + shape is (num_rois, len(lag_steps), len(lag_steps)) + - ``lag_steps``: the times at which the correlation was computed + - ``_internal_state``: all of the internal state. Can be passed back in + to ``lazy_one_time`` as the ``internal_state`` parameter + Notes + ----- + The two-time correlation function is defined as + .. math:: + C(q,t_1,t_2) = \\frac{}{} + Here, the ensemble averages are performed over many pixels of detector, + all having the same ``q`` value. The average time or age is equal to + ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal. + The time difference ``t = |t1 - t2|``, with is distance from the + ``t1 = t2`` diagonal in the perpendicular direction. + In the equilibrium system, the two-time correlation functions depend only + on the time difference ``t``, and hence the two-time correlation contour + lines are parallel. + References + ---------- + .. [1] + A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, "Slow dynamics + and aging in collodial gels studied by x-ray photon correlation + spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007. 
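+ Written out, the quantity computed here is
+ C(q, t1, t2) = <I(q, t1) I(q, t2)> / ( <I(q, t1)> <I(q, t2)> ),
+ where the angle brackets denote an average over all pixels in the ROI.
+ A minimal usage sketch (illustrative only; ``FD`` is a compressed-file
+ handler and ``roi_mask`` a labeled ROI array assumed to exist already):
+ >>> c12, lags = lazy_two_timep(FD, num_levels=1, num_bufs=FD.end - FD.beg, labels=roi_mask)
+ Setting ``num_levels=1`` disables multi-tau downsampling; ``num_bufs`` must be even.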
+ """ + num_frames = FD.end - FD.beg + if internal_state is None: + internal_state = _init_state_two_timep(num_levels, num_bufs, labels, num_frames) + # create a shorthand reference to the results and state named tuple + s = internal_state + + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: + bad_frame_list = [] + + for i in range(FD.beg, FD.end): + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + s.count_level[0] = 1 + s.count_level[0] + # get the current image time + + # s = s._replace(current_img_time=(s.current_img_time + 1)) + s.current_img_time += 1 + + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + _two_time_processp( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + s.current_img_time, + level=0, + buf_no=s.cur[0] - 1, + ) + # time frame for each level + s.time_ind[0].append(s.current_img_time) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + # Compute the correlations for all higher levels. + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = 1 + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + s.count_level[level] = 1 + s.count_level[level] + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + t1_idx = (s.count_level[level] - 1) * 2 + current_img_time = ((s.time_ind[level - 1])[t1_idx] + (s.time_ind[level - 1])[t1_idx + 1]) / 2.0 + # time frame for each level + s.time_ind[level].append(current_img_time) + # make the track_level zero once that level is processed + s.track_level[level] = 0 + # call the _two_time_process function for each multi-tau level + # for multi-tau levels greater than one + # Again, this is modifying things in place. See comment + # on previous call above. 
+ _two_time_processp( + s.buf, + s.g2, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + s.lag_steps, + current_img_time, + level=level, + buf_no=s.cur[level] - 1, + ) + level += 1 + + # Checking whether there is next level for processing + processing = level < num_levels + # print (s.g2[1,:,1] ) + # yield s + for q in range(np.max(s.label_array)): + x0 = (s.g2)[q, :, :] + (s.g2)[q, :, :] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0)) + return s.g2, s.lag_steps + + +def cal_c12p(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None, imgsum=None, norm=None): + """calculation g2 by using a multi-tau algorithm + for a compressed file with parallel calculation + """ + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg # +1 # number of frames, not "no frames" + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("Bad frame involved and will be precessed!") + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + print("%s frames will be processed..." % (noframes)) + ring_masks = [np.array(ring_mask == i, dtype=np.int64) for i in np.unique(ring_mask)[1:]] + qind, pixelist = roi.extract_label_indices(ring_mask) + if norm is not None: + norms = [ + norm[np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[1:] + ] + inputs = range(len(ring_masks)) + pool = Pool(processes=len(inputs)) + internal_state = None + print("Starting assign the tasks...") + results = {} + if norm is not None: + for i in tqdm(inputs): + # for i in inputs: + results[i] = apply_async( + pool, + lazy_two_timep, + ( + FD, + num_lev, + num_buf, + ring_masks[i], + internal_state, + bad_frame_list, + imgsum, + norms[i], + ), + ) + else: + # print ('for norm is None') + for i in tqdm(inputs): + # for i in inputs: + results[i] = apply_async( + pool, + lazy_two_timep, + ( + FD, + num_lev, + num_buf, + ring_masks[i], + internal_state, + bad_frame_list, + imgsum, + None, + ), + ) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + + c12 = np.zeros([noframes, noframes, len(ring_masks)]) + for i in inputs: + # print( res[i][0][:,0].shape, g2.shape ) + c12[:, :, i] = res[i][0][0] # [:len_lag, :len_lag] + if i == 0: + lag_steps = res[0][1] + + print("G2 calculation DONE!") + del results + del res + return c12, lag_steps[lag_steps < noframes] + + +class _internal_statep: + def __init__(self, num_levels, num_bufs, labels, cal_error=False): + """YG. 
DEV Nov, 2016, Initialize class for the generator-based multi-tau + for one time correlation + + Jan 1, 2018, Add cal_error option to calculate signal to noise to one time correaltion + + """ + ( + label_array, + pixel_list, + num_rois, + num_pixels, + lag_steps, + buf, + img_per_level, + track_level, + cur, + norm, + lev_len, + ) = _validate_and_transform_inputs(num_bufs, num_levels, labels) + + G = np.zeros((int((num_levels + 1) * num_bufs / 2), num_rois), dtype=np.float64) + # matrix for normalizing G into g2 + past_intensity = np.zeros_like(G) + # matrix for normalizing G into g2 + future_intensity = np.zeros_like(G) + ( + self.buf, + self.G, + self.past_intensity, + self.future_intensity, + self.img_per_level, + self.label_array, + self.track_level, + self.cur, + self.pixel_list, + self.num_pixels, + self.lag_steps, + self.norm, + self.lev_len, + ) = ( + buf, + G, + past_intensity, + future_intensity, + img_per_level, + label_array, + track_level, + cur, + pixel_list, + num_pixels, + lag_steps, + norm, + lev_len, + ) + if cal_error: + self.G_all = np.zeros((int((num_levels + 1) * num_bufs / 2), len(pixel_list)), dtype=np.float64) + # matrix for normalizing G into g2 + self.past_intensity_all = np.zeros_like(self.G_all) + # matrix for normalizing G into g2 + self.future_intensity_all = np.zeros_like(self.G_all) + + def __getstate__(self): + """This is called before pickling.""" + state = self.__dict__.copy() + return state + + def __setstate__(self, state): + """This is called while unpickling.""" + self.__dict__.update(state) + + +def lazy_one_timep( + FD, + num_levels, + num_bufs, + labels, + internal_state=None, + bad_frame_list=None, + imgsum=None, + norm=None, + cal_error=False, +): + if internal_state is None: + internal_state = _internal_statep(num_levels, num_bufs, labels, cal_error) + # create a shorthand reference to the results and state named tuple + s = internal_state + qind, pixelist = roi.extract_label_indices(labels) + # iterate over the images to compute multi-tau correlation + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + if bad_frame_list is None: + bad_frame_list = [] + # for i in tqdm(range( FD.beg , FD.end )): + for i in range(FD.beg, FD.end): + if i in bad_frame_list: + fra_pix[:] = np.nan + else: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + # print ('here') + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + + level = 0 + # increment buffer + s.cur[0] = (1 + s.cur[0]) % num_bufs + # Put the ROI pixels into the ring buffer. + s.buf[0, s.cur[0] - 1] = fra_pix + fra_pix[:] = 0 + # print( i, len(p), len(w), len( pixelist)) + + # print ('i= %s init fra_pix'%i ) + buf_no = s.cur[0] - 1 + # Compute the correlations between the first level + # (undownsampled) frames. This modifies G, + # past_intensity, future_intensity, + # and img_per_level in place! 
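+ # For every ROI and lag channel this keeps running averages over the ROI
+ # pixels of <I(t) I(t+tau)> (in G), <I(t)> (in past_intensity) and
+ # <I(t+tau)> (in future_intensity); img_per_level counts how many frames
+ # have entered each level so the running averages stay correctly weighted.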
+ # print (s.G) + if cal_error: + _one_time_process_errorp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_processp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + # print (s.G) + # check whether the number of levels is one, otherwise + # continue processing the next level + processing = num_levels > 1 + level = 1 + while processing: + if not s.track_level[level]: + s.track_level[level] = True + processing = False + else: + prev = 1 + (s.cur[level - 1] - 2) % num_bufs + s.cur[level] = 1 + s.cur[level] % num_bufs + + s.buf[level, s.cur[level] - 1] = ( + s.buf[level - 1, prev - 1] + s.buf[level - 1, s.cur[level - 1] - 1] + ) / 2 + + # make the track_level zero once that level is processed + s.track_level[level] = False + + # call processing_func for each multi-tau level greater + # than one. This is modifying things in place. See comment + # on previous call above. + buf_no = s.cur[level] - 1 + if cal_error: + _one_time_process_errorp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + s.G_all, + s.past_intensity_all, + s.future_intensity_all, + ) + else: + _one_time_processp( + s.buf, + s.G, + s.past_intensity, + s.future_intensity, + s.label_array, + num_bufs, + s.num_pixels, + s.img_per_level, + level, + buf_no, + s.norm, + s.lev_len, + ) + + level += 1 + # Checking whether there is next level for processing + processing = level < num_levels + + # If any past intensities are zero, then g2 cannot be normalized at + # those levels. This if/else code block is basically preventing + # divide-by-zero errors. 
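+ # Return convention: without cal_error the function returns the normalized
+ # g2 and its lag steps; with cal_error it returns the per-pixel accumulators
+ # (G_all, past_intensity_all, future_intensity_all) instead, so that cal_g2p
+ # or get_g2_from_ROI_GPF can average them per ROI and propagate an error bar.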
+ if not cal_error: + if len(np.where(s.past_intensity == 0)[0]) != 0: + g_max1 = np.where(s.past_intensity == 0)[0][0] + else: + g_max1 = s.past_intensity.shape[0] + if len(np.where(s.future_intensity == 0)[0]) != 0: + g_max2 = np.where(s.future_intensity == 0)[0][0] + else: + g_max2 = s.future_intensity.shape[0] + g_max = min(g_max1, g_max2) + g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) + # sys.stdout.write('#') + # del FD + # sys.stdout.flush() + # print (g2) + # return results(g2, s.lag_steps[:g_max], s) + if cal_error: + # return g2, s.lag_steps[:g_max], s.G[:g_max],s.past_intensity[:g_max], s.future_intensity[:g_max] #, s + return (None, s.lag_steps, s.G_all, s.past_intensity_all, s.future_intensity_all) # , s ) + else: + return g2, s.lag_steps[:g_max] # , s + + +def cal_g2p( + FD, + ring_mask, + bad_frame_list=None, + good_start=0, + num_buf=8, + num_lev=None, + imgsum=None, + norm=None, + cal_error=False, +): + """calculation g2 by using a multi-tau algorithm + for a compressed file with parallel calculation + if return_g2_details: return g2 with g2_denomitor, g2_past, g2_future + """ + FD.beg = max(FD.beg, good_start) + noframes = FD.end - FD.beg + 1 # number of frames, not "no frames" + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + if num_lev is None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + if bad_frame_list is not None: + if len(bad_frame_list) != 0: + print("%s Bad frames involved and will be discarded!" % len(bad_frame_list)) + noframes -= len(np.where(np.in1d(bad_frame_list, range(good_start, FD.end)))[0]) + print("%s frames will be processed..." % (noframes - 1)) + ring_masks = [np.array(ring_mask == i, dtype=np.int64) for i in np.unique(ring_mask)[1:]] + qind, pixelist = roi.extract_label_indices(ring_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + if norm is not None: + norms = [ + norm[np.in1d(pixelist, extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])] + for i in np.unique(ring_mask)[1:] + ] + + inputs = range(len(ring_masks)) + + pool = Pool(processes=len(inputs)) + internal_state = None + print("Starting assign the tasks...") + results = {} + if norm is not None: + for i in tqdm(inputs): + results[i] = apply_async( + pool, + lazy_one_timep, + (FD, num_lev, num_buf, ring_masks[i], internal_state, bad_frame_list, imgsum, norms[i], cal_error), + ) + else: + # print ('for norm is None') + for i in tqdm(inputs): + results[i] = apply_async( + pool, + lazy_one_timep, + (FD, num_lev, num_buf, ring_masks[i], internal_state, bad_frame_list, imgsum, None, cal_error), + ) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + len_lag = 10**10 + for i in inputs: # to get the smallest length of lag_step, + ##***************************** + ##Here could result in problem for significantly cut useful data if some Q have very short tau list + ##**************************** + if len_lag > len(res[i][1]): + lag_steps = res[i][1] + len_lag = len(lag_steps) + + # lag_steps = res[0][1] + if not cal_error: + g2 = np.zeros([len(lag_steps), len(ring_masks)]) + else: + g2 = np.zeros([int((num_lev + 1) * num_buf / 2), len(ring_masks)]) + g2_err = np.zeros_like(g2) + + g2_G = np.zeros((int((num_lev + 1) * num_buf / 2), len(pixelist))) + g2_P = np.zeros_like(g2_G) + g2_F = np.zeros_like(g2_G) + + Gmax = 0 + 
lag_steps_err = res[0][1] + nopr_ = np.lib.pad(np.cumsum(nopr), [1], mode="constant", constant_values=(0))[:-1] + for i in inputs: + # print( res[i][0][:,0].shape, g2.shape ) + if not cal_error: + g2[:, i] = res[i][0][:, 0][:len_lag] + else: + s_Gall_qi = res[i][2] # [:len_lag] + s_Pall_qi = res[i][3] # [:len_lag] + s_Fall_qi = res[i][4] # [:len_lag] + # print( s_Gall_qi.shape,s_Pall_qi.shape,s_Fall_qi.shape ) + avgGi = np.average(s_Gall_qi, axis=1) + devGi = np.std(s_Gall_qi, axis=1) + avgPi = np.average(s_Pall_qi, axis=1) + devPi = np.std(s_Pall_qi, axis=1) + avgFi = np.average(s_Fall_qi, axis=1) + devFi = np.std(s_Fall_qi, axis=1) + + if len(np.where(avgPi == 0)[0]) != 0: + g_max1 = np.where(avgPi == 0)[0][0] + else: + g_max1 = avgPi.shape[0] + if len(np.where(avgFi == 0)[0]) != 0: + g_max2 = np.where(avgFi == 0)[0][0] + else: + g_max2 = avgFi.shape[0] + g_max = min(g_max1, g_max2) + g2[:g_max, i] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) + g2_err[:g_max, i] = np.sqrt( + (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] ** 2 * avgPi[:g_max])) ** 2 * devFi[:g_max] ** 2 + + (avgGi[:g_max] / (avgFi[:g_max] * avgPi[:g_max] ** 2)) ** 2 * devPi[:g_max] ** 2 + ) + Gmax = max(g_max, Gmax) + lag_stepsi = res[i][1] + if len(lag_steps_err) < len(lag_stepsi): + lag_steps_err = lag_stepsi + + g2_G[:, nopr_[i] : nopr_[i + 1]] = s_Gall_qi + g2_P[:, nopr_[i] : nopr_[i + 1]] = s_Pall_qi + g2_F[:, nopr_[i] : nopr_[i + 1]] = s_Fall_qi + + del results + del res + if cal_error: + print("G2 with error bar calculation DONE!") + return g2[:Gmax, :], lag_steps_err[:Gmax], g2_err[:Gmax, :] / np.sqrt(nopr), g2_G, g2_P, g2_F + else: + print("G2 calculation DONE!") + return g2, lag_steps + + +def auto_two_Arrayp(data_pixel, rois, index=None): + """ + TODO list + will try to use dask + + Dec 16, 2015, Y.G.@CHX + a numpy operation method to get two-time correlation function using parallel computation + + Parameters: + data: images sequence, shape as [img[0], img[1], imgs_length] + rois: 2-D array, the interested roi, has the same shape as image, can be rings for saxs, boxes for gisaxs + + Options: + + data_pixel: if not None, + 2-D array, shape as (len(images), len(qind)), + use function Get_Pixel_Array( ).get_data( ) to get + + + Return: + g12: a 3-D array, shape as ( imgs_length, imgs_length, q) + + One example: + g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) + """ + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + noframes = data_pixel.shape[0] + g12b = np.zeros([noframes, noframes, noqs]) + + if index is None: + index = np.arange(1, noqs + 1) + else: + try: + len(index) + index = np.array(index) + except TypeError: + index = np.array([index]) + qlist = np.arange(1, noqs + 1)[index - 1] + + inputs = range(len(qlist)) + + data_pixel_qis = [0] * len(qlist) + for i in inputs: + pixelist_qi = np.where(qind == qlist[i])[0] + data_pixel_qis[i] = data_pixel[:, pixelist_qi] + + # pool = Pool(processes= len(inputs) ) + # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # res = [r.get() for r in results] + + pool = Pool(processes=len(inputs)) + results = {} + for i in inputs: + results[i] = pool.apply_async(_get_two_time_for_one_q, [qlist[i], data_pixel_qis[i], nopr, noframes]) + pool.close() + pool.join() + res = np.array([results[k].get() for k in 
list(sorted(results.keys()))]) + + # print('here') + + for i in inputs: + qi = qlist[i] + g12b[:, :, qi - 1] = res[i] + print("G12 calculation DONE!") + return g12b # g12b + + +def _get_two_time_for_one_q(qi, data_pixel_qi, nopr, noframes): + # print( data_pixel_qi.shape) + + sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes) + sum2 = sum1.T + two_time_qi = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] + return two_time_qi diff --git a/pyCHX/backups/pyCHX-backup/chx_crosscor.py b/pyCHX/backups/pyCHX-backup/chx_crosscor.py new file mode 100644 index 0000000..738be4e --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_crosscor.py @@ -0,0 +1,831 @@ +# Develop new version +# Original from #/XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/chx_correlation.py +# ###################################################################### +# Let's change from mask's to indices +######################################################################## + +""" +This module is for functions specific to spatial correlation in order to tackle the motion of speckles +""" +from __future__ import absolute_import, division, print_function + +from collections import namedtuple + +import numpy as np +from scipy.signal import fftconvolve +from skbeam.core.roi import extract_label_indices + +# from __future__ import absolute_import, division, print_function +from skbeam.core.utils import multi_tau_lags + +# for a convenient status bar +try: + from tqdm import tqdm +except ImportError: + + def tqdm(iterator): + return iterator + + +from scipy.fftpack.helper import next_fast_len + + +def get_cor_region(cor, cij, qid, fitw): + """YG developed@CHX July/2019, Get a rectangle region of the cor class by giving center and width""" + ceni = cor.centers[qid] + x1, x2, y1, y2 = max(0, ceni[0] - fitw), ceni[0] + fitw, max(0, ceni[1] - fitw), ceni[1] + fitw + return cij[qid][x1:x2, y1:y2] + + +def direct_corss_cor(im1, im2): + """YG developed@CHX July/2019, directly calculate the cross correlation of two images + Input: + im1: the first image + im2: the second image + Return: + The cross correlation + """ + sx, sy = im1.shape + Nx, Ny = sx // 2, sy // 2 + C = np.zeros([2 * Nx, 2 * Ny]) + for i in range(-Nx, Nx): + for j in range(-Ny, Ny): + if i == 0: + if j == 0: + d1 = im1[:, :] + d2 = im2[:, :] + elif j < 0: + d1 = im1[:j, :] + d2 = im2[-j:, :] + else: ##j>0 + d1 = im1[j:, :] + d2 = im2[:-j, :] + elif i < 0: + if j == 0: + d1 = im1[:, :i] + d2 = im2[:, -i:] + elif j < 0: + d1 = im1[:j, :i] + d2 = im2[-j:, -i:] + else: ##j>0 + d1 = im1[j:, :i] + d2 = im2[:-j, -i:] + else: # i>0: + if j == 0: + d1 = im1[:, i:] + d2 = im2[:, :-i] + elif j < 0: + d1 = im1[:j, i:] + d2 = im2[-j:, :-i] + else: ##j>0 + d1 = im1[j:, i:] + d2 = im2[:-j, :-i] + # print(i,j) + C[i + Nx, j + Ny] = np.sum(d1 * d2) / (np.average(d1) * np.average(d2) * d1.size) + return C.T + + +class CrossCorrelator2: + """ + Compute a 1D or 2D cross-correlation on data. + This uses a mask, which may be binary (array of 0's and 1's), + or a list of non-negative integer id's to compute cross-correlations + separately on. + The symmetric averaging scheme introduced here is inspired by a paper + from Schatzel, although the implementation is novel in that it + allows for the usage of arbitrary masks. 
[1]_ + Examples + -------- + >> ccorr = CrossCorrelator(mask.shape, mask=mask) + >> # correlated image + >> cimg = cc(img1) + or, mask may may be ids + >> cc = CrossCorrelator(ids) + #(where ids is same shape as img1) + >> cc1 = cc(img1) + >> cc12 = cc(img1, img2) + # if img2 shifts right of img1, point of maximum correlation is shifted + # right from correlation center + References + ---------- + .. [1] Schatzel, Klaus, Martin Drewel, and Sven Stimac. "Photon + correlation measurements at large lag times: improving + statistical accuracy." Journal of Modern Optics 35.4 (1988): + 711-718. + """ + + # TODO : when mask is None, don't compute a mask, submasks + def __init__(self, shape, mask=None, normalization=None, progress_bar=True): + """ + Prepare the spatial correlator for various regions specified by the + id's in the image. + Parameters + ---------- + shape : 1 or 2-tuple + The shape of the incoming images or curves. May specify 1D or + 2D shapes by inputting a 1 or 2-tuple + mask : 1D or 2D np.ndarray of int, optional + Each non-zero integer represents unique bin. Zero integers are + assumed to be ignored regions. If None, creates a mask with + all points set to 1 + normalization: string or list of strings, optional + These specify the normalization and may be any of the + following: + 'regular' : divide by pixel number + 'symavg' : use symmetric averaging + Defaults to ['regular'] normalization + Delete argument wrap as not used. See fftconvolve as this + expands arrays to get complete convolution, IE no need + to expand images of subregions. + """ + if normalization is None: + normalization = ["regular"] + elif not isinstance(normalization, list): + normalization = list([normalization]) + self.normalization = normalization + self.progress_bar = progress_bar + if mask is None: # we can do this easily now. + mask = np.ones(shape) + + # initialize subregion information for the correlations + # first find indices of subregions and sort them by subregion id + pii, pjj = np.where(mask) + bind = mask[pii, pjj] + ord = np.argsort(bind) + bind = bind[ord] + pii = pii[ord] + pjj = pjj[ord] # sort them all + + # make array of pointers into position arrays + pos = np.append(0, 1 + np.where(np.not_equal(bind[1:], bind[:-1]))[0]) + pos = np.append(pos, len(bind)) + self.pos = pos + self.ids = bind[pos[:-1]] + self.nids = len(self.ids) + sizes = np.array( + [ + [ + pii[pos[i] : pos[i + 1]].min(), + pii[pos[i] : pos[i + 1]].max(), + pjj[pos[i] : pos[i + 1]].min(), + pjj[pos[i] : pos[i + 1]].max(), + ] + for i in range(self.nids) + ] + ) + self.pii = pii + self.pjj = pjj + self.offsets = sizes[:, 0:3:2].copy() + # WE now have two sets of positions of the subregions + # (pii-offsets[0],pjj-offsets[1]) in subregion and (pii,pjj) in + # images. pos is a pointer such that (pos[i]:pos[i+1]) + # are the indices in the position arrays of subregion i. + + self.sizes = 1 + (np.diff(sizes)[:, [0, 2]]).copy() # make sizes be for regions + centers = np.array(self.sizes.copy()) // 2 + self.centers = centers + if len(self.ids) == 1: + self.centers = self.centers[0, :] + + def __call__(self, img1, img2=None, normalization=None, check_res=False): + """Run the cross correlation on an image/curve or against two + images/curves + Parameters + ---------- + img1 : 1D or 2D np.ndarray + The image (or curve) to run the cross correlation on + img2 : 1D or 2D np.ndarray + If not set to None, run cross correlation of this image (or + curve) against img1. Default is None. 
+ normalization : string or list of strings + normalization types. If not set, use internally saved + normalization parameters + Returns + ------- + ccorrs : 1d or 2d np.ndarray + An image of the correlation. The zero correlation is + located at shape//2 where shape is the 1 or 2-tuple + shape of the array + """ + progress_bar = self.progress_bar + if normalization is None: + normalization = self.normalization + + if img2 is None: + self_correlation = True + else: + self_correlation = False + + ccorrs = list() + + pos = self.pos + # loop over individual regions + if progress_bar: + R = tqdm(range(self.nids)) + else: + R = range(self.nids) + for reg in R: + # for reg in tqdm(range(self.nids)): #for py3.5 + ii = self.pii[pos[reg] : pos[reg + 1]] + jj = self.pjj[pos[reg] : pos[reg + 1]] + i = ii.copy() - self.offsets[reg, 0] + j = jj.copy() - self.offsets[reg, 1] + # set up size for fft with padding + shape = 2 * self.sizes[reg, :] - 1 + fshape = [next_fast_len(int(d)) for d in shape] + # fslice = tuple([slice(0, int(sz)) for sz in shape]) + + submask = np.zeros(self.sizes[reg, :]) + submask[i, j] = 1 + mma1 = np.fft.rfftn(submask, fshape) # for mask + # do correlation by ffts + maskcor = np.fft.irfftn(mma1 * mma1.conj(), fshape) # [fslice]) + # print(reg, maskcor) + # maskcor = _centered(np.fft.fftshift(maskcor), self.sizes[reg,:]) #make smaller?? + maskcor = _centered(maskcor, self.sizes[reg, :]) # make smaller?? + # choose some small value to threshold + maskcor *= maskcor > 0.5 + tmpimg = np.zeros(self.sizes[reg, :]) + tmpimg[i, j] = img1[ii, jj] + im1 = np.fft.rfftn(tmpimg, fshape) # image 1 + if self_correlation: + # ccorr = np.real(np.fft.ifftn(im1 * im1.conj(), fshape)[fslice]) + ccorr = np.fft.irfftn(im1 * im1.conj(), fshape) # [fslice]) + # ccorr = np.fft.fftshift(ccorr) + ccorr = _centered(ccorr, self.sizes[reg, :]) + else: + ndim = img1.ndim + tmpimg2 = np.zeros_like(tmpimg) + tmpimg2[i, j] = img2[ii, jj] + im2 = np.fft.rfftn(tmpimg2, fshape) # image 2 + ccorr = np.fft.irfftn(im1 * im2.conj(), fshape) # [fslice]) + # ccorr = _centered(np.fft.fftshift(ccorr), self.sizes[reg,:]) + ccorr = _centered(ccorr, self.sizes[reg, :]) + # print('here') + + ###check here + if check_res: + if reg == 0: + self.norm = maskcor + self.ck = ccorr.copy() + # print(ccorr.max()) + self.tmp = tmpimg + self.fs = fshape + ###end the check + + # now handle the normalizations + if "symavg" in normalization: + mim1 = np.fft.rfftn(tmpimg * submask, fshape) + Icorr = np.fft.irfftn(mim1 * mma1.conj(), fshape) # [fslice]) + # Icorr = _centered(np.fft.fftshift(Icorr), self.sizes[reg,:]) + Icorr = _centered(Icorr, self.sizes[reg, :]) + # do symmetric averaging + if self_correlation: + Icorr2 = np.fft.irfftn(mma1 * mim1.conj(), fshape) # [fslice]) + # Icorr2 = _centered(np.fft.fftshift(Icorr2), self.sizes[reg,:]) + Icorr2 = _centered(Icorr2, self.sizes[reg, :]) + else: + mim2 = np.fft.rfftn(tmpimg2 * submask, fshape) + Icorr2 = np.fft.irfftn(mma1 * mim2.conj(), fshape) + # Icorr2 = _centered(np.fft.fftshift(Icorr2), self.sizes[reg,:]) + Icorr2 = _centered(Icorr2, self.sizes[reg, :]) + # there is an extra condition that Icorr*Icorr2 != 0 + w = np.where(np.abs(Icorr * Icorr2) > 0) # DO WE NEED THIS (use i,j). 
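+ # Symmetric-averaging normalization (the Schatzel-inspired scheme from the
+ # class docstring): divide the raw correlation by the two shifted mean
+ # intensities and re-weight by the mask autocorrelation,
+ # ccorr *= maskcor / (Icorr * Icorr2), only where both Icorr and Icorr2 are
+ # non-zero so the division is safe.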
+ ccorr[w] *= maskcor[w] / Icorr[w] / Icorr2[w] + # print 'size:',tmpimg.shape,Icorr.shape + if check_res: + if reg == 0: + self.ckn = ccorr.copy() + if "regular" in normalization: + # only run on overlapping regions for correlation + w = np.where(maskcor > 0.5) + + if self_correlation: + ccorr[w] /= maskcor[w] * np.average(tmpimg[w]) ** 2 + else: + ccorr[w] /= maskcor[w] * np.average(tmpimg[w]) * np.average(tmpimg2[w]) + if check_res: + if reg == 0: + self.ckn = ccorr.copy() + # print('here') + # print( np.average(tmpimg[w]) ) + # print( maskcor[w] ) + # print( ccorr.max(), maskcor[w], np.average(tmpimg[w]), np.average(tmpimg2[w]) ) + ccorrs.append(ccorr) + + if len(ccorrs) == 1: + ccorrs = ccorrs[0] + + return ccorrs + + +def _centered(img, sz): + n = sz // 2 + # ind=np.r_[-n[0]:0,0:sz[0]-n[0]] + img = np.take(img, np.arange(-n[0], sz[0] - n[0]), 0, mode="wrap") + # ind=np.r_[-n[1]:0,0:sz[1]-n[1]] + img = np.take(img, np.arange(-n[1], sz[1] - n[1]), 1, mode="wrap") + return img + + +##define a custmoized fftconvolve + +######################################################################################## +# modifided version from signaltools.py in scipy (Mark March 2017) +# Author: Travis Oliphant +# 1999 -- 2002 + + +import threading +import warnings + +# from . import sigtools +import numpy as np +from numpy import ( + allclose, + angle, + arange, + argsort, + array, + asarray, + atleast_1d, + atleast_2d, + cast, + dot, + exp, + expand_dims, + iscomplexobj, + isscalar, + mean, + ndarray, + newaxis, + ones, + pi, + poly, + polyadd, + polyder, + polydiv, + polymul, + polysub, + polyval, + prod, + product, + r_, + ravel, + real_if_close, + reshape, + roots, + sort, + sum, + take, + transpose, + unique, + where, + zeros, + zeros_like, +) +from numpy.fft import irfftn, rfftn +from numpy.lib import NumpyVersion +from scipy import linalg +from scipy.fftpack import fft, fft2, fftfreq, fftn, ifft, ifft2, ifftn, ifftshift + +# from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext + +_rfft_mt_safe = NumpyVersion(np.__version__) >= "1.9.0.dev-e24486e" + +_rfft_lock = threading.Lock() + + +def fftconvolve_new(in1, in2, mode="full"): + """Convolve two N-dimensional arrays using FFT. + + Convolve `in1` and `in2` using the fast Fourier transform method, with + the output size determined by the `mode` argument. + + This is generally much faster than `convolve` for large arrays (n > ~500), + but can be slower when only a few output values are needed, and can only + output float arrays (int or object array inputs will be cast to float). + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`;from scipy.signal import fftconvolve + if sizes of `in1` and `in2` are not equal then `in1` has to be the + larger array.get_window + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + + Returns + ------- + out : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Examples + -------- + Autocorrelation of white noise is an impulse. (This is at least 100 times + as fast as `convolve`.) 
+ + >>> from scipy import signal + >>> sig = np.random.randn(1000) + >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('White noise') + >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) + >>> ax_mag.set_title('Autocorrelation') + >>> fig.tight_layout() + >>> fig.show() + + Gaussian blur implemented using FFT convolution. Notice the dark borders + around the image, due to the zero-padding beyond its boundaries. + The `convolve2d` function allows for other types of image boundaries, + but is far slower. + + >>> from scipy import misc + >>> lena = misc.lena() + >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8)) + >>> blurred = signal.fftconvolve(lena, kernel, mode='same') + + >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3) + >>> ax_orig.imshow(lena, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_kernel.imshow(kernel, cmap='gray') + >>> ax_kernel.set_title('Gaussian kernel') + >>> ax_kernel.set_axis_off() + >>> ax_blurred.imshow(blurred, cmap='gray') + >>> ax_blurred.set_title('Blurred') + >>> ax_blurred.set_axis_off() + >>> fig.show() + + """ + in1 = asarray(in1) + in2 = asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif not in1.ndim == in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return array([]) + + s1 = array(in1.shape) + s2 = array(in2.shape) + complex_result = np.issubdtype(in1.dtype, np.complex) or np.issubdtype(in2.dtype, np.complex) + shape = s1 + s2 - 1 + + if mode == "valid": + _check_valid_mode_shapes(s1, s2) + + # Speed up FFT by padding to optimal size for FFTPACK + # expand by at least twice+1 + fshape = [_next_regular(int(d)) for d in shape] + fslice = tuple([slice(0, int(sz)) for sz in shape]) + # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make + # sure we only call rfftn/irfftn from one thread at a time. + if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)): + try: + ret = irfftn(rfftn(in1, fshape) * rfftn(in2, fshape), fshape)[fslice].copy() + finally: + if not _rfft_mt_safe: + _rfft_lock.release() + else: + # If we're here, it's either because we need a complex result, or we + # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and + # is already in use by another thread). In either case, use the + # (threadsafe but slower) SciPy complex-FFT routines instead. + ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy() + if not complex_result: + ret = ret.real + + if mode == "full": + return ret + elif mode == "same": + return _centered(ret, s1) + elif mode == "valid": + return _centered(ret, s1 - s2 + 1) + else: + raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") + + +def _cross_corr1(img1, img2=None): + """Compute the cross correlation of one (or two) images. + Parameters + ---------- + img1 : np.ndarray + the image or curve to cross correlate + img2 : 1d or 2d np.ndarray, optional + If set, cross correlate img1 against img2. A shift of img2 + to the right of img1 will lead to a shift of the point of + highest correlation to the right. + Default is set to None + """ + ndim = img1.ndim + + if img2 is None: + img2 = img1 + + if img1.shape != img2.shape: + errorstr = "Image shapes don't match. 
" + errorstr += "(img1 : {},{}; img2 : {},{})".format(*img1.shape, *img2.shape) + raise ValueError(errorstr) + + # need to reverse indices for second image + # fftconvolve(A,B) = FFT^(-1)(FFT(A)*FFT(B)) + # but need FFT^(-1)(FFT(A(x))*conj(FFT(B(x)))) = FFT^(-1)(A(x)*B(-x)) + reverse_index = tuple([slice(None, None, -1) for i in range(ndim)]) + imgc = fftconvolve(img1, img2[reverse_index], mode="same") + + return imgc + + +class CrossCorrelator1: + """ + Compute a 1D or 2D cross-correlation on data. + This uses a mask, which may be binary (array of 0's and 1's), + or a list of non-negative integer id's to compute cross-correlations + separately on. + The symmetric averaging scheme introduced here is inspired by a paper + from Schätzel, although the implementation is novel in that it + allows for the usage of arbitrary masks. [1]_ + Examples + -------- + >> ccorr = CrossCorrelator(mask.shape, mask=mask) + >> # correlated image + >> cimg = cc(img1) + or, mask may may be ids + >> cc = CrossCorrelator(ids) + #(where ids is same shape as img1) + >> cc1 = cc(img1) + >> cc12 = cc(img1, img2) + # if img2 shifts right of img1, point of maximum correlation is shifted + # right from correlation center + References + ---------- + .. [1] Schätzel, Klaus, Martin Drewel, and Sven Stimac. “Photon + correlation measurements at large lag times: improving + statistical accuracy.” Journal of Modern Optics 35.4 (1988): + 711-718. + """ + + # TODO : when mask is None, don't compute a mask, submasks + def __init__(self, shape, mask=None, normalization=None): + """ + Prepare the spatial correlator for various regions specified by the + id's in the image. + Parameters + ---------- + shape : 1 or 2-tuple + The shape of the incoming images or curves. May specify 1D or + 2D shapes by inputting a 1 or 2-tuple + mask : 1D or 2D np.ndarray of int, optional + Each non-zero integer represents unique bin. Zero integers are + assumed to be ignored regions. If None, creates a mask with + all points set to 1 + normalization: string or list of strings, optional + These specify the normalization and may be any of the + following: + 'regular' : divide by pixel number + 'symavg' : use symmetric averaging + Defaults to ['regular'] normalization + Delete argument wrap as not used. See fftconvolve as this + expands arrays to get complete convolution, IE no need + to expand images of subregions. + """ + if normalization is None: + normalization = ["regular"] + elif not isinstance(normalization, list): + normalization = list([normalization]) + self.normalization = normalization + + if mask is None: # we can do this easily now. 
+ mask = np.ones(shape) + + # initialize subregions information for the correlation + # first find indices of subregions and sort them by subregion id + pii, pjj = np.where(mask) + bind = mask[pii, pjj] + ord = np.argsort(bind) + bind = bind[ord] + pii = pii[ord] + pjj = pjj[ord] # sort them all + + # make array of pointers into position arrays + pos = np.append(0, 1 + np.where(np.not_equal(bind[1:], bind[:-1]))[0]) + pos = np.append(pos, len(bind)) + self.pos = pos + self.ids = bind[pos[:-1]] + self.nids = len(self.ids) + sizes = np.array( + [ + [ + pii[pos[i] : pos[i + 1]].min(), + pii[pos[i] : pos[i + 1]].max(), + pjj[pos[i] : pos[i + 1]].min(), + pjj[pos[i] : pos[i + 1]].max(), + ] + for i in range(self.nids) + ] + ) + # make indices for subregions arrays and their sizes + pi = pii.copy() + pj = pjj.copy() + for i in range(self.nids): + pi[pos[i] : pos[i + 1]] -= sizes[i, 0] + pj[pos[i] : pos[i + 1]] -= sizes[i, 2] + self.pi = pi + self.pj = pj + self.pii = pii + self.pjj = pjj + sizes = 1 + (np.diff(sizes)[:, [0, 2]]) # make sizes be for regions + self.sizes = sizes.copy() # the shapes of each correlation + # WE now have two sets of positions of the subregions (pi,pj) in subregion + # and (pii,pjj) in images. pos is a pointer such that (pos[i]:pos[i+1]) + # is the indices in the position arrays of subregion i. + + # Making a list of arrays holding the masks for each id. Ideally, mask + # is binary so this is one element to quickly index original images + self.submasks = list() + self.centers = list() + # the positions of each axes of each correlation + self.positions = list() + self.maskcorrs = list() + # regions where the correlations are not zero + self.pxlst_maskcorrs = list() + + # basically saving bunch of mask related stuff like indexing etc, just + # to save some time when actually computing the cross correlations + for id in range(self.nids): + submask = np.zeros(self.sizes[id, :]) + submask[pi[pos[id] : pos[id + 1]], pj[pos[id] : pos[id + 1]]] = 1 + self.submasks.append(submask) + + maskcorr = _cross_corr1(submask) + # quick fix for #if self.wrap is False: + # submask = _expand_image1(submask)finite numbers should be integer so + # choose some small value to threshold + maskcorr *= maskcorr > 0.5 + self.maskcorrs.append(maskcorr) + self.pxlst_maskcorrs.append(maskcorr > 0) + # centers are shape//2 as performed by fftshift + center = np.array(maskcorr.shape) // 2 + self.centers.append(np.array(maskcorr.shape) // 2) + if mask.ndim == 1: + self.positions.append(np.arange(maskcorr.shape[0]) - center[0]) + elif mask.ndim == 2: + self.positions.append( + [np.arange(maskcorr.shape[0]) - center[0], np.arange(maskcorr.shape[1]) - center[1]] + ) + + if len(self.ids) == 1: + self.positions = self.positions[0] + self.centers = self.centers[0] + + def __call__(self, img1, img2=None, normalization=None, desc="cc"): + """Run the cross correlation on an image/curve or against two + images/curves + Parameters + ---------- + img1 : 1D or 2D np.ndarray + The image (or curve) to run the cross correlation on + img2 : 1D or 2D np.ndarray + If not set to None, run cross correlation of this image (or + curve) against img1. Default is None. + normalization : string or list of strings + normalization types. If not set, use internally saved + normalization parameters + Returns + ------- + ccorrs : 1d or 2d np.ndarray + An image of the correlation. 
The zero correlation is + located at shape//2 where shape is the 1 or 2-tuple + shape of the array + """ + if normalization is None: + normalization = self.normalization + + if img2 is None: + self_correlation = True + # img2 = img1 + else: + self_correlation = False + + ccorrs = list() + rngiter = tqdm(range(self.nids), desc=desc) + + pos = self.pos + for reg in rngiter: + i = self.pi[pos[reg] : pos[reg + 1]] + j = self.pj[pos[reg] : pos[reg + 1]] + ii = self.pii[pos[reg] : pos[reg + 1]] + jj = self.pjj[pos[reg] : pos[reg + 1]] + tmpimg = np.zeros(self.sizes[reg, :]) + tmpimg[i, j] = img1[ii, jj] + if not self_correlation: + tmpimg2 = np.zeros_like(tmpimg) + tmpimg2[i, j] = img2[ii, jj] + + if self_correlation: + ccorr = _cross_corr1(tmpimg) + else: + ccorr = _cross_corr1(tmpimg, tmpimg2) + # now handle the normalizations + if "symavg" in normalization: + # do symmetric averaging + Icorr = _cross_corr1(tmpimg * self.submasks[reg], self.submasks[reg]) + if self_correlation: + Icorr2 = _cross_corr1(self.submasks[reg], tmpimg * self.submasks[reg]) + else: + Icorr2 = _cross_corr1(self.submasks[reg], tmpimg2 * self.submasks[reg]) + # there is an extra condition that Icorr*Icorr2 != 0 + w = np.where(np.abs(Icorr * Icorr2) > 0) # DO WE NEED THIS (use i,j). + ccorr[w] *= self.maskcorrs[reg][w] / Icorr[w] / Icorr2[w] + + if "regular" in normalization: + # only run on overlapping regions for correlation + w = self.pxlst_maskcorrs[reg] # NEED THIS? + + if self_correlation: + ccorr[w] /= self.maskcorrs[reg][w] * np.average(tmpimg[w]) ** 2 + else: + ccorr[w] /= self.maskcorrs[reg][w] * np.average(tmpimg[w]) * np.average(tmpimg2[w]) + ccorrs.append(ccorr) + + if len(ccorrs) == 1: + ccorrs = ccorrs[0] + + return ccorrs + + +##for parallel +from multiprocessing import Pool + +import dill + +from pyCHX.chx_compress import apply_async, map_async + + +def run_para_ccorr_sym(ccorr_sym, FD, nstart=0, nend=None, imgsum=None, img_norm=None): + """ + example: + ccorr_sym = CrossCorrelator2(roi_mask.shape, mask=roi_mask, normalization='symavg') + img_norm = get_img_from_iq( qp_saxs, iq_saxs, roi_mask.shape, center) + + """ + + if nend is None: + nend = FD.end - 1 + if nend > FD.end - 1: + nend = FD.end - 1 + N = nend - nstart + if imgsum is None: + imgsum = np.ones(N) + if img_norm is None: + img_norm = 1.0 + inputs = range(N) + pool = Pool(processes=len(inputs)) + print("Starting assign the tasks...") + results = {} + for i in tqdm(range(nstart, nend)): + # img1 = FD.rdframe(i) + # img2 = FD.rdframe(i+1) + results[i] = apply_async( + pool, + ccorr_sym, + (FD.rdframe(i) / (imgsum[i] * img_norm), FD.rdframe(1 + i) / (imgsum[i + 1] * img_norm)), + ) + pool.close() + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + + for i in inputs: + if i == 0: + cc = res[i] + Nc = len(cc) + else: + cci = res[i] + for j in range(Nc): + cc[j] += cci[j] + + for i in range(Nc): + cc[i] = cc[i] / N + + del results + del res + + return cc diff --git a/pyCHX/backups/pyCHX-backup/chx_generic_functions.py b/pyCHX/backups/pyCHX-backup/chx_generic_functions.py new file mode 100644 index 0000000..97f4b05 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_generic_functions.py @@ -0,0 +1,6446 @@ +import copy +from datetime import datetime +from os import listdir +from shutil import copyfile + +import matplotlib.cm as mcm +import numpy as np +import PIL +import pytz +import scipy +from matplotlib import cm +from modest_image import imshow +from scipy.special import erf +from 
skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta, twotheta_to_q +from skimage.draw import disk, ellipse, line, line_aa, polygon +from skimage.filters import prewitt + +# from tqdm import * +from pyCHX.chx_libs import * +from pyCHX.chx_libs import colors, markers + +markers = [ + "o", + "D", + "v", + "^", + "<", + ">", + "p", + "s", + "H", + "h", + "*", + "d", + "8", + "1", + "3", + "2", + "4", + "+", + "x", + "_", + "|", + ",", + "1", +] +markers = np.array(markers * 100) + + +flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +"""a function to flatten a nest list +e.g., flatten( [ ['sg','tt'],'ll' ] ) +gives ['sg', 'tt', 'l', 'l'] +""" + + +def get_frames_from_dscan(uid, detector="eiger4m_single_image"): + """Get frames from a dscan by giving uid and detector""" + hdr = db[uid] + return db.get_images(hdr, detector) + + +def get_roi_intensity(img, roi_mask): + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + avgs = np.zeros(noqs) + for i in tqdm(range(1, 1 + noqs)): + avgs[i - 1] = np.average(img[roi_mask == i]) + return avgs + + +def generate_h5_list(inDir, filename): + """YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir + Input: + inDir: the input direction + filename: the filename for output (have to lst as extension) + Output: + Save the all h5 filenames in a lst file + """ + fp_list = listdir(inDir) + if filename[-4:] != ".lst": + filename += ".lst" + for FP in fp_list: + FP_ = inDir + FP + if os.path.isdir(FP_): + fp = listdir(FP_) + for fp_ in fp: + if ".h5" in fp_: + append_txtfile(filename=filename, data=np.array([FP_ + "/" + fp_])) + print("The full path of all the .h5 in %s has been saved in %s." % (inDir, filename)) + print("You can use ./analysis/run_gui to visualize all the h5 file.") + + +def fit_one_peak_curve(x, y, fit_range=None): + """YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape + Parameters: + x: one-d array, x-axis data + y: one-d array, y-axis data + fit_range: [x1, x2], a list of index, to define the x-range for fit + Return: + center: float, center of the peak + center_std: float, error bar of center in the fitting + fwhm: float, full width at half max intensity of the peak, 2*sigma + fwhm_std:float, error bar of the full width at half max intensity of the peak + xf: the x in the fit + out: the fitting class resutled from lmfit + + """ + from lmfit.models import LinearModel, LorentzianModel + + peak = LorentzianModel() + background = LinearModel() + model = peak + background + if fit_range != None: + x1, x2 = fit_range + xf = x[x1:x2] + yf = y[x1:x2] + else: + xf = x + yf = y + model.set_param_hint("slope", value=5) + model.set_param_hint("intercept", value=0) + model.set_param_hint("center", value=0.005) + model.set_param_hint("amplitude", value=0.1) + model.set_param_hint("sigma", value=0.003) + # out=model.fit(yf, x=xf)#, method='nelder') + out = model.fit(yf, x=xf, method="leastsq") + cen = out.params["center"].value + cen_std = out.params["center"].stderr + wid = out.params["sigma"].value * 2 + wid_std = out.params["sigma"].stderr * 2 + return cen, cen_std, wid, wid_std, xf, out + + +def plot_xy_with_fit( + x, + y, + xf, + out, + cen, + cen_std, + wid, + wid_std, + xlim=[1e-3, 0.01], + xlabel="q (" r"$\AA^{-1}$)", + ylabel="I(q)", + filename=None, +): + """YG Dev@Aug 10, 2019 to plot x,y with fit, + currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid""" + + yf2 = 
out.model.eval(params=out.params, x=xf) + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, m="o", ls="", c="k", legend="data") + plot1D(x=xf, y=yf2, ax=ax, m="", ls="-", c="r", legend="fit", logy=True) + ax.set_xlim(xlim) + # ax.set_ylim( 0.1, 4) + # ax.set_title(uid+'--t=%.2f'%tt) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + txts = r"peak" + r" = %.5f +/- %.5f " % (cen, cen_std) + ax.text(x=0.02, y=0.2, s=txts, fontsize=14, transform=ax.transAxes) + txts = r"wid" + r" = %.4f +/- %.4f" % (wid, wid_std) + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) + plt.tight_layout() + if filename != None: + plt.savefig(filename) + return ax + + +def get_touched_qwidth(qcenters): + """YG Dev@CHX April 2019, get touched qwidth by giving qcenters""" + qwX = np.zeros_like(qcenters) + qW = qcenters[1:] - qcenters[:-1] + qwX[0] = qW[0] + for i in range(1, len(qcenters) - 1): + # print(i) + qwX[i] = min(qW[i - 1], qW[i]) + qwX[-1] = qW[-1] + qwX *= 0.9999 + return qwX + + +def append_txtfile(filename, data, fmt="%s", *argv, **kwargs): + """YG. Dev May 10, 2109 append data to a file + Create an empty file if the file dose not exist, otherwise, will append the data to it + Input: + fp: filename + data: the data to be append + fmt: the parameter defined in np.savetxt + + """ + from numpy import savetxt + + exists = os.path.isfile(filename) + if not exists: + np.savetxt( + filename, + [], + fmt="%s", + ) + print("create new file") + + f = open(filename, "a") + savetxt(f, data, fmt=fmt, *argv, **kwargs) + f.close() + + +def get_roi_mask_qval_qwid_by_shift( + new_cen, new_mask, old_cen, old_roi_mask, setup_pargs, geometry, limit_qnum=None +): + """YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask""" + center = setup_pargs["center"] + roi_mask1 = shift_mask( + new_cen=center, new_mask=new_mask, old_cen=old_cen, old_roi_mask=old_roi_mask, limit_qnum=limit_qnum + ) + qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( + new_mask=new_mask, setup_pargs=setup_pargs, old_roi_mask=old_roi_mask, old_cen=old_cen, geometry=geometry + ) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1, new_mask) + # print(w,w1) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k in w1} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k in w1} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return roi_mask1, qval_dict, qwid_dict + + +def get_zero_nozero_qind_from_roi_mask(roi_mask, mask): + """YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number""" + qind, pixelist = roi.extract_label_indices(roi_mask * mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + w = np.where(nopr == 0)[0] + w1 = np.where(nopr != 0)[0] + return w, w1 + + +def get_masked_qval_qwid_dict_using_Rmax(new_mask, setup_pargs, old_roi_mask, old_cen, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method""" + cy, cx = setup_pargs["center"] + my, mx = new_mask.shape + Rmax = int( + np.ceil(max(np.hypot(cx, cy), np.hypot(cx - mx, cy - my), np.hypot(cx, cy - my), np.hypot(cx - mx, cy))) + ) + Fmask = np.zeros([Rmax * 2, Rmax * 2], dtype=int) + Fmask[Rmax - cy : Rmax - cy + my, Rmax - cx : Rmax - cx + mx] = new_mask + roi_mask1 = shift_mask( + new_cen=[Rmax, Rmax], + 
new_mask=np.ones_like(Fmask), + old_cen=old_cen, + old_roi_mask=old_roi_mask, + limit_qnum=None, + ) + setup_pargs_ = { + "center": [Rmax, Rmax], + "dpix": setup_pargs["dpix"], + "Ldet": setup_pargs["Ldet"], + "lambda_": setup_pargs["lambda_"], + } + qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict(roi_mask1, Fmask, setup_pargs_, geometry) + # w = get_zero_qind_from_roi_mask(roi_mask1,Fmask) + return qval_dict1, qwid_dict1 # ,w + + +def get_masked_qval_qwid_dict(roi_mask, mask, setup_pargs, geometry): + """YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask""" + + qval_dict_, qwid_dict_ = get_qval_qwid_dict(roi_mask, setup_pargs, geometry=geometry) + w, w1 = get_zero_nozero_qind_from_roi_mask(roi_mask, mask) + qval_dictx = {k: v for (k, v) in list(qval_dict_.items()) if k not in w} + qwid_dictx = {k: v for (k, v) in list(qwid_dict_.items()) if k not in w} + qval_dict = {} + qwid_dict = {} + for i, k in enumerate(list(qval_dictx.keys())): + qval_dict[i] = qval_dictx[k] + qwid_dict[i] = qwid_dictx[k] + return qval_dict, qwid_dict + + +def get_qval_qwid_dict(roi_mask, setup_pargs, geometry="saxs"): + """YG Dev April 6, 2019 + Get qval_dict and qwid_dict by giving roi_mask, setup_pargs + Input: + roi_mask: integer type 2D array + setup_pargs: dict, should at least contains, center (direct beam center), dpix (in mm), + lamda_: in A-1, Ldet: in mm + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 0.99999702, + 'lambda_': 0.9686265, #essential + 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/', + 'timeperframe': 1.0, + 'uid': 'uid=b85dad'} + geometry: support saxs for isotropic transmission SAXS + ang_saxs for anisotropic transmission SAXS + flow_saxs for anisotropic transmission SAXS under flow (center symetric) + + Return: + qval_dict: dict, key as q-number, val: q val + qwid_dict: dict, key as q-number, val: q width (qmax - qmin) + + TODOLIST: to make GiSAXS work + + """ + + origin = setup_pargs["center"] # [::-1] + shape = roi_mask.shape + qp_map = radial_grid(origin, shape) + phi_map = np.degrees(angle_grid(origin, shape)) + two_theta = radius_to_twotheta(setup_pargs["Ldet"], setup_pargs["dpix"] * qp_map) + q_map = utils.twotheta_to_q(two_theta, setup_pargs["lambda_"]) + qind, pixelist = roi.extract_label_indices(roi_mask) + Qval = np.unique(qind) + qval_dict_ = {} + qwid_dict_ = {} + for j, i in enumerate(Qval): + qval = q_map[roi_mask == i] + # print( qval ) + if geometry == "saxs": + qval_dict_[j] = [(qval.max() + qval.min()) / 2] # np.mean(qval) + qwid_dict_[j] = [(qval.max() - qval.min())] + + elif geometry == "ang_saxs": + aval = phi_map[roi_mask == i] + # print(j,i,qval, aval) + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + + qval_dict_[j][0] = (qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) + else: + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) + + elif geometry == "flow_saxs": + sx, sy = roi_mask.shape + cx, cy = origin + aval = (phi_map[cx:])[roi_mask[cx:] == i] + if len(aval) == 0: + aval = (phi_map[:cx])[roi_mask[:cx] == i] + 180 + + qval_dict_[j] = np.zeros(2) + qwid_dict_[j] = np.zeros(2) + qval_dict_[j][0] = 
(qval.max() + qval.min()) / 2 # np.mean(qval) + qwid_dict_[j][0] = qval.max() - qval.min() + # print(aval) + if ((aval.max() * aval.min()) < 0) & (aval.max() > 90): + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 - 180 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min() - 360) + # print('here -- %s'%j) + else: + qval_dict_[j][1] = (aval.max() + aval.min()) / 2 # np.mean(qval) + qwid_dict_[j][1] = abs(aval.max() - aval.min()) + + return qval_dict_, qwid_dict_ + + +def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): + """Get normalization of a time series by SavitzkyGolay filter + Input: + FD: file handler for a compressed data + pixelist: pixel list for a roi_mask + bins: the bin number for the time series, if number = total number of the time frame, + it means SG of the time averaged image + mask: the additional mask + window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details + Return: + norm: shape as ( length of FD, length of pixelist ) + """ + if mask == None: + mask = 1 + beg = FD.beg + end = FD.end + N = end - beg + BEG = beg + if bins == 1: + END = end + NB = N + MOD = 0 + else: + END = N // bins + MOD = N % bins + NB = END + norm = np.zeros([end, len(pixelist)]) + for i in tqdm(range(NB)): + if bins == 1: + img = FD.rdframe(i + BEG) + else: + for j in range(bins): + ct = i * bins + j + BEG + # print(ct) + if j == 0: + img = FD.rdframe(ct) + n = 1.0 + else: + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + # img += FD.rdframe( ct ) + n += 1 + img /= n + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + normi = np.ravel(avg_imgf)[pixelist] + if bins == 1: + norm[i + beg] = normi + else: + norm[i * bins + beg : (i + 1) * bins + beg] = normi + if MOD: + for j in range(MOD): + ct = (1 + i) * bins + j + BEG + if j == 0: + img = FD.rdframe(ct) + n = 1.0 + else: + (p, v) = FD.rdrawframe(ct) + np.ravel(img)[p] += v + n += 1 + img /= n + # print(ct,n) + img = FD.rdframe(ct) + avg_imgf = sgolay2d(img, window_size=window_size, order=order) * mask + normi = np.ravel(avg_imgf)[pixelist] + norm[(i + 1) * bins + beg : (i + 2) * bins + beg] = normi + return norm + + +def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): + """Y.G. 
Dev April 2019@CHX to make a new roi_mask by shifting and cropping the old roi_mask, which is much bigger than the new mask
+ Input:
+ new_cen: [x,y] in unit of pixels
+ new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask
+ old_cen: [x,y] in unit of pixels
+ old_roi_mask: the roi_mask to be shifted
+ limit_qnum: integer, if not None, defines the max number of unique values of nroi_mask
+
+ Output:
+ the shifted/cropped roi_mask
+ """
+ nsx, nsy = new_mask.shape
+ down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1]
+ x1, x2, y1, y2 = [old_cen[0] - down, old_cen[0] + up, old_cen[1] - left, old_cen[1] + right]
+ nroi_mask_ = old_roi_mask[x1:x2, y1:y2] * new_mask
+ nroi_mask = np.zeros_like(nroi_mask_)
+ qind, pixelist = roi.extract_label_indices(nroi_mask_)
+ qu = np.unique(qind)
+ # noqs = len( qu )
+ # nopr = np.bincount(qind, minlength=(noqs+1))[1:]
+ # qm = nopr>0
+ for j, qv in enumerate(qu):
+ nroi_mask[nroi_mask_ == qv] = j + 1
+ if limit_qnum != None:
+ nroi_mask[nroi_mask > limit_qnum] = 0
+ return nroi_mask
+
+
+def plot_q_g2fitpara_general(
+ g2_dict,
+ g2_fitpara,
+ geometry="saxs",
+ ylim=None,
+ plot_all_range=True,
+ plot_index_range=None,
+ show_text=True,
+ return_fig=False,
+ show_fit=True,
+ ylabel="g2",
+ qth_interest=None,
+ max_plotnum_fig=1600,
+ qphi_analysis=False,
+ *argv,
+ **kwargs,
+):
+ """
+ Mar 29,2019, Y.G.@CHX
+
+ plot q~fit parameters
+
+ Parameters
+ ----------
+ qval_dict, dict, with key as roi number,
+ format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs
+ format as {1: [qr1], 2: [qr2] ...} for saxs
+ format as {1: [qr1, qa1], 2: [qr2,qa2], ...} for ang-saxs
+ rate: relaxation_rate
+ plot_index_range:
+ Option:
+ if power_variable = False, power =2 to fit q^2~rate,
+ Otherwise, power is variable.
+ show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid_ = kwargs["uid"] + else: + uid_ = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + data_dir = path + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" + + if geometry == "saxs": + if qphi_analysis: + geometry = "ang_saxs" + + qval_dict_, fit_res_ = g2_dict, g2_fitpara + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + # print(qr_label, qz_label, short_ulabel, long_ulabel) + # $print( num_short, num_long ) + beta, relaxation_rate, baseline, alpha = ( + g2_fitpara["beta"], + g2_fitpara["relaxation_rate"], + g2_fitpara["baseline"], + g2_fitpara["alpha"], + ) + + fps = [] + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + betai, relaxation_ratei, baselinei, alphai = ( + beta[ind_long_i], + relaxation_rate[ind_long_i], + baseline[ind_long_i], + alpha[ind_long_i], + ) + qi = long_ulabel + # print(s_ind, qi, np.array( betai) ) + + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": + fig = plt.figure(figsize=(8, 6)) + else: + if num_short > 1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + # print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) + else: + # print('Here') + if master_plot != "qz": + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == "qz": + if geometry == "ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (uid_, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + else: + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + + else: + sy = 1 + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + temp = sy + sy = sx + sx = temp + if sx == 1: + if sy == 1: + plt.axis("on") + ax1 = fig.add_subplot(4, 1, 1) + ax2 = fig.add_subplot(4, 1, 2) + ax3 = fig.add_subplot(4, 1, 3) + ax4 = fig.add_subplot(4, 1, 4) + plot1D(x=qi, y=betai, m="o", ls="--", c="k", ax=ax1, legend=r"$\beta$", title="") + plot1D(x=qi, y=alphai, m="o", ls="--", c="r", ax=ax2, legend=r"$\alpha$", title="") + plot1D(x=qi, y=baselinei, m="o", ls="--", c="g", ax=ax3, legend=r"$baseline$", title="") + plot1D(x=qi, y=relaxation_ratei, m="o", c="b", ls="--", ax=ax4, legend=r"$\gamma$ $(s^{-1})$", title="") + + ax4.set_ylabel(r"$\gamma$ $(s^{-1})$") + ax4.set_xlabel(r"$q $ $(\AA)$", fontsize=16) + ax3.set_ylabel(r"$baseline") + ax2.set_ylabel(r"$\alpha$") + ax1.set_ylabel(r"$\beta$") + fig.tight_layout() + fp = data_dir + uid_ + 
"g2_q_fit_para_%s.png" % short_ulabel[s_ind] + fig.savefig(fp, dpi=fig.dpi) + fps.append(fp) + outputfile = data_dir + "%s_g2_q_fitpara_plot" % uid_ + ".png" + # print(uid) + combine_images(fps, outputfile, outsize=[2000, 2400]) + + +def plot_q_rate_general( + qval_dict, + rate, + geometry="saxs", + ylim=None, + logq=True, + lograte=True, + plot_all_range=True, + plot_index_range=None, + show_text=True, + return_fig=False, + show_fit=True, + *argv, + **kwargs, +): + """ + Mar 29,2019, Y.G.@CHX + + plot q~rate in log-log scale + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + plot_index_range: + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. + show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + fig, ax = plt.subplots() + plt.title(r"$Q$" "-Rate-%s" % (uid), fontsize=20, y=1.06) + Nqz = num_short + if Nqz != 1: + ls = "--" + else: + ls = "" + # print(Nqz) + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] + else: + label = "" + ax.loglog(x, y, marker="o", ls=ls, label=label) + if Nqz != 1: + legend = ax.legend(loc="best") + + if plot_index_range != None: + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) + + if ylim != None: + ax.set_ylim(ylim) + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$) (log)") + ax.set_xlabel("$q$" r"($\AA$) (log)") + fp = path + "%s_Q_Rate_loglog" % (uid) + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig, ax + + +def plot_xy_x2( + x, + y, + x2=None, + pargs=None, + loglog=False, + logy=True, + fig_ax=None, + xlabel="q (" r"$\AA^{-1}$)", + xlabel2="q (pixel)", + title="_q_Iq", + ylabel="I(q)", + save=True, + *argv, + **kwargs, +): + """YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x) + This funciton is primary for plot q-Iq + + Input: + x: one-d array, x in one unit + y: one-d array, + x2:one-d array, x in anoter unit + pargs: dict, could include 'uid', 'path' + loglog: if True, if plot x and y in log, by default plot in y-log + save: if True, save the plot in the path defined in pargs + kwargs: could include xlim (in unit of index), ylim (in unit of real value) + + """ + if fig_ax == None: + fig, ax1 = plt.subplots() + else: + fig, ax1 = fig_ax + if pargs != None: + uid = pargs["uid"] + path = pargs["path"] + else: + uid = "XXX" + path = "" + if loglog: + ax1.loglog(x, y, "-o") + elif logy: + ax1.semilogy(x, y, "-o") + else: + ax1.plot(x, y, "-o") + ax1.set_xlabel(xlabel) + ax1.set_ylabel(ylabel) + title = ax1.set_title("%s--" % uid + title) + Nx = len(x) + if "xlim" in kwargs.keys(): + xlim = kwargs["xlim"] + if xlim[1] > Nx: + xlim[1] = Nx - 1 + else: + xlim = [0, Nx] + if "ylim" in kwargs.keys(): + ylim = kwargs["ylim"] + 
else: + ylim = [y.min(), y.max()] + lx1, lx2 = xlim + ax1.set_xlim([x[lx1], x[lx2]]) + ax1.set_ylim(ylim) + if x2 != None: + ax2 = ax1.twiny() + ax2.set_xlabel(xlabel2) + ax2.set_ylabel(ylabel) + ax2.set_xlim([x2[lx1], x2[lx2]]) + title.set_y(1.1) + fig.subplots_adjust(top=0.85) + if save: + path = pargs["path"] + fp = path + "%s_q_Iq" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + + +def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0): + """save oavs as png""" + tifs = list(db[uid].data("OAV_image"))[0] + try: + pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) + except: + pixel_scalebar = None + print("No OAVS resolution is available.") + + text_string = "%s $\mu$m" % scalebar_size + h = db[uid] + oavs = tifs + + # 12/03/2023: have a problem with OAV not being detector [0]...just try and go throught the list + detectors = sorted(get_detectors(h)) + for d in range(len(detectors)): + try: + oav_period = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_period"] + oav_expt = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_time"] + except: + pass + oav_times = [] + for i in range(len(oavs)): + oav_times.append(oav_expt + i * oav_period) + fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) + # plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img * scale)[:, :, 2] < threshold + except: + ind = np.flipud(img * scale) < threshold + rgb_cont_img = np.copy(np.flipud(img)) + # rgb_cont_img[ind,0]=1000 + if brightness_scale != 1: + rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) + + plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") + plt.axis("equal") + cross = [685, 440, 50] # definintion of direct beam: x, y, size + plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") + plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") + if pixel_scalebar != None: + plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. + plt.text(1000, 50, text_string, fontsize=14, color="r") + plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") + plt.axis("off") + plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) + +def save_oavs_tifs_v2(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0,cross=[685, 440, 50]): + """ + save OAV images collected for a uid as an 'aggregate' image that can be attached to Olog (attaching is not part of this function) + Adds time stamps for series of OAV images + uid: uid + data_dir: directory for saving aggregate image + brightness_scale: scale brightness of images (default: 1, i.e. 
no scaling) + scalebar_size: [pixel] if sufficient information available in md, add scalebar to images; (default: 100) + cross: [xpos,ypos,width] (xpos,ypos): pixel coordinate of X-ray beam, width: width of cross on image [pixel] ;default: [685, 440, 50] -> optical axis for 12x zoom, 50 pixel wide cross + scale/threshold: manipulation of image intensity, enhancement of areas (currently not implemented) + 01/28/2025 by LW + """ + h=db[uid].v2.start + detectors=h['detectors'] + for d in detectors: + if 'oav' in d or 'OAV' in d: + oav_det=d + oav_cam = '%s_image'%d + + oavs = list(db[uid].data(oav_cam))[0] + res_key=None + for k in h.keys(): + if 'OAV' in k and 'resolution' in k: + res_key = k + try: + pixel_scalebar = np.ceil(scalebar_size / h[res_key]) + except: + pixel_scalebar = None + print("No OAV resolution is available.") + text_string = "%s $\mu$m" % scalebar_size + oav_period=np.array(db[uid].v2['primary']['config'][oav_det]['%s_cam_acquire_period'%oav_det])[0] + oav_expt=np.array(db[uid].v2['primary']['config'][oav_det]['%s_cam_acquire_time'%oav_det])[0] + + oav_times = [] + for i in range(len(oavs)): + oav_times.append(oav_expt + i * oav_period) + fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) + pc=1 + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) + # plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img * scale)[:, :, 2] < threshold + except: + ind = np.flipud(img * scale) < threshold + rgb_cont_img = np.copy(np.flipud(img)) + # rgb_cont_img[ind,0]=1000 + if brightness_scale != 1: + rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) + + plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") + plt.axis("equal") + plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") + plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") + if pixel_scalebar != None: + plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. + plt.text(1000, 50, text_string, fontsize=14, color="r") + plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") + plt.axis("off");pc+=1 + for i in range(int(np.ceil(len(oavs) / 3))* 3-pc+1): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, pc) + plt.axis("off") + plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) + + +def shift_mask_old(mask, shiftx, shifty): + """YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel + Input: + mask: int-type array, + shiftx: int scalar, shift value in x direction with unit in pixel + shifty: int scalar, shift value in y direction with unit in pixel + Output: + maskn: int-type array, shifted mask + + """ + qind, pixelist = roi.extract_label_indices(mask) + dims = mask.shape + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + pixely = pixelist % imgwidthy + pixelx = pixelist // imgwidthy + pixelyn = pixely + shiftx + pixelxn = pixelx + shifty + w = (pixelyn < imgwidthy) & (pixelyn >= 0) & (pixelxn < imgwidthx) & (pixelxn >= 0) + pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w] + maskn = np.zeros_like(mask) + maskn.ravel()[pixelist_new] = qind[w] + return maskn + + +def get_current_time(): + """get current time in a fomart of year/month/date/hour(24)/min/sec/, + e.g. 
2009-01-05 22:14:39 + """ + loc_dt = datetime.now(pytz.timezone("US/Eastern")) + fmt = "%Y-%m-%d %H:%M:%S" + return loc_dt.strftime(fmt) + + +def evalue_array(array, verbose=True): + """Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array""" + _min, _max, avg, std = np.min(array), np.max(array), np.average(array), np.std(array) + if verbose: + print( + "The min, max, avg, std of this array are: %s %s %s %s, respectively." % (_min, _max, avg, std) + ) + return _min, _max, avg, std + + +def find_good_xpcs_uids(fuids, Nlim=100, det=["4m", "1m", "500"]): + """Y.G., Dev Nov 1, 2018 Find the good xpcs series + Input: + fuids: list, a list of full uids + Nlim: integer, the smallest number of images to be considered as XCPS sereis + det: list, a list of detector (can be short string of the full name of the detector) + Return: + the xpcs uids list + + """ + guids = [] + for i, uid in enumerate(fuids): + if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": + head = db[uid]["start"] + for dec in head["detectors"]: + for dt in det: + if dt in dec: + if "number of images" in head: + if float(head["number of images"]) >= Nlim: + # print(i, uid) + guids.append(uid) + G = np.unique(guids) + print("Found %s uids for XPCS series." % len(G)) + return G + + +def create_fullImg_with_box( + shape, + box_nx=9, + box_ny=8, +): + """Y.G. 2018/10/26 Divide image with multi touched boxes + Input + shape: the shape of image + box_nx: the number of box in x + box_ny: the number width of box in y + Return: + roi_mask, (* mask ) + """ + + # shape = mask.shape + Wrow, Wcol = int(np.ceil(shape[0] / box_nx)), int(np.ceil(shape[1] / box_ny)) + # print(Wrow, Wcol) + roi_mask = np.zeros(shape, dtype=np.int32) + for i in range(box_nx): + for j in range(box_ny): + roi_mask[i * Wrow : (i + 1) * Wrow, j * Wcol : (j + 1) * Wcol] = i * box_ny + j + 1 + # roi_mask *= mask + return roi_mask + + +def get_refl_y0( + inc_ang, + inc_y0, + Ldet, + pixel_size, +): + """Get reflection beam center y + Input: + inc_ang: incident angle in degree + inc_y0: incident beam y center in pixel + Ldet: sample to detector distance in meter + pixel_size: pixel size in meter + Return: reflection beam center y in pixel + """ + return Ldet * np.tan(np.radians(inc_ang)) * 2 / pixel_size + inc_y0 + + +def lin2log_g2(lin_tau, lin_g2, num_points=False): + """ + Lutz developed at Aug,2018 + function to resample g2 with linear time steps into logarithmics + g2 values between consecutive logarthmic time steps are averaged to increase statistics + calling sequence: lin2log_g2(lin_tau,lin_g2,num_points=False) + num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade) + num_points=18 -> use 18 logarithmically spaced time points + """ + # prep taus and g2s: remove nan and first data point at tau=0 + rem = lin_tau == 0 + # print('lin_tau: '+str(lin_tau.size)) + # print('lin_g2: '+str(lin_g2.size)) + lin_tau[rem] = np.nan + # lin_tau[0]=np.nan;#lin_g2[0]=np.nan + lin_g2 = lin_g2[np.isfinite(lin_tau)] + lin_tau = lin_tau[np.isfinite(lin_tau)] + # print('from lin-to-log-g2_sampling: ',lin_tau) + if num_points == False: + # automatically decide how many log-points (8/decade) + dec = int(np.ceil((np.log10(lin_tau.max()) - np.log10(lin_tau.min())) * 8)) + else: + dec = int(num_points) + log_tau = np.logspace(np.log10(lin_tau[0]), np.log10(lin_tau.max()), dec) + # re-sample correlation function: + log_g2 = [] + for i in range(log_tau.size - 1): + y = [i, log_tau[i] - (log_tau[i + 
1] - log_tau[i]) / 2, log_tau[i] + (log_tau[i + 1] - log_tau[i]) / 2] + # x=lin_tau[lin_tau>y[1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + # print(np.average(lin_g2[x])) + if np.isfinite(np.average(lin_g2[x])): + log_g2.append(np.average(lin_g2[x])) + else: + log_g2.append(np.interp(log_tau[i], lin_tau, lin_g2)) + if i == log_tau.size - 2: + # print(log_tau[i+1]) + y = [i + 1, log_tau[i + 1] - (log_tau[i + 1] - log_tau[i]) / 2, log_tau[i + 1]] + x1 = lin_tau > y[1] + x2 = lin_tau < y[2] + x = x1 * x2 + log_g2.append(np.average(lin_g2[x])) + return [log_tau, log_g2] + + +def get_eigerImage_per_file(data_fullpath): + f = h5py.File(data_fullpath) + dset_keys = list(f["/entry/data"].keys()) + dset_keys.sort() + dset_root = "/entry/data" + dset_keys = [dset_root + "/" + dset_key for dset_key in dset_keys] + dset = f[dset_keys[0]] + return len(dset) + + +def copy_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX + Copy Eiger file containing master and data files to a new path + old_path: the full path of the Eiger master file + new_path: the new path + + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") + for fp in tqdm(fps): + if not os.path.exists(new_path + os.path.basename(fp)): + shutil.copy(fp, new_path) + print("The files %s are copied: %s." % (old_path[:-10] + "*", new_path + os.path.basename(fp))) + + +def delete_data(old_path, new_path="/tmp_data/data/"): + """YG Dev July@CHX + Delete copied Eiger file containing master and data in a new path + old_path: the full path of the Eiger master file + new_path: the new path + """ + import glob + import shutil + + # old_path = sud[2][0] + # new_path = '/tmp_data/data/' + fps = glob.glob(old_path[:-10] + "*") + for fp in tqdm(fps): + nfp = new_path + os.path.basename(fp) + if os.path.exists(nfp): + os.remove(nfp) + + +def show_tif_series( + tif_series, Nx=None, center=None, w=50, vmin=None, vmax=None, cmap=cmap_vge_hdr, logs=False, figsize=[10, 16] +): + """ + tif_series: list of 2D tiff images + Nx: the number in the row for dispalying + center: the center of iamge (or direct beam pixel) + w: the ROI half size in pixel + vmin: the min intensity value for plot + vmax: if None, will be max intensity value of the ROI + figsize: size of the plot (in inch) + + """ + + if center != None: + cy, cx = center + # infs = sorted(sample_list) + N = len(tif_series) + if Nx == None: + sy = int(np.sqrt(N)) + else: + sy = Nx + sx = int(np.ceil(N / sy)) + fig = plt.figure(figsize=figsize) + for i in range(N): + # print(i) + ax = fig.add_subplot(sx, sy, i + 1) + # d = (np.array( PIL.Image.open( infs[i] ).convert('I') ))[ cy-w:cy+w, cx-w:cx+w ] + d = tif_series[i][::-1] + # vmax= np.max(d) + # pritn(vmax) + # vmin= 10#np.min(d) + show_img( + d, + logs=logs, + show_colorbar=False, + show_ticks=False, + ax=[fig, ax], + image_name="%02d" % (i + 1), + cmap=cmap, + vmin=vmin, + vmax=vmax, + aspect=1, + save=False, + path=None, + ) + return fig, ax + + +from scipy.special import erf + + +def ps(y, shift=0.5, replot=True, logplot="off", x=None): + """ + Dev 16, 2018 + Modified ps() function in 95-utilities.py + function to determine statistic on line profile (assumes either peak or erf-profile) + Input: + y: 1D array, the data for analysis + shift: scale for peak presence (0.5 -> peak has to be taller factor 2 above background) + replot: if True, will plot data (if error func) with the fit and peak/cen/com position + logplot: if on, will plot in log 
scale + x: if not None, give x-data + + + """ + if x == None: + x = np.arange(len(y)) + x = np.array(x) + y = np.array(y) + + PEAK = x[np.argmax(y)] + PEAK_y = np.max(y) + COM = np.sum(x * y) / np.sum(y) + + ### from Maksim: assume this is a peak profile: + def is_positive(num): + return True if num > 0 else False + + # Normalize values first: + ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 + positive = is_positive(ym[0]) + list_of_roots = [] + for i in range(len(y)): + current_positive = is_positive(ym[i]) + if current_positive != positive: + list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) + positive = not positive + if len(list_of_roots) >= 2: + FWHM = abs(list_of_roots[-1] - list_of_roots[0]) + CEN = list_of_roots[0] + 0.5 * (list_of_roots[1] - list_of_roots[0]) + ps.fwhm = FWHM + ps.cen = CEN + yf = ym + # return { + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, + # } + else: # ok, maybe it's a step function.. + # print('no peak...trying step function...') + ym = ym + shift + + def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + return base - A * erf(k * (x - x0)) + + mod = Model(err_func) + ### estimate starting values: + x0 = np.mean(x) + # k=0.1*(np.max(x)-np.min(x)) + pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) + result = mod.fit(ym, pars, x=x) + CEN = result.best_values["x0"] + FWHM = result.best_values["k"] + A = result.best_values["A"] + b = result.best_values["base"] + yf_ = err_func(x, CEN, k=FWHM, A=A, base=b) # result.best_fit + yf = (yf_) * (np.max(y) - np.min(y)) + np.min(y) + + # (y - np.min(y)) / (np.max(y) - np.min(y)) - shift + + ps.cen = CEN + ps.fwhm = FWHM + + if replot: + ### re-plot results: + if logplot == "on": + fig, ax = plt.subplots() # plt.figure() + ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") + ax.hold(True) + ax.semilogy([CEN, CEN], [np.min(y), np.max(y)], "r-.", label="CEN") + ax.semilogy([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.semilogy(x, y, "bo-") + # plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() + else: + # plt.close(999) + fig, ax = plt.subplots() # plt.figure() + ax.plot([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") + + # ax.hold(True) + ax.plot([CEN, CEN], [np.min(y), np.max(y)], "m-.", label="CEN") + ax.plot([COM, COM], [np.min(y), np.max(y)], "g.-.", label="COM") + ax.plot(x, y, "bo--") + ax.plot(x, yf, "r-", label="Fit") + + # plt.xlabel(field);plt.ylabel(intensity_field) + ax.legend() + # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) + # plt.show() + + ### assign values of interest as function attributes: + ps.peak = PEAK + ps.com = COM + return ps.cen + + +def create_seg_ring(ring_edges, ang_edges, mask, setup_pargs): + """YG Dev April 6, 2018 + Create segment ring mask + Input: + ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ] + ang_edges: edges of angles, e.g., [ [20,40], [50, 60], ] + mask: bool type 2D array + set_pargs: dict, should at least contains, center + e.g., + {'Ldet': 1495.0, abs #essential + 'center': [-4469, 363], #essential + 'dpix': 0.075000003562308848, #essential + 'exposuretime': 
0.99999702,
+ 'lambda_': 0.9686265, #essential
+ 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/',
+ 'timeperframe': 1.0,
+ 'uid': 'uid=b85dad'}
+ Return:
+ roi_mask: segmented ring mask: two-D array
+ qval_dict: dict, key as q-number, val: q val
+
+ """
+
+ roi_mask_qr, qr, qr_edge = get_ring_mask(
+ mask,
+ inner_radius=None,
+ outer_radius=None,
+ width=None,
+ num_rings=None,
+ edges=np.array(ring_edges),
+ unit="pixel",
+ pargs=setup_pargs,
+ )
+
+ roi_mask_ang, ang_center, ang_edge = get_angular_mask(
+ mask,
+ inner_angle=None,
+ outer_angle=None,
+ width=None,
+ edges=np.array(ang_edges),
+ num_angles=None,
+ center=setup_pargs["center"],  # beam center taken from setup_pargs; a bare `center` is not defined in this function
+ flow_geometry=False,
+ )
+
+ roi_mask, good_ind = combine_two_roi_mask(roi_mask_qr, roi_mask_ang, pixel_num_thres=100)
+ qval_dict_ = get_qval_dict(qr_center=qr, qz_center=ang_center, one_qz_multi_qr=False)
+ qval_dict = {i: qval_dict_[k] for (i, k) in enumerate(good_ind)}
+ return roi_mask, qval_dict
+
+
+def find_bad_pixels_FD(bad_frame_list, FD, img_shape=[514, 1030], threshold=15, show_progress=True):
+ """Designed to find bad pixel list in 500K
+ threshold: the max intensity in 5K
+ """
+ bad = np.zeros(img_shape, dtype=bool)
+ if show_progress:
+ for i in tqdm(bad_frame_list[bad_frame_list >= FD.beg]):
+ p, v = FD.rdrawframe(i)
+ w = np.where(v > threshold)[0]
+ bad.ravel()[p[w]] = 1
+ # x,y = np.where( imgsa[i] > threshold)
+ # bad[x[0],y[0]] = 1
+ else:
+ for i in bad_frame_list[bad_frame_list >= FD.beg]:
+ p, v = FD.rdrawframe(i)
+ w = np.where(v > threshold)[0]
+ bad.ravel()[p[w]] = 1
+
+ return ~bad
+
+
+def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=15):
+ """DEV by Yugang@CHX, June 6, 2019
+ Get circular average of a time series using a dynamic mask, in which pixel values are defined as
+ zeros if above a threshold.
+ Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number + Input: + FD: the multifile handler for the time series + mask: a two-d bool type array + setup_pargs: dict, parameters of setup for calculate q-Iq + should have keys as + 'dpix', 'Ldet','lambda_', 'center' + bin_number: bin number of the frame + threshold: define the dynamics mask, which pixel values are defined as + zeors if above this threshold + Output: + qp_saxs: q in pixel + iq_saxs: intenstity + q_saxs: q in A-1 + """ + beg = FD.beg + end = FD.end + shape = FD.rdframe(beg).shape + Nimg_ = FD.end - FD.beg + # Nimg_ = 100 + Nimg = Nimg_ // bin_number + time_edge = np.array(create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bin_number)) + beg + for n in tqdm(range(Nimg)): + t1, t2 = time_edge[n] + # print(t1,t2) + if bin_number == 1: + avg_imgi = FD.rdframe(t1) + else: + avg_imgi = get_avg_imgc(FD, beg=t1, end=t2, sampling=1, plot_=False, show_progress=False) + badpi = find_bad_pixels_FD( + np.arange(t1, t2), FD, img_shape=avg_imgi.shape, threshold=threshold, show_progress=False + ) + img = avg_imgi * mask * badpi + qp_saxsi, iq_saxsi, q_saxsi = get_circular_average(img, mask * badpi, save=False, pargs=setup_pargs) + # print( img.max()) + if t1 == FD.beg: + qp_saxs, iq_saxs, q_saxs = np.zeros_like(qp_saxsi), np.zeros_like(iq_saxsi), np.zeros_like(q_saxsi) + qp_saxs += qp_saxsi + iq_saxs += iq_saxsi + q_saxs += q_saxsi + qp_saxs /= Nimg + iq_saxs /= Nimg + q_saxs /= Nimg + + return qp_saxs, iq_saxs, q_saxs + + +def get_waxs_beam_center(gamma, origin=[432, 363], Ldet=1495, pixel_size=75 * 1e-3): + """YG Feb 10, 2018 + Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma + Input: + gamma: angle in degree + Ldet: sample to detector distance, 1495 mm for CHX WAXS + origin: beam center for gamma = 0, (python x,y coordinate in pixel) + pxiel size: 75 * 1e-3 mm for Eiger 1M + output: + beam center: for the target gamma, in pixel + """ + return [int(origin[0] + np.tan(np.radians(gamma)) * Ldet / pixel_size), origin[1]] + + +def get_img_from_iq(qp, iq, img_shape, center): + """YG Jan 24, 2018 + Get image from circular average + Input: + qp: q in pixel unit + iq: circular average + image_shape, e.g., [256,256] + center: [center_y, center_x] e.g., [120, 200] + Output: + img: recovered image + """ + pixelist = np.arange(img_shape[0] * img_shape[1]) + pixely = pixelist % img_shape[1] - center[1] + pixelx = pixelist // img_shape[1] - center[0] + r = np.hypot(pixelx, pixely) # leave as float. + # r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 + return (np.interp(r, qp, iq)).reshape(img_shape) + + +def average_array_withNan(array, axis=0, mask=None): + """YG. Jan 23, 2018 + Average array invovling np.nan along axis + + Input: + array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + avg: averaged array along axis + """ + shape = array.shape + if mask == None: + mask = np.isnan(array) + # mask = np.ma.masked_invalid(array).mask + array_ = np.ma.masked_array(array, mask=mask) + try: + sums = np.array(np.ma.sum(array_[:, :], axis=axis)) + except: + sums = np.array(np.ma.sum(array_[:], axis=axis)) + + cts = np.sum(~mask, axis=axis) + # print(cts) + return sums / cts + + +def deviation_array_withNan(array, axis=0, mask=None): + """YG. 
Jan 23, 2018 + Get the deviation of array invovling np.nan along axis + + Input: + array: ND array + axis: the average axis + mask: bool, same shape as array, if None, will mask all the nan values + Output: + dev: the deviation of array along axis + """ + avg2 = average_array_withNan(array**2, axis=axis, mask=mask) + avg = average_array_withNan(array, axis=axis, mask=mask) + return np.sqrt(avg2 - avg**2) + + +def refine_roi_mask(roi_mask, pixel_num_thres=10): + """YG Dev Jan20,2018 + remove bad roi which pixel numbe is lower pixel_num_thres + roi_mask: array, + pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, + i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres, + that roi will be considered as bad one and be removed. + """ + new_mask = np.zeros_like(roi_mask) + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + good_ind = np.where(nopr >= pixel_num_thres)[0] + 1 + l = len(good_ind) + new_ind = np.arange(1, l + 1) + for i, gi in enumerate(good_ind): + new_mask.ravel()[np.where(roi_mask.ravel() == gi)[0]] = new_ind[i] + return new_mask, good_ind - 1 + + +def shrink_image_stack(imgs, bins): + """shrink imgs by bins + imgs: shape as [Nimg, imx, imy]""" + Nimg, imx, imy = imgs.shape + bx, by = bins + imgsk = np.zeros([Nimg, imx // bx, imy // by]) + N = len(imgs) + for i in range(N): + imgsk[i] = shrink_image(imgs[i], bins) + return imgsk + + +def shrink_image(img, bins): + """YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y + input: + img: 2d array, + bins: integer list, eg. [2,2] + output: + imgb: binned img + """ + m, n = img.shape + bx, by = bins + Nx, Ny = m // bx, n // by + # print(Nx*bx, Ny*by) + return img[: Nx * bx, : Ny * by].reshape(Nx, bx, Ny, by).mean(axis=(1, 3)) + + +def get_diff_fv(g2_fit_paras, qval_dict, ang_init=137.2): + """YG@CHX Nov 9,2017 + Get flow velocity and diff from g2_fit_paras""" + g2_fit_para_ = g2_fit_paras.copy() + qr = np.array([qval_dict[k][0] for k in sorted(qval_dict.keys())]) + qang = np.array([qval_dict[k][1] for k in sorted(qval_dict.keys())]) + # x=g2_fit_para_.pop( 'relaxation_rate' ) + # x=g2_fit_para_.pop( 'flow_velocity' ) + g2_fit_para_["diff"] = g2_fit_paras["relaxation_rate"] / qr**2 + cos_part = np.abs(np.cos(np.radians(qang - ang_init))) + g2_fit_para_["fv"] = g2_fit_paras["flow_velocity"] / cos_part / qr + return g2_fit_para_ + + +# function to get indices of local extrema (=indices of speckle echo maximum amplitudes): +def get_echos(dat_arr, min_distance=10): + """ + getting local maxima and minima from 1D data -> e.g. speckle echos + strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima + using np.argmin to find absolute minima between relative maxima + returns [max_ind,min_ind] -> lists of indices corresponding to local maxima/minima + by LW 10/23/2018 + """ + from skimage.feature import peak_local_max + + max_ind = peak_local_max(dat_arr, min_distance) # !!! careful, skimage function reverses the order (wtf?) 
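+ # peak_local_max on a 1D trace returns an (N, 1) array of peak indices which, as the
+ # comment above warns, come back in reversed (descending-index) order in this code's usage.
+ # A hedged toy example (hypothetical data with echoes near indices 10, 30 and 50):
+ #     max_ind ~ [[50], [30], [10]]  ->  flattened and reversed to [10, 30, 50] at the return
+ # The loop below walks each pair of neighbouring maxima and records, via np.argmin,
+ # the index of the absolute minimum of dat_arr between them.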
+ min_ind = [] + for i in range(len(max_ind[:-1])): + min_ind.append(max_ind[i + 1][0] + np.argmin(dat_arr[max_ind[i + 1][0] : max_ind[i][0]])) + # unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this: + mmax_ind = [] + for l in max_ind: + mmax_ind.append(l[0]) + # return [mmax_ind,min_ind] + return [list(reversed(mmax_ind)), list(reversed(min_ind))] + + +def pad_length(arr, pad_val=np.nan): + """ + arr: 2D matrix + pad_val: values being padded + adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix + -> used to convert python generic data object to HDF5 native format + function fixes python bug in padding (np.pad) integer array with np.nan + update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogenous size + by LW 12/30/2017 + """ + max_len = [] + for i in range(len(arr)): + max_len.append([len(arr[i])]) + max_len = np.max(max_len) + for l in range(len(arr)): + arr[l] = np.pad(arr[l] * 1.0, (0, max_len - np.size(arr[l])), mode="constant", constant_values=pad_val) + return arr + + +def save_array_to_tiff(array, output, verbose=True): + """Y.G. Nov 1, 2017 + Save array to a tif file + """ + img = PIL.Image.fromarray(array) + img.save(output) + if verbose: + print("The data is save to: %s." % (output)) + + +def load_pilatus(filename): + """Y.G. Nov 1, 2017 + Load a pilatus 2D image + """ + return np.array(PIL.Image.open(filename).convert("I")) + + +def ls_dir(inDir, have_list=[], exclude_list=[]): + """Y.G. Aug 1, 2019 + List all filenames in a filefolder + inDir: fullpath of the inDir + have_string: only retrun filename containing the string + exclude_string: only retrun filename not containing the string + + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifs_ = [] + for tif in tifs: + flag = 1 + for string in have_list: + if string not in tif: + flag *= 0 + for string in exclude_list: + if string in tif: + flag *= 0 + if flag: + tifs_.append(tif) + + return np.array(tifs_) + + +def ls_dir2(inDir, string=None): + """Y.G. Nov 1, 2017 + List all filenames in a filefolder (not include hidden files and subfolders) + inDir: fullpath of the inDir + string: if not None, only retrun filename containing the string + """ + from os import listdir + from os.path import isfile, join + + if string == None: + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + else: + tifs = np.array([f for f in listdir(inDir) if (isfile(join(inDir, f))) & (string in f)]) + return tifs + + +def re_filename(old_filename, new_filename, inDir=None, verbose=True): + """Y.G. Nov 28, 2017 + Rename old_filename with new_filename in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_filename/ new_filename: string + an example: + re_filename( 'uid=run20_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', + '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' + ) + """ + if inDir != None: + os.rename(inDir + old_filename, inDir + new_filename) + else: + os.rename(old_filename, new_filename) + print("The file: %s is changed to: %s." % (old_filename, new_filename)) + + +def re_filename_dir(old_pattern, new_pattern, inDir, verbose=True): + """Y.G. 
Nov 28, 2017 + Rename all filenames with old_pattern with new_pattern in a inDir + inDir: fullpath of the inDir, if None, the filename should have the fullpath + old_pattern, new_pattern + an example, + re_filename_dir('20_', '17_', inDir ) + """ + fps = ls_dir(inDir) + for fp in fps: + if old_pattern in fp: + old_filename = fp + new_filename = fp.replace(old_pattern, new_pattern) + re_filename(old_filename, new_filename, inDir, verbose=verbose) + + +def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, silent=True, qprecision=5): + """ + function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis + [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..) + calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True) + qdict: qval_dict from analysis pipeline/hdf5 result file + q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True) + q_thresh: threshold for comparing Q-values, set to 0 for exact comparison + phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True) + p_thresh: threshold for comparing phi values, set to 0 for exact comparison + silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest + by LW 10/21/2017 + update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) + update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environments >= 2019-3.0.1 + """ + import collections + from collections import OrderedDict + + qdict = collections.OrderedDict(sorted(qdict.items())) + qs = [] + phis = [] + for i in qdict.keys(): + qs.append(qdict[i][0]) + phis.append(qdict[i][1]) + qslist = list(OrderedDict.fromkeys(qs)) + qslist = np.unique(np.round(qslist, qprecision)) + phislist = list(OrderedDict.fromkeys(phis)) + qslist = list(np.sort(qslist)) + phislist = list(np.sort(phislist)) + if q_nr: + qinterest = qslist[q] + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] + else: + qinterest = q + qindices = [i for i, x in enumerate(qs) if np.abs(x - qinterest) < q_thresh] # new + if phi_nr: + phiinterest = phislist[phi] + phiindices = [i for i, x in enumerate(phis) if x == phiinterest] + else: + phiinterest = phi + phiindices = [i for i, x in enumerate(phis) if np.abs(x - phiinterest) < p_thresh] # new + ret_list = [ + list(set(qindices).intersection(phiindices))[0], + qinterest, + phiinterest, + qslist, + phislist, + ] # -> this is the original + if silent == False: + print("list of available Qs:") + print(qslist) + print("list of available phis:") + print(phislist) + print("Roi number for Q= " + str(ret_list[1]) + " and phi= " + str(ret_list[2]) + ": " + str(ret_list[0])) + return ret_list + + +def get_fit_by_two_linear( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, + namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) + Input: + x: 1D np.array + y: 1D np.array + mid_xpoint: float, the middle point of x + xrange: [x1,x2] + Return: + D1, gmfit1, D2, gmfit2 : + fit parameter (slope, background) of linear fit1 + convinent fit class, gmfit1(x) gives yvale + fit parameter (slope, background) of linear fit2 + convinent fit class, gmfit2(x) gives yvale + + """ + if xrange == None: + x1, x2 = min(x), max(x) + x1, x2 = xrange + if mid_xpoint2 == 
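+ # Hedged usage sketch of get_roi_nr defined above; the toy qval_dict values are assumptions.
+ # A small nonzero q_thresh is passed because the Q comparison uses a strict "<".
+ qval_dict_demo = {0: [0.01, 0.0], 1: [0.02, 0.0], 2: [0.01, 90.0]}
+ roi_nr, qv, phiv, q_list, phi_list = get_roi_nr(
+     qval_dict_demo, q=0, phi=0, q_nr=True, phi_nr=True, q_thresh=1e-4, silent=False
+ )   # roi_nr -> 0, the ROI with the first Q value and phi = 0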
None: + mid_xpoint2 = mid_xpoint1 + D1, gmfit1 = linear_fit(x, y, xrange=[x1, mid_xpoint1]) + D2, gmfit2 = linear_fit(x, y, xrange=[mid_xpoint2, x2]) + return D1, gmfit1, D2, gmfit2 + + +def get_cross_point(x, gmfit1, gmfit2): + """YG Octo 16,2017 + Get croess point of two curve + """ + y1 = gmfit1(x) + y2 = gmfit2(x) + return x[np.argmin(np.abs(y1 - y2))] + + +def get_curve_turning_points( + x, + y, + mid_xpoint1, + mid_xpoint2=None, + xrange=None, +): + """YG Octo 16,2017 + Get a turning point of a curve by doing a two-linear fit + """ + D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x, y, mid_xpoint1, mid_xpoint2, xrange) + return get_cross_point(x, gmfit1, gmfit2) + + +def plot_fit_two_linear_fit(x, y, gmfit1, gmfit2, ax=None): + """YG Octo 16,2017 Plot data with two fitted linear func""" + if ax == None: + fig, ax = plt.subplots() + plot1D(x=x, y=y, ax=ax, c="k", legend="data", m="o", ls="") # logx=True, logy=True ) + plot1D(x=x, y=gmfit1(x), ax=ax, c="r", m="", ls="-", legend="fit1") + plot1D(x=x, y=gmfit2(x), ax=ax, c="b", m="", ls="-", legend="fit2") + return ax + + +def linear_fit(x, y, xrange=None): + """YG Octo 16,2017 copied from XPCS_SAXS + a linear fit + """ + if xrange != None: + xmin, xmax = xrange + x1, x2 = find_index(x, xmin, tolerance=None), find_index(x, xmax, tolerance=None) + x_ = x[x1:x2] + y_ = y[x1:x2] + else: + x_ = x + y_ = y + D0 = np.polyfit(x_, y_, 1) + gmfit = np.poly1d(D0) + return D0, gmfit + + +def find_index(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + """ + + N = len(x) + i = 0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + position = np.argmin(np.abs(x - x0)) + return position + + +def find_index_old(x, x0, tolerance=None): + """YG Octo 16,2017 copied from SAXS + find index of x0 in x + #find the position of P in a list (plist) with tolerance + """ + + N = len(x) + i = 0 + position = None + if tolerance == None: + tolerance = (x[1] - x[0]) / 2.0 + if x0 > max(x): + position = len(x) - 1 + elif x0 < min(x): + position = 0 + else: + for item in x: + if abs(item - x0) <= tolerance: + position = i + # print 'Found Index!!!' + break + i += 1 + + return position + + +def sgolay2d(z, window_size, order, derivative=None): + """YG Octo 16, 2017 + Modified from http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html + Procedure for sg2D: + https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter#Two-dimensional_convolution_coefficients + + Two-dimensional smoothing and differentiation can also be applied to tables of data values, such as intensity + values in a photographic image which is composed of a rectangular grid of pixels.[16] [17] The trick is to transform + part of the table into a row by a simple ordering of the indices of the pixels. Whereas the one-dimensional filter + coefficients are found by fitting a polynomial in the subsidiary variable, z to a set of m data points, the + two-dimensional coefficients are found by fitting a polynomial in subsidiary variables v and w to a set of m x m + data points. The following example, for a bicubic polynomial and m = 5, illustrates the process, which parallels the + process for the one dimensional case, above.[18] + + The square of 25 data values, d1 - d25 + becomes a vector when the rows are placed one after another. + The Jacobian has 10 columns, one for each of the parameters a00 - a03 and 25 rows, one for each pair of v and w values. 
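+ # Hedged usage sketch of get_curve_turning_points defined above, on synthetic data with a
+ # slope change near x = 5; the break point and xrange are illustrative assumptions
+ # (xrange is passed explicitly here).
+ import numpy as np
+ x_demo = np.linspace(0, 10, 101)
+ y_demo = np.where(x_demo < 5, 2.0 * x_demo, 10.0 + 0.5 * (x_demo - 5))
+ x_turn = get_curve_turning_points(x_demo, y_demo, mid_xpoint1=4, mid_xpoint2=6, xrange=[0, 10])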
+ The convolution coefficients are calculated as + The first row of C contains 25 convolution coefficients which can be multiplied with the 25 data values to provide a + smoothed value for the central data point (13) of the 25. + + """ + # number of terms in the polynomial expression + n_terms = (order + 1) * (order + 2) / 2.0 + + if window_size % 2 == 0: + raise ValueError("window_size must be odd") + + if window_size**2 < n_terms: + raise ValueError("order is too high for the window size") + + half_size = window_size // 2 + + # exponents of the polynomial. + # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ... + # this line gives a list of two item tuple. Each tuple contains + # the exponents of the k-th term. First element of tuple is for x + # second element for y. + # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...] + exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)] + + # coordinates of points + ind = np.arange(-half_size, half_size + 1, dtype=np.float64) + dx = np.repeat(ind, window_size) + dy = np.tile(ind, [window_size, 1]).reshape( + window_size**2, + ) + + # build matrix of system of equation + A = np.empty((window_size**2, len(exps))) + for i, exp in enumerate(exps): + A[:, i] = (dx ** exp[0]) * (dy ** exp[1]) + + # pad input array with appropriate values at the four borders + new_shape = z.shape[0] + 2 * half_size, z.shape[1] + 2 * half_size + Z = np.zeros((new_shape)) + # top band + band = z[0, :] + Z[:half_size, half_size:-half_size] = band - np.abs(np.flipud(z[1 : half_size + 1, :]) - band) + # bottom band + band = z[-1, :] + Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size - 1 : -1, :]) - band) + # left band + band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size]) + Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1 : half_size + 1]) - band) + # right band + band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size]) + Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size - 1 : -1]) - band) + # central band + Z[half_size:-half_size, half_size:-half_size] = z + + # top left corner + band = z[0, 0] + Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1 : half_size + 1, 1 : half_size + 1])) - band) + # bottom right corner + band = z[-1, -1] + Z[-half_size:, -half_size:] = band + np.abs( + np.flipud(np.fliplr(z[-half_size - 1 : -1, -half_size - 1 : -1])) - band + ) + + # top right corner + band = Z[half_size, -half_size:] + Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size + 1 : 2 * half_size + 1, -half_size:]) - band) + # bottom left corner + band = Z[-half_size:, half_size].reshape(-1, 1) + Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band) + + # solve system and convolve + if derivative == None: + m = np.linalg.pinv(A)[0].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, m, mode="valid") + elif derivative == "col": + c = np.linalg.pinv(A)[1].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -c, mode="valid") + elif derivative == "row": + r = np.linalg.pinv(A)[2].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -r, mode="valid") + elif derivative == "both": + c = np.linalg.pinv(A)[1].reshape((window_size, -1)) + r = np.linalg.pinv(A)[2].reshape((window_size, -1)) + return scipy.signal.fftconvolve(Z, -r, mode="valid"), scipy.signal.fftconvolve(Z, -c, mode="valid") + + +def load_filelines(fullpath): + """YG Develop March 10, 2018 + Load all 
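+ # Hedged usage sketch of sgolay2d defined above; the noisy test image, window size and
+ # polynomial order are illustrative assumptions (window_size must be odd).
+ import numpy as np
+ z_demo = np.random.rand(64, 64)
+ z_smooth = sgolay2d(z_demo, window_size=11, order=3)                       # smoothed image
+ dzr, dzc = sgolay2d(z_demo, window_size=11, order=3, derivative="both")    # row/col derivatives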
content from a file + basepath, fname = os.path.split(os.path.abspath( fullpath )) + Input: + fullpath: str, full path of the file + Return: + list: str + """ + with open(fullpath, "r") as fin: + p = fin.readlines() + return p + + +def extract_data_from_file( + filename, + filepath, + good_line_pattern=None, + start_row=None, + good_cols=None, + labels=None, +): + """YG Develop Octo 17, 2017 + Add start_row option at March 5, 2018 + + Extract data from a file + Input: + filename: str, filename of the data + filepath: str, path of the data + good_line_pattern: str, data will be extract below this good_line_pattern + Or giving start_row: int + good_cols: list of integer, good index of cols + lables: the label of the good_cols + #save: False, if True will save the data into a csv file with filename appending csv ?? + Return: + a pds.dataframe + Example: + filepath = '/XF11ID/analysis/2017_3/lwiegart/Link_files/Exports/' + filename = 'ANPES2 15-10-17 16-31-11-84Exported.txt' + good_cols = [ 1,2,4,6,8,10 ] + labels = [ 'time', 'temperature', 'force', 'distance', 'stress', 'strain' ] + good_line_pattern = "Index\tX\tY\tX\tY\tX\tY" + df = extract_data_from_file( filename, filepath, good_line_pattern, good_cols, labels) + """ + import pandas as pds + + with open(filepath + filename, "r") as fin: + p = fin.readlines() + di = 1e20 + for i, line in enumerate(p): + if start_row != None: + di = start_row + elif good_line_pattern != None: + if good_line_pattern in line: + di = i + else: + di = 0 + if i == di + 1: + els = line.split() + if good_cols == None: + data = np.array(els, dtype=float) + else: + data = np.array([els[j] for j in good_cols], dtype=float) + elif i > di: + try: + els = line.split() + if good_cols == None: + temp = np.array(els, dtype=float) + else: + temp = np.array([els[j] for j in good_cols], dtype=float) + data = np.vstack((data, temp)) + except: + pass + if labels == None: + labels = np.arange(data.shape[1]) + df = pds.DataFrame(data, index=np.arange(data.shape[0]), columns=labels) + return df + + +def get_print_uids(start_time, stop_time, return_all_info=False): + """Update Feb 20, 2018 also return full uids + YG. 
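+ # Hedged usage sketch of extract_data_from_file defined above, using the start_row option;
+ # the file name, path, column indices and labels are illustrative assumptions.
+ df_demo = extract_data_from_file(
+     "stress_strain.txt", "/tmp/demo_exports/", start_row=10,
+     good_cols=[1, 2], labels=["time", "force"],
+ )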
Octo 3, 2017@CHX + Get full uids and print uid plus Measurement contents by giving start_time, stop_time + + """ + hdrs = list(db(start_time=start_time, stop_time=stop_time)) + fuids = np.zeros(len(hdrs), dtype=object) + uids = np.zeros(len(hdrs), dtype=object) + sids = np.zeros(len(hdrs), dtype=object) + n = 0 + all_info = np.zeros(len(hdrs), dtype=object) + for i in range(len(hdrs)): + fuid = hdrs[-i - 1]["start"]["uid"] # reverse order + uid = fuid[:6] # reverse order + sid = hdrs[-i - 1]["start"]["scan_id"] + fuids[n] = fuid + uids[n] = uid + sids[n] = sid + date = time.ctime(hdrs[-i - 1]["start"]["time"]) + try: + m = hdrs[-i - 1]["start"]["Measurement"] + except: + m = "" + info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + print(info) + if return_all_info: + all_info[n] = info + n += 1 + if not return_all_info: + return fuids, uids, sids + else: + return fuids, uids, sids, all_info + + +def get_last_uids(n=-1): + """YG Sep 26, 2017 + A Convinient function to copy uid to jupyter for analysis""" + uid = db[n]["start"]["uid"][:8] + sid = db[n]["start"]["scan_id"] + m = db[n]["start"]["Measurement"] + return " uid = '%s' #(scan num: %s (Measurement: %s " % (uid, sid, m) + + +def get_base_all_filenames(inDir, base_filename_cut_length=-7): + """YG Sep 26, 2017 + Get base filenames and their related all filenames + Input: + inDir, str, input data dir + base_filename_cut_length: to which length the base name is unique + Output: + dict: keys, base filename + vales, all realted filename + """ + from os import listdir + from os.path import isfile, join + + tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) + tifsc = list(tifs.copy()) + utifs = np.sort(np.unique(np.array([f[:base_filename_cut_length] for f in tifs])))[::-1] + files = {} + for uf in utifs: + files[uf] = [] + i = 0 + reName = [] + for i in range(len(tifsc)): + if uf in tifsc[i]: + files[uf].append(tifsc[i]) + reName.append(tifsc[i]) + for fn in reName: + tifsc.remove(fn) + return files + + +def create_ring_mask(shape, r1, r2, center, mask=None): + """YG. Sep 20, 2017 Develop@CHX + Create 2D ring mask + input: + shape: two integer number list, mask shape, e.g., [100,100] + r1: the inner radius + r2: the outer radius + center: two integer number list, [cx,cy], ring center, e.g., [30,50] + output: + 2D numpy array, 0,1 type + """ + + m = np.zeros(shape, dtype=bool) + rr, cc = disk((center[1], center[0]), r2, shape=shape) + m[rr, cc] = 1 + rr, cc = disk((center[1], center[0]), r1, shape=shape) + m[rr, cc] = 0 + if mask != None: + m += mask + return m + + +def get_image_edge(img): + """ + Y.G. Developed at Sep 8, 2017 @CHX + Get sharp edges of an image + img: two-D array, e.g., a roi mask + """ + edg_ = prewitt(img / 1.0) + edg = np.zeros_like(edg_) + w = np.where(edg_ > 1e-10) + edg[w] = img[w] + edg[np.where(edg == 0)] = 1 + return edg + + +def get_image_with_roi(img, roi_mask, scale_factor=2): + """ + Y.G. 
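+ # Hedged usage sketch of create_ring_mask defined above; shape, radii and center are assumptions.
+ ring_demo = create_ring_mask(shape=[200, 200], r1=20, r2=40, center=[100, 100])
+ # ring_demo is nonzero inside the annulus between r1 and r2 and zero elsewhere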
Developed at Sep 8, 2017 @CHX + Get image with edges of roi_mask by doing + i) get edges of roi_mask by function get_image_edge + ii) scale img at region of interest (ROI) by scale_factor + img: two-D array for image + roi_mask: two-D array for ROI + scale_factor: scaling factor of ROI in image + """ + edg = get_image_edge(roi_mask) + img_ = img.copy() + w = np.where(roi_mask) + img_[w] = img[w] * scale_factor + return img_ * edg + + +def get_today_date(): + from time import gmtime, strftime + + return strftime("%m-%d-%Y", gmtime()) + + +def move_beamstop(mask, xshift, yshift): + """Y.G. Developed at July 18, 2017 @CHX + Create new mask by shift the old one with xshift, yshift + Input + --- + mask: 2D numpy array, 0 for bad pixels, 1 for good pixels + xshift, integer, shift value along x direction + yshift, integer, shift value along y direction + + Output + --- + mask, 2D numpy array, + """ + m = np.ones_like(mask) + W, H = mask.shape + w = np.where(mask == 0) + nx, ny = w[0] + int(yshift), w[1] + int(xshift) + gw = np.where((nx >= 0) & (nx < W) & (ny >= 0) & (ny < H)) + nx = nx[gw] + ny = ny[gw] + m[nx, ny] = 0 + return m + + +def validate_uid(uid): + """check uid whether be able to load data""" + try: + sud = get_sid_filenames(db[uid]) + print(sud) + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + print(imgs) + return 1 + except: + print("Can't load this uid=%s!" % uid) + return 0 + + +def validate_uid_dict(uid_dict): + """Y.G. developed July 17, 2017 @CHX + Check each uid in a dict can load data or not + uids: dict, val: meaningful decription, key: a list of uids + + """ + badn = 0 + badlist = [] + for k in list(uids.keys()): + for uid in uids[k]: + flag = validate_uid(uid) + if not flag: + badn += 1 + badlist.append(uid) + print("There are %s bad uids:%s in this uid_dict." % (badn, badlist)) + + +def get_mass_center_one_roi(FD, roi_mask, roi_ind): + """Get the mass center (in pixel unit) of one roi in a time series FD + FD: handler for a compressed time series + roi_mask: the roi array + roi_ind: the interest index of the roi + + """ + import scipy + + m = roi_mask == roi_ind + cx, cy = np.zeros(int((FD.end - FD.beg) / 1)), np.zeros(int((FD.end - FD.beg) / 1)) + n = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get mass center of one ROI of each frame"): + img = FD.rdframe(i) * m + c = scipy.ndimage.measurements.center_of_mass(img) + cx[n], cy[n] = int(c[0]), int(c[1]) + n += 1 + return cx, cy + + +def get_current_pipeline_filename(NOTEBOOK_FULL_PATH): + """Y.G. April 25, 2017 + Get the current running pipeline filename and path + Assume the piple is located in /XF11ID/ + Return, path and filename + """ + from IPython.core.magics.display import Javascript + + if False: + Javascript( + """ + var nb = IPython.notebook; + var kernel = IPython.notebook.kernel; + var command = "NOTEBOOK_FULL_PATH = '" + nb.base_url + nb.notebook_path + "'"; + kernel.execute(command); + """ + ) + print(NOTEBOOK_FULL_PATH) + filename = NOTEBOOK_FULL_PATH.split("/")[-1] + path = "/XF11ID/" + for s in NOTEBOOK_FULL_PATH.split("/")[3:-1]: + path += s + "/" + return path, filename + + +def get_current_pipeline_fullpath(NOTEBOOK_FULL_PATH): + """Y.G. April 25, 2017 + Get the current running pipeline full filepath + Assume the piple is located in /XF11ID/ + Return, the fullpath (path + filename) + """ + p, f = get_current_pipeline_filename(NOTEBOOK_FULL_PATH) + return p + f + + +def save_current_pipeline(NOTEBOOK_FULL_PATH, outDir): + """Y.G. 
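+ # Hedged usage sketch of move_beamstop defined above; the toy mask and shift values are assumptions.
+ import numpy as np
+ mask_demo = np.ones((100, 100))
+ mask_demo[40:60, 45:55] = 0                            # stand-in for a beamstop shadow
+ mask_shifted = move_beamstop(mask_demo, xshift=3, yshift=-2)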
April 25, 2017 + Save the current running pipeline to outDir + The save pipeline should be the snapshot of the current state. + """ + + import shutil + + path, fp = get_current_pipeline_filename(NOTEBOOK_FULL_PATH) + shutil.copyfile(path + fp, outDir + fp) + + print("This pipeline: %s is saved in %s." % (fp, outDir)) + + +def plot_g1(taus, g2, g2_fit_paras, qr=None, ylim=[0, 1], title=""): + """Dev Apr 19, 2017, + Plot one-time correlation, giving taus, g2, g2_fit""" + noqs = g2.shape[1] + fig, ax = plt.subplots() + if qr == None: + qr = np.arange(noqs) + for i in range(noqs): + b = g2_fit_paras["baseline"][i] + beta = g2_fit_paras["beta"][i] + y = np.sqrt(np.abs(g2[1:, i] - b) / beta) + plot1D( + x=taus[1:], + y=y, + ax=ax, + legend="q=%s" % qr[i], + ls="-", + lw=2, + m=markers[i], + c=colors[i], + title=title, + ylim=ylim, + logx=True, + legend_size=8, + ) + ax.set_ylabel(r"$g_1$" + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + return ax + + +def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): + """Remove bad pixels in roi_mask. The bad pixel is defined by the filter_dict, + if filter_type ='ylim', the filter_dict wit key as q and each value gives a high and low limit thresholds. The value of the pixels in avg_img above or below the limit are considered as bad pixels. + if filter_type='badpix': the filter_dict wit key as q and each value gives a list of bad pixel. + + avg_img, the averaged image + roi_mask: two-d array, the same shape as image, the roi mask, value is integer, e.g., 1 ,2 ,... + filter_dict: keys, as roi_mask integer, value, by default is [None,None], is the limit, + example, {2:[4,5], 10:[0.1,1.1]} + NOTE: first q = 1 (not 0) + """ + rm = roi_mask.copy() + rf = np.ravel(rm) + for k in list(filter_dict.keys()): + pixel = roi.roi_pixel_values(avg_img, roi_mask, [k])[0][0] + # print( np.max(pixel), np.min(pixel) ) + if filter_type == "ylim": + xmin, xmax = filter_dict[k] + badp = np.where((pixel >= xmax) | (pixel <= xmin))[0] + else: + badp = filter_dict[k] + if len(badp) != 0: + pls = np.where([rf == k])[1] + rf[pls[badp]] = 0 + return rm + + +## +# Dev at March 31 for create Eiger chip mask +def create_chip_edges_mask(det="1M"): + """Create a chip edge mask for Eiger detector""" + if det == "1M": + shape = [1065, 1030] + w = 4 + mask = np.ones(shape, dtype=np.int32) + cx = [1030 // 4 * i for i in range(1, 4)] + # cy = [ 1065//4 *i for i in range(1,4) ] + cy = [808, 257] + # print (cx, cy ) + for c in cx: + mask[:, c - w // 2 : c + w // 2] = 0 + for c in cy: + mask[c - w // 2 : c + w // 2, :] = 0 + + return mask + + +def create_ellipse_donut(cx, cy, wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0): + Nmax = np.max(np.unique(roi_mask)) + rr1, cc1 = ellipse(cy, cx, wy_inner, wx_inner) + rr2, cc2 = ellipse(cy, cx, wy_inner + gap, wx_inner + gap) + rr3, cc3 = ellipse(cy, cx, wy_outer, wx_outer) + roi_mask[rr3, cc3] = 2 + Nmax + roi_mask[rr2, cc2] = 0 + roi_mask[rr1, cc1] = 1 + Nmax + return roi_mask + + +def create_box(cx, cy, wx, wy, roi_mask): + Nmax = np.max(np.unique(roi_mask)) + for i, [cx_, cy_] in enumerate(list(zip(cx, cy))): # create boxes + x = np.array([cx_ - wx, cx_ + wx, cx_ + wx, cx_ - wx]) + y = np.array([cy_ - wy, cy_ - wy, cy_ + wy, cy_ + wy]) + rr, cc = polygon(y, x) + roi_mask[rr, cc] = i + 1 + Nmax + return roi_mask + + +def create_folder(base_folder, sub_folder): + """ + Crate a subfolder under base folder + Input: + base_folder: full path of the base folder + sub_folder: sub folder name to be created + 
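+ # Hedged usage sketch of filter_roi_mask defined above; roi_mask and avg_img are assumed to come
+ # from earlier pipeline steps, and the per-ROI intensity limits are illustrative assumptions.
+ filter_dict_demo = {1: [0.1, 50.0], 2: [0.05, 20.0]}   # ROI label -> [low, high] intensity limits
+ roi_mask_clean = filter_roi_mask(filter_dict_demo, roi_mask, avg_img, filter_type="ylim")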
Return: + Created full path of the created folder + """ + + data_dir0 = os.path.join(base_folder, sub_folder) + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + return data_dir0 + + +def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): + """ + Crate a folder for saving user data analysis result + Input: + CYCLE: run cycle + username: if None, get username from the jupyter username + Return: + Created folder name + """ + if username != "Default": + if username == None: + username = getpass.getuser() + data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") + else: + data_dir0 = os.path.join(default_dir, CYCLE + "/") + ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + return data_dir0 + + +################################## +#########For dose analysis ####### +################################## +def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): + """ + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + """ + return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att) + + +def get_multi_tau_lag_steps(fra_max, num_bufs=8): + """ + Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max + Parameters: + fra_max: integer, the maximun frame number + buf_num (default=8), + Return: + taus_in_log, a list + + e.g., + get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]) + + """ + num_levels = int(np.log(fra_max / (num_bufs - 1)) / np.log(2) + 1) + 1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + return lag_steps[lag_steps < fra_max] + + +def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True, num_bufs=8): + """ + Get taus for dose dependent analysis + Parameters: + fra_max_list: a list, a lsit of largest available frame number + acq_time: acquistion time for each frame + log_taus: if true, will use the multi-tau defined taus bu using buf_num (default=8), + otherwise, use deltau =1 + Return: + tausd, a dict, with keys as taus_max_list items + e.g., + get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True, num_bufs = 8) + --> + {20: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]), + 30: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28]), + 40: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]) + } + + """ + tausd = {} + for n in fra_max_list: + if max_fra_num != None: + L = max_fra_num + else: + L = np.infty + if n > L: + warnings.warn( + "Warning: the dose value is too large, and please" + "check the maxium dose in this data set and give a smaller dose value." 
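+ # Hedged worked example for get_fra_num_by_dose defined above, reusing the docstring numbers:
+ # with exp_time = 1.34 and dead_time = 2, a dose of 3.34*20 maps to 3.34*20/(1.34+2) = 20 frames.
+ no_dose_fra = get_fra_num_by_dose(exp_dose=[3.34 * 20, 3.34 * 100], exp_time=1.34, dead_time=2)
+ # expected: array([ 20, 100])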
+ "We will use the maxium dose of the data." + ) + n = L + if log_taus: + lag_steps = get_multi_tau_lag_steps(n, num_bufs) + else: + lag_steps = np.arange(n) + tausd[n] = lag_steps * acq_time + return tausd + + +def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * 10 * (-5)): + """Y.G. Dec 31, 2016, check lost metadata + + Parameter: + md: dict, meta data dictionay + Nimg: number of frames for this uid metadata + inc_x0/y0: incident beam center x0/y0, if None, will over-write the md['beam_center_x/y'] + pixelsize: if md don't have ['x_pixel_size'], the pixelsize will add it + Return: + dpix: pixelsize, in mm + lambda_: wavelegth of the X-rays in Angstroms + exposuretime: exposure time in sec + timeperframe: acquisition time is sec + center: list, [x,y], incident beam center in pixel + Will also update md + """ + mdn = md.copy() + if "number of images" not in list(md.keys()): + md["number of images"] = Nimg + if "x_pixel_size" not in list(md.keys()): + md["x_pixel_size"] = 7.5000004e-05 + dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm + try: + lambda_ = md["wavelength"] + except: + lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms + try: + Ldet = md["det_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["det_distance"] = Ldet + except: + Ldet = md["detector_distance"] + if Ldet <= 1000: + Ldet *= 1000 + md["detector_distance"] = Ldet + + try: # try exp time from detector + exposuretime = md["count_time"] # exposure time in sec + except: + exposuretime = md["cam_acquire_time"] # exposure time in sec + try: # try acq time from detector + acquisition_period = md["frame_time"] + except: + try: + acquisition_period = md["acquire period"] + except: + uid = md["uid"] + acquisition_period = float(db[uid]["start"]["acquire period"]) + timeperframe = acquisition_period + if inc_x0 != None: + mdn["beam_center_x"] = inc_y0 + print("Beam_center_x has been changed to %s. (no change in raw metadata): " % inc_y0) + if inc_y0 != None: + mdn["beam_center_y"] = inc_x0 + print("Beam_center_y has been changed to %s. (no change in raw metadata): " % inc_x0) + center = [int(mdn["beam_center_x"]), int(mdn["beam_center_y"])] # beam center [y,x] for python image + center = [center[1], center[0]] + + return dpix, lambda_, Ldet, exposuretime, timeperframe, center + + +def combine_images(filenames, outputfile, outsize=(2000, 2400)): + """Y.G. 
Dec 31, 2016 + Combine images together to one image using PIL.Image + Input: + filenames: list, the images names to be combined + outputfile: str, the filename to generate + outsize: the combined image size + Output: + save a combined image file + """ + N = len(filenames) + # nx = np.int( np.ceil( np.sqrt(N)) ) + # ny = np.int( np.ceil( N / float(nx) ) ) + + ny = int(np.ceil(np.sqrt(N))) + nx = int(np.ceil(N / float(ny))) + + # print(nx,ny) + result = Image.new("RGB", outsize, color=(255, 255, 255, 0)) + basewidth = int(outsize[0] / nx) + hsize = int(outsize[1] / ny) + for index, file in enumerate(filenames): + path = os.path.expanduser(file) + img = Image.open(path) + bands = img.split() + ratio = img.size[1] / img.size[0] # h/w + if hsize > basewidth * ratio: + basewidth_ = basewidth + hsize_ = int(basewidth * ratio) + else: + basewidth_ = int(hsize / ratio) + hsize_ = hsize + # print( index, file, basewidth, hsize ) + size = (basewidth_, hsize_) + bands = [b.resize(size, Image.Resampling.BILINEAR) for b in bands] + img = Image.merge("RGBA", bands) + x = index % nx * basewidth + y = index // nx * hsize + w, h = img.size + # print('pos {0},{1} size {2},{3}'.format(x, y, w, h)) + result.paste(img, (x, y, x + w, y + h)) + result.save(outputfile, quality=100, optimize=True) + print("The combined image is saved as: %s" % outputfile) + + +def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz=True, one_qz_multi_qr=True): + """Y.G. Dec 27, 2016 + Map the roi label array with qr or (qr,qz) or (q//, q|-) values + Parameters: + qr_center: list, a list of qr + qz_center: list, a list of qz, + multi_qr_for_one_qz: by default=True, + if one_qz_multi_qr: + one qz_center corresponds to all qr_center, in other words, there are totally, len(qr_center)* len(qz) qs + else: + one qr_center corresponds to all qz_center, + else: one qr with one qz + qval_dict: if not None, will append the new dict to the qval_dict + Return: + qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + + """ + + if qval_dict == None: + qval_dict = {} + maxN = 0 + else: + maxN = np.max(list(qval_dict.keys())) + 1 + + if qz_center != None: + if multi_qr_for_one_qz: + if one_qz_multi_qr: + for qzind in range(len(qz_center)): + for qrind in range(len(qr_center)): + qval_dict[maxN + qzind * len(qr_center) + qrind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) + else: + for qrind in range(len(qr_center)): + for qzind in range(len(qz_center)): + qval_dict[maxN + qrind * len(qz_center) + qzind] = np.array( + [qr_center[qrind], qz_center[qzind]] + ) + + else: + for i, [qr, qz] in enumerate(zip(qr_center, qz_center)): + qval_dict[maxN + i] = np.array([qr, qz]) + else: + for qrind in range(len(qr_center)): + qval_dict[maxN + qrind] = np.array([qr_center[qrind]]) + return qval_dict + + +def update_qval_dict(qval_dict1, qval_dict2): + """Y.G. Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + qval_dict1, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + qval_dict2, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-) + Output: + qval_dict, a dict, with the same key as dict1, and all key in dict2 but which key plus max(dict1.keys()) + """ + maxN = np.max(list(qval_dict1.keys())) + 1 + qval_dict = {} + qval_dict.update(qval_dict1) + for k in list(qval_dict2.keys()): + qval_dict[k + maxN] = qval_dict2[k] + return qval_dict + + +def update_roi_mask(roi_mask1, roi_mask2): + """Y.G. 
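+ # Hedged usage sketch of get_qval_dict defined above; the qr/qz values are illustrative assumptions.
+ qr_demo = [0.01, 0.02, 0.03]
+ qz_demo = [0.005, 0.010]
+ qval_dict_demo = get_qval_dict(qr_demo, qz_center=qz_demo)   # 6 keys, one per (qr, qz) pair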
Dec 31, 2016 + Update qval_dict1 with qval_dict2 + Input: + roi_mask1, 2d-array, label array, same shape as xpcs frame, + roi_mask2, 2d-array, label array, same shape as xpcs frame, + Output: + roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2 + """ + roi_mask = roi_mask1.copy() + w = np.where(roi_mask2) + roi_mask[w] = roi_mask2[w] + np.max(roi_mask) + return roi_mask + + +def check_bad_uids(uids, mask, img_choice_N=10, bad_uids_index=None): + """Y.G. Dec 22, 2016 + Find bad uids by checking the average intensity by a selection of the number img_choice_N of frames for the uid. If the average intensity is zeros, the uid will be considered as bad uid. + Parameters: + uids: list, a list of uid + mask: array, bool type numpy.array + img_choice_N: random select number of the uid + bad_uids_index: a list of known bad uid list, default is None + Return: + guids: list, good uids + buids, list, bad uids + """ + import random + + buids = [] + guids = list(uids) + # print( guids ) + if bad_uids_index == None: + bad_uids_index = [] + for i, uid in enumerate(uids): + # print( i, uid ) + if i not in bad_uids_index: + detector = get_detector(db[uid]) + imgs = load_data(uid, detector) + img_samp_index = random.sample(range(len(imgs)), img_choice_N) + imgsa = apply_mask(imgs, mask) + avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uid) + if avg_img.max() == 0: + buids.append(uid) + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + print("The bad uid is: %s" % uid) + else: + guids.pop(list(np.where(np.array(guids) == uid)[0])[0]) + buids.append(uid) + print("The bad uid is: %s" % uid) + print("The total and bad uids number are %s and %s, repsectively." % (len(uids), len(buids))) + return guids, buids + + +def find_uids(start_time, stop_time): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + sids: list, scan id + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = db(start_time=start_time, stop_time=stop_time) + try: + print("Totally %s uids are found." 
% (len(list(hdrs)))) + except: + pass + sids = [] + uids = [] + fuids = [] + for hdr in hdrs: + s = get_sid_filenames(hdr) + # print (s[1][:8]) + sids.append(s[0]) + uids.append(s[1][:8]) + fuids.append(s[1]) + sids = sids[::-1] + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(sids), np.array(uids), np.array(fuids) + + +def ployfit(y, x=None, order=20): + """ + fit data (one-d array) by a ploynominal function + return the fitted one-d array + """ + if x == None: + x = range(len(y)) + pol = np.polyfit(x, y, order) + return np.polyval(pol, x) + + +def check_bad_data_points( + data, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + path=None, + return_ylim=False, +): + """ + data: 1D array + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + """ + if good_start == None: + good_start = 0 + if good_end == None: + good_end = len(data) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end, len(data))] + + d_ = data[good_start:good_end] + + if fit: + pfit = ployfit(d_, order=polyfit_order) + d = d_ - pfit + else: + d = d_ + pfit = np.ones_like(d) * data.mean() + + ymin = d.mean() - scale * d.std() + ymax = d.mean() + scale * d.std() + + if plot: + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(d_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title="Find Bad Points", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + d, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(d_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(d_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="", + legend_size=legend_size, + ) + + if path != None: + fp = path + "%s" % (uid) + "_find_bad_points" + ".png" + plt.savefig(fp, dpi=fig.dpi) + bd2 = list(np.where(np.abs(d - d.mean()) > scale * d.std())[0] + good_start) + + if return_ylim: + return np.array(bd1 + bd2 + bd3), ymin, ymax, pfit + else: + return np.array(bd1 + bd2 + bd3), pfit + + +def get_bad_frame_list( + imgsum, + fit=True, + polyfit_order=30, + legend_size=12, + plot=True, + scale=1.0, + good_start=None, + good_end=None, + uid="uid", + path=None, + return_ylim=False, +): + """ + imgsum: the sum intensity of a time series + scale: the scale of deviation + fit: if True, use a ploynominal function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high threshold, it's good to remove bad frames/pixels on top of not-flatten curve + else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve + + """ + if good_start == None: + good_start = 0 + if good_end == None: + good_end = len(imgsum) + bd1 = [i for i in range(0, good_start)] + bd3 = [i for i in range(good_end, len(imgsum))] + + imgsum_ = imgsum[good_start:good_end] + + if fit: + pfit = ployfit(imgsum_, order=polyfit_order) + data = imgsum_ - pfit + else: + data = 
imgsum_ + pfit = np.ones_like(data) * data.mean() + + ymin = data.mean() - scale * data.std() + ymax = data.mean() + scale * data.std() + + if plot: + fig = plt.figure() + ax = fig.add_subplot(2, 1, 1) + plot1D(imgsum_, ax=ax, color="k", legend="data", legend_size=legend_size) + plot1D(pfit, ax=ax, color="b", legend="ploy-fit", title=uid + "_imgsum", legend_size=legend_size) + + ax2 = fig.add_subplot(2, 1, 2) + plot1D( + data, + ax=ax2, + legend="difference", + marker="s", + color="b", + ) + + # print('here') + plot1D( + x=[0, len(imgsum_)], + y=[ymin, ymin], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="low_thresh", + legend_size=legend_size, + ) + + plot1D( + x=[0, len(imgsum_)], + y=[ymax, ymax], + ax=ax2, + ls="--", + lw=3, + marker="o", + color="r", + legend="high_thresh", + title="imgsum_to_find_bad_frame", + legend_size=legend_size, + ) + + if path != None: + fp = path + "%s" % (uid) + "_imgsum_analysis" + ".png" + plt.savefig(fp, dpi=fig.dpi) + + bd2 = list(np.where(np.abs(data - data.mean()) > scale * data.std())[0] + good_start) + + if return_ylim: + return np.array(bd1 + bd2 + bd3), ymin, ymax + else: + return np.array(bd1 + bd2 + bd3) + + +def save_dict_csv(mydict, filename, mode="w"): + import csv + + with open(filename, mode) as csv_file: + spamwriter = csv.writer(csv_file) + for key, value in mydict.items(): + spamwriter.writerow([key, value]) + + +def read_dict_csv(filename): + import csv + + with open(filename, "r") as csv_file: + reader = csv.reader(csv_file) + mydict = dict(reader) + return mydict + + +def find_bad_pixels(FD, bad_frame_list, uid="uid"): + bpx = [] + bpy = [] + for n in bad_frame_list: + if n >= FD.beg and n <= FD.end: + f = FD.rdframe(n) + w = np.where(f == f.max()) + if len(w[0]) == 1: + bpx.append(w[0][0]) + bpy.append(w[1][0]) + + return trans_data_to_pd([bpx, bpy], label=[uid + "_x", uid + "_y"], dtype="list") + + +def mask_exclude_badpixel(bp, mask, uid): + + for i in range(len(bp)): + mask[int(bp[bp.columns[0]][i]), int(bp[bp.columns[1]][i])] = 0 + return mask + + +def print_dict(dicts, keys=None): + """ + print keys: values in a dicts + if keys is None: print all the keys + """ + if keys == None: + keys = list(dicts.keys()) + for k in keys: + try: + print("%s--> %s" % (k, dicts[k])) + except: + pass + + +def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): + """ + Jan 25, 2018 add default_dec opt + + Y.G. Dev Dec 8, 2016 + + Get metadata from a uid + + - Adds detector key with detector name + + Parameters: + uid: the unique data acquisition id + kwargs: overwrite the meta data, for example + get_meta_data( uid = uid, sample = 'test') --> will overwrtie the meta's sample to test + return: + meta data of the uid: a dictionay + with keys: + detector + suid: the simple given uid + uid: full uid + filename: the full path of the data + start_time: the data acquisition starting time in a human readable manner + And all the input metadata + """ + + if "verbose" in kwargs.keys(): # added: option to suppress output + verbose = kwargs["verbose"] + else: + verbose = True + + import time + + header = db[uid] + md = {} + + md["suid"] = uid # short uid + try: + md["filename"] = get_sid_filenames(header)[2][0] + except: + md["filename"] = "N.A." + + devices = sorted(list(header.devices())) + if len(devices) > 1: + if verbose: # added: mute output + print( + "More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'." 
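+ # Hedged round-trip sketch for save_dict_csv / read_dict_csv defined above; the file path is an
+ # assumption, and note that read_dict_csv returns all values as strings.
+ meta_demo = {"uid": "abc123", "exposure": 0.00134}
+ save_dict_csv(meta_demo, "/tmp/meta_demo.csv")
+ meta_back = read_dict_csv("/tmp/meta_demo.csv")   # {'uid': 'abc123', 'exposure': '0.00134'}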
+ % default_dec + ) + # raise ValueError("More than one device. This would have unintented consequences.") + dec = devices[0] + for dec_ in devices: + if default_dec in dec_: + dec = dec_ + + # print(dec) + # detector_names = sorted( header.start['detectors'] ) + detector_names = sorted(get_detectors(db[uid])) + # if len(detector_names) > 1: + # raise ValueError("More than one det. This would have unintented consequences.") + detector_name = detector_names[0] + # md['detector'] = detector_name + md["detector"] = get_detector(header) + # print( md['detector'] ) + new_dict = header.config_data(dec)["primary"][0] + for key, val in new_dict.items(): + newkey = key.replace(detector_name + "_", "") + md[newkey] = val + + # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): + # md[ k[len(dec)+1:] ]= v + + try: + md.update(header.start["plan_args"].items()) + md.pop("plan_args") + except: + pass + md.update(header.start.items()) + + # print(header.start.time) + md["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.start["time"])) + md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"])) + try: # added: try to handle runs that don't contain image data + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + md["img_shape"] = descriptor["data_keys"][md["detector"]]["shape"][:2][::-1] + except: + if verbose: + print("couldn't find image shape...skip!") + else: + pass + md.update(kwargs) + + # for k, v in sorted(md.items()): + # ... + # print(f'{k}: {v}') + + return md + + +def get_max_countc(FD, labeled_array): + """YG. 2016, Nov 18 + Compute the max intensity of ROIs in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. 
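+ # Hedged usage sketch of get_meta_data defined above; the short uid string is an assumption.
+ md_demo = get_meta_data("abc12345", default_dec="eiger")
+ print(md_demo["detector"], md_demo["start_time"])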
If None, this function will extract averages for all + ROIs + + Returns + ------- + max_intensity : a float + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + + if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (FD.md["ncols"], FD.md["nrows"], labeled_array.shape[0], labeled_array.shape[1]) + ) + + max_inten = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get max intensity of ROIs in all frames"): + try: + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + max_inten = max(max_inten, np.max(v[w])) + except: + pass + return max_inten + + +def create_polygon_mask(image, xcorners, ycorners): + """ + Give image and x/y coners to create a polygon mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask + return bst_mask + + +def create_rectangle_mask(image, xcorners, ycorners): + """ + Give image and x/y coners to create a rectangle mask + image: 2d array + xcorners, list, points of x coners + ycorners, list, points of y coners + Return: + the polygon mask: 2d array, the polygon pixels with values 1 and others with 0 + + Example: + + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + bst_mask = np.zeros_like(image, dtype=bool) + rr, cc = polygon(ycorners, xcorners, shape=image.shape) + bst_mask[rr, cc] = 1 + # full_mask= ~bst_mask + return bst_mask + + +def create_multi_rotated_rectangle_mask(image, center=None, length=100, width=50, angles=[0]): + """Developed at July 10, 2017 by Y.G.@CHX, NSLS2 + Create multi rectangle-shaped mask by rotating a rectangle with a list of angles + The original rectangle is defined by four corners, i.e., + [ (center[1] - width//2, center[0]), + (center[1] + width//2, center[0]), + (center[1] + width//2, center[0] + length), + (center[1] - width//2, center[0] + length) + ] + + Parameters: + image: 2D numpy array, to give mask shape + center: integer list, if None, will be the center of the image + length: integer, the length of the non-ratoted rectangle + width: integer, the width of the non-ratoted rectangle + angles: integer list, a list of rotated angles + + Return: + mask: 2D bool-type numpy array + """ + + from skimage.draw import polygon + from skimage.transform import rotate + + cx, cy = center + imy, imx = image.shape + mask = np.zeros(image.shape, dtype=bool) + wy = length + wx = width + x = np.array([max(0, cx - wx // 2), min(imx, cx + wx // 2), min(imx, cx + wx // 2), max(0, cx - wx // 2)]) + y = np.array([cy, cy, min(imy, cy + wy), min(imy, cy + wy)]) + rr, cc = polygon(y, x, shape=image.shape) + mask[rr, cc] = 1 + mask_rot = np.zeros(image.shape, dtype=bool) + for angle in angles: + mask_rot += np.array(rotate(mask, angle, center=center), dtype=bool) # , preserve_range=True) + return ~mask_rot + + +def create_wedge(image, center, radius, wcors, acute_angle=True): + """YG develop at June 18, 2017, @CHX 
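+ # Hedged usage sketch of create_polygon_mask defined above; image shape and corner points are assumptions.
+ import numpy as np
+ image_demo = np.zeros((100, 100))
+ pmask = create_polygon_mask(image_demo, xcorners=[10, 60, 60, 10], ycorners=[20, 20, 80, 80])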
+ Create a wedge by a combination of disk and a triangle defined by center and wcors + wcors: [ [x1,x2,x3...], [y1,y2,y3..] + + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + cy, cx = center + x = [cx] + list(wcors[0]) + y = [cy] + list(wcors[1]) + + maskc = np.zeros_like(image, dtype=bool) + rr, cc = disk((cy, cx), radius, shape=image.shape) + maskc[rr, cc] = 1 + + maskp = np.zeros_like(image, dtype=bool) + x = np.array(x) + y = np.array(y) + print(x, y) + rr, cc = polygon(y, x, shape=image.shape) + maskp[rr, cc] = 1 + if acute_angle: + return maskc * maskp + else: + return maskc * ~maskp + + +def create_cross_mask( + image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4, center_disk=True, center_radius=10 +): + """ + Give image and the beam center to create a cross-shaped mask + wy_left: the width of left h-line + wy_right: the width of rigth h-line + wx_up: the width of up v-line + wx_down: the width of down v-line + center_disk: if True, create a disk with center and center_radius + + Return: + the cross mask + """ + from skimage.draw import disk, line, line_aa, polygon + + imy, imx = image.shape + cx, cy = center + bst_mask = np.zeros_like(image, dtype=bool) + ### + # for right part + wy = wy_right + x = np.array([cx, imx, imx, cx]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for left part + wy = wy_left + x = np.array([0, cx, cx, 0]) + y = np.array([cy - wy, cy - wy, cy + wy, cy + wy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for up part + wx = wx_up + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([cy, cy, imy, imy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + ### + # for low part + wx = wx_down + x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) + y = np.array([0, 0, cy, cy]) + rr, cc = polygon(y, x, shape=image.shape) + bst_mask[rr, cc] = 1 + + if center_radius != 0: + rr, cc = disk((cy, cx), center_radius, shape=bst_mask.shape) + bst_mask[rr, cc] = 1 + + full_mask = ~bst_mask + + return full_mask + + +def generate_edge(centers, width): + """YG. 10/14/2016 + give centers and width (number or list) to get edges""" + edges = np.zeros([len(centers), 2]) + edges[:, 0] = centers - width + edges[:, 1] = centers + width + return edges + + +def export_scan_scalar( + uid, x="dcm_b", y=["xray_eye1_stats1_total"], path="/XF11ID/analysis/2016_3/commissioning/Results/" +): + """YG. 
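+ # Hedged usage sketch of create_cross_mask defined above; image shape, beam center and line widths
+ # are illustrative assumptions.
+ import numpy as np
+ image_demo = np.zeros((1065, 1030))
+ cross = create_cross_mask(image_demo, center=[515, 532], wy_left=4, wy_right=4,
+                           wx_up=4, wx_down=4, center_radius=100)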
10/17/2016 + export uid data to a txt file + uid: unique scan id + x: the x-col + y: the y-cols + path: save path + Example: + data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'], + path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' ) + A plot for the data: + d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r') + + """ + from databroker import DataBroker as db + + from pyCHX.chx_generic_functions import trans_data_to_pd + + hdr = db[uid] + print(hdr.fields()) + data = db[uid].table() + xp = data[x] + datap = np.zeros([len(xp), len(y) + 1]) + datap[:, 0] = xp + for i, yi in enumerate(y): + datap[:, i + 1] = data[yi] + + datap = trans_data_to_pd(datap, label=[x] + [yi for yi in y]) + datap.to_csv(path + "uid=%s.csv" % uid) + return datap + + +##### +# load data by databroker + + +def get_flatfield(uid, reverse=False): + import h5py + + detector = get_detector(db[uid]) + sud = get_sid_filenames(db[uid]) + master_path = "%s_master.h5" % (sud[2][0]) + print(master_path) + f = h5py.File(master_path, "r") + k = "entry/instrument/detector/detectorSpecific/" # data_collection_date' + d = np.array(f[k]["flatfield"]) + f.close() + if reverse: + d = reverse_updown(d) + + return d + + +def get_detector(header): + """Get the first detector image string by giving header""" + keys = get_detectors(header) + for k in keys: + if "eiger" in k: + return k + + +def get_detectors(header): + """Get all the detector image strings by giving header""" + if "primary" in header.v2: + descriptor = header.v2["primary"].descriptors[0] + keys = [k for k, v in descriptor["data_keys"].items() if "external" in v] + return sorted(set(keys)) + return [] + + +def get_full_data_path(uid): + """A dirty way to get full data path""" + header = db[uid] + d = header.db + s = list(d.get_documents(db[uid])) + # print(s[2]) + p = s[2][1]["resource_path"] + p2 = s[3][1]["datum_kwargs"]["seq_id"] + # print(p,p2) + return p + "_" + str(p2) + "_master.h5" + + +def get_sid_filenames(hdr, verbose=False): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(hdr,verbose=False) + hdr = db[uid] + returns (scan_id, uid, filepath) + LW 04/30/2024 + """ + import glob + from time import localtime, strftime + + start_doc = hdr.start + stop_doc = hdr.stop + success = False + + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{start_doc['data path']}*_{start_doc['sequence id']}_master.h5"), + ) # looking for (eiger) datafile at the path specified in metadata + if len(ret[2]) == 0: + if verbose: + print('could not find detector filename from "data_path" in metadata: %s' % start_doc["data path"]) + else: + if verbose: + print('Found detector filename from "data_path" in metadata!') + success = True + + if not success: # looking at path in metadata, but taking the date from the run start document + data_path = start_doc["data path"][:-11] + strftime("%Y/%m/%d/", localtime(start_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("could not find detector filename in %s" % data_path) + else: + if verbose: + print("Found detector filename in %s" % data_path) + success = True + + if ( + not success + ): # looking at path in metadata, but taking the date from the run stop document (in case the date rolled over between creating the start doc and staging the detector) + data_path = start_doc["data path"][:-11] + 
strftime("%Y/%m/%d/", localtime(stop_doc["time"])) + ret = ( + start_doc["scan_id"], + start_doc["uid"], + glob.glob(f"{data_path}*_{start_doc['sequence id']}_master.h5"), + ) + if len(ret[2]) == 0: + if verbose: + print("Sorry, could not find detector filename....") + else: + if verbose: + print("Found detector filename in %s" % data_path) + success = True + return ret + + +def get_sid_filenames_v2(run): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(run,verbose=False) + run = db[uid] + returns (scan_id, uid, filepath) + 01/26/2025 function by Dan Allan, modified by LW to handle Eiger + oav + """ + from pathlib import Path + import event_model + from area_detector_handlers.eiger import EigerHandler + + run = run.v2 + sid = run.start['scan_id'] + uid = run.start['uid'] + resources = [doc for name, doc in run.documents() if name == "resource"] + for r in resources: + if r['spec'] in list(['AD_EIGER2']): + resource = r + datum_pages = [doc for name, doc in run.documents() if name == "datum_page"] + handler = EigerHandler(str(Path(resource['root'], resource['resource_path'])), **resource['resource_kwargs']) + datums = [] + for datum_page in datum_pages: + for datum in event_model.unpack_datum_page(datum_page): + if 'seq_id' in datum['datum_kwargs'].keys(): + datums.append(datum) + datum_set = sorted(set(handler.get_file_list([datum["datum_kwargs"] for datum in datums]))) + for datum in datum_set: + if "_master.h5" in datum: + return sid, uid, datum + +def get_sid_filenames_v3(run): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(run,verbose=False) + run = db[uid] + returns (scan_id, uid, filepath) + 01/26/2025 based on get_sid_filenames_v2 by Dan Allan, modified by LW to handle Eiger +oav as detectors and using md['sequence_id'] from 'series' + """ + run = run.v2 + sid = run.start['scan_id'] + uid = run.start['uid'] + resources = [doc for name, doc in run.documents() if name == "resource"] + for r in resources: + if r['spec'] in list(['AD_EIGER2']): + resource = r + if 'eiger' in resource['root']: + datum = '%s/%s_%s_master.h5'%(resource['root'],resource['resource_path'],run.start['sequence id']) + return sid, uid, datum + + + +def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): + """ + load data as dask-array + get image md (direct beam, wavelength, sample-detector distance,...) from databroker documents (no need to read an actual image) + get pixel_mask and binary_mask from static location (getting it from image metadata takes forever in some conda envs...) 
+ load_dask_data(uid,detector,reverse=False,rot90=False) + uid: uid (str) + detector: md['detector'] + mask_path_full: current standard would be _mask_path_+'pixel_masks/' + returns detector_images(dask-array), image_md + LW 04/26/2024 + """ + import json + + import dask + + hdr = db[uid] + det = detector.split("_image")[0] + # collect image metadata from loading single image + img_md_dict = { + "detector_distance": "det_distance", + "incident_wavelength": "wavelength", + "frame_time": "cam_acquire_period", + "count_time": "cam_acquire_time", + "num_images": "cam_num_images", + "beam_center_x": "beam_center_x", + "beam_center_y": "beam_center_y", + } + + det_mapping = { + "eiger4m": "eiger4m", + "eiger1m": "eiger1m", + "eiger500k": "eiger500K", + "eiger500K": "eiger500K" + } + + det_short = next((short for key, short in det_mapping.items() if key in det), None) + + img_md = {} + for k in list(img_md_dict.keys()): + img_md[k] = hdr.config_data(det)["primary"][0]["%s_%s" % (det, img_md_dict[k])] + if det_short is not None: + img_md.update({"y_pixel_size": 7.5e-05, "x_pixel_size": 7.5e-05}) + got_pixel_mask = True + else: + img_md.update({"y_pixel_size": None, "x_pixel_size": None}) + got_pixel_mask = False + # load pixel mask from static location + if got_pixel_mask: + # json_open = open(_mask_path_ + "pixel_masks/pixel_mask_compression_%s.json" % detector.split("_")[0]) + json_open = open(mask_path_full + "pixel_mask_compression_%s.json" % det_short) + mask_dict = json.load(json_open) + img_md["pixel_mask"] = np.array(mask_dict["pixel_mask"]) + img_md["binary_mask"] = np.array(mask_dict["binary_mask"]) + del mask_dict + + # load image data as dask-arry: + dimg = hdr.xarray_dask()[detector][0] + if reverse: + dimg = dask.array.flip(dimg, axis=(1, 1)) + if rot90: + dimg = dask.array.rot90(dimg, axes=(1, 2)) + return dimg, img_md + + +def load_data(uid, detector="eiger4m_single_image", fill=True, reverse=False, rot90=False): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + fill: True to fill data + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + + if False: + ATTEMPTS = 0 + for attempt in range(ATTEMPTS): + try: + (ev,) = hdr.events(fields=[detector], fill=fill) + break + + except Exception: + print("Trying again ...!") + if attempt == ATTEMPTS - 1: + # We're out of attempts. Raise the exception to help with debugging. + raise + else: + # We didn't succeed + raise Exception("Failed after {} repeated attempts".format(ATTEMPTS)) + + # TODO(mrakitin): replace with the lazy loader (when it's implemented): + imgs = list(hdr.data(detector)) + + if len(imgs[0]) >= 1: + md = imgs[0].md + imgs = pims.pipeline(lambda img: img)(imgs[0]) + imgs.md = md + + if reverse: + md = imgs.md + imgs = reverse_updown(imgs) # Why not np.flipud? + imgs.md = md + + if rot90: + md = imgs.md + imgs = rot90_clockwise(imgs) # Why not np.flipud? 
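+ # Hedged usage sketch of load_dask_data defined above; the uid, detector name and pixel-mask path
+ # are illustrative assumptions.
+ dimg, img_md = load_dask_data(
+     "abc12345", "eiger4m_single_image", mask_path_full="/tmp/pixel_masks/", reverse=True
+ )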
+ imgs.md = md + + return imgs + + +def mask_badpixels(mask, detector): + """ + Mask known bad pixel from the giveing mask + + """ + if detector == "eiger1m_single_image": + # to be determined + mask = mask + elif detector == "eiger4m_single_image" or detector == "image": + mask[513:552, :] = 0 + mask[1064:1103, :] = 0 + mask[1615:1654, :] = 0 + mask[:, 1029:1041] = 0 + mask[:, 0] = 0 + mask[0:, 2069] = 0 + mask[0] = 0 + mask[2166] = 0 + + elif detector == "eiger500K_single_image": + # to be determined + mask = mask + else: + mask = mask + return mask + + +def load_data2(uid, detector="eiger4m_single_image"): + """load bluesky scan data by giveing uid and detector + + Parameters + ---------- + uid: unique ID of a bluesky scan + detector: the used area detector + + Returns + ------- + image data: a pims frames series + if not success read the uid, will return image data as 0 + + Usuage: + imgs = load_data( uid, detector ) + md = imgs.md + """ + hdr = db[uid] + flag = 1 + while flag < 4 and flag != 0: + try: + (ev,) = hdr.events(fields=[detector]) + flag = 0 + except: + flag += 1 + print("Trying again ...!") + + if flag: + print("Can't Load Data!") + uid = "00000" # in case of failling load data + imgs = 0 + else: + imgs = ev["data"][detector] + + # print (imgs) + return imgs + + +def psave_obj(obj, filename): + """save an object with filename by pickle.dump method + This function automatically add '.pkl' as filename extension + Input: + obj: the object to be saved + filename: filename (with full path) to be saved + Return: + None + """ + with open(filename + ".pkl", "wb") as f: + pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) + + +def pload_obj(filename): + """load a pickled filename + This function automatically add '.pkl' to filename extension + Input: + filename: filename (with full path) to be saved + Return: + load the object by pickle.load method + """ + with open(filename + ".pkl", "rb") as f: + return pickle.load(f) + + +def load_mask(path, mask_name, plot_=False, reverse=False, rot90=False, *argv, **kwargs): + """load a mask file + the mask is a numpy binary file (.npy) + + Parameters + ---------- + path: the path of the mask file + mask_name: the name of the mask file + plot_: a boolen type + reverse: if True, reverse the image upside down to match the "real" image geometry (should always be True in the future) + Returns + ------- + mask: array + if plot_ =True, will show the mask + + Usuage: + mask = load_mask( path, mask_name, plot_ = True ) + """ + + mask = np.load(path + mask_name) + mask = np.array(mask, dtype=np.int32) + if reverse: + mask = mask[::-1, :] + if rot90: + mask = np.rot90(mask) + if plot_: + show_img(mask, *argv, **kwargs) + return mask + + +def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0): + """create a hot pixel mask by giving threshold + Input: + img: the image to create hot pixel mask + threshold: the threshold above which will be considered as hot pixels + center: optional, default=None + else, as a two-element list (beam center), i.e., [center_x, center_y] + if center is not None, the hot pixel will not include a disk region + which is defined by center and center_radius ( in unit of pixel) + Output: + a bool types numpy array (mask), 1 is good and 0 is excluded + + """ + bst_mask = np.ones_like(img, dtype=bool) + if center != None: + from skimage.draw import disk + + imy, imx = img.shape + cy, cx = center + rr, cc = disk((cy, cx), center_radius, shape=img.shape) + bst_mask[rr, cc] = 0 + if outer_radius: + bst_mask = 
np.zeros_like(img, dtype=bool) + rr2, cc2 = disk((cy, cx), outer_radius, shape=img.shape) + bst_mask[rr2, cc2] = 1 + bst_mask[rr, cc] = 0 + hmask = np.ones_like(img) + hmask[np.where(img * bst_mask > threshold)] = 0 + return hmask + + +def apply_mask(imgs, mask): + """apply mask to imgs to produce a generator + + Usuages: + imgsa = apply_mask( imgs, mask ) + good_series = apply_mask( imgs[good_start:], mask ) + + """ + return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask + + +def reverse_updown(imgs): + """reverse imgs upside down to produce a generator + + Usuages: + imgsr = reverse_updown( imgs) + + + """ + return pims.pipeline(lambda img: img[::-1, :])(imgs) # lazily apply mask + + +def rot90_clockwise(imgs): + """reverse imgs upside down to produce a generator + + Usuages: + imgsr = rot90_clockwise( imgs) + + """ + return pims.pipeline(lambda img: np.rot90(img))(imgs) # lazily apply mask + + +def RemoveHot(img, threshold=1e7, plot_=True): + """Remove hot pixel from img""" + + mask = np.ones_like(np.array(img)) + badp = np.where(np.array(img) >= threshold) + if len(badp[0]) != 0: + mask[badp] = 0 + if plot_: + show_img(mask) + return mask + + +############ +###plot data + + +def show_img( + image, + ax=None, + label_array=None, + alpha=0.5, + interpolation="nearest", + xlim=None, + ylim=None, + save=False, + image_name=None, + path=None, + aspect=None, + logs=False, + vmin=None, + vmax=None, + return_fig=False, + cmap="viridis", + show_time=False, + file_name=None, + ylabel=None, + xlabel=None, + extent=None, + show_colorbar=True, + tight=True, + show_ticks=True, + save_format="png", + dpi=None, + center=None, + origin="lower", + lab_fontsize=16, + tick_size=12, + colorbar_fontsize=8, + use_mat_imshow=False, + *argv, + **kwargs, +): + """YG. 
Sep26, 2017 Add label_array/alpha option to show a mask on top of image + + a simple function to show image by using matplotlib.plt imshow + pass *argv,**kwargs to imshow + + Parameters + ---------- + image : array + Image to show + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + fig, ax = plt.subplots() + else: + fig, ax = ax + + if center != None: + plot1D(center[1], center[0], ax=ax, c="b", m="o", legend="") + if not logs: + if not use_mat_imshow: + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + vmin=vmin, + vmax=vmax, + extent=extent, + ) # vmin=0,vmax=1, + else: + im = ax.imshow( + image, origin=origin, cmap=cmap, interpolation=interpolation, vmin=vmin, vmax=vmax, extent=extent + ) # vmin=0,vmax=1, + else: + if not use_mat_imshow: + im = imshow( + ax, + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) + else: + im = ax.imshow( + image, + origin=origin, + cmap=cmap, + interpolation=interpolation, + norm=LogNorm(vmin, vmax), + extent=extent, + ) + if label_array != None: + im2 = show_label_array(ax, label_array, alpha=alpha, cmap=cmap, interpolation=interpolation) + + ax.set_title(image_name) + if xlim != None: + ax.set_xlim(xlim) + if ylim != None: + ax.set_ylim(ylim) + + if not show_ticks: + ax.set_yticks([]) + ax.set_xticks([]) + else: + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + # mpl.rcParams['xtick.labelsize'] = tick_size + # mpl.rcParams['ytick.labelsize'] = tick_size + # print(tick_size) + + if ylabel != None: + # ax.set_ylabel(ylabel)#, fontsize = 9) + ax.set_ylabel(ylabel, fontsize=lab_fontsize) + if xlabel != None: + ax.set_xlabel(xlabel, fontsize=lab_fontsize) + + if aspect != None: + # aspect = image.shape[1]/float( image.shape[0] ) + ax.set_aspect(aspect) + else: + ax.set_aspect(aspect="auto") + + if show_colorbar: + cbar = fig.colorbar(im, extend="neither", spacing="proportional", orientation="vertical") + cbar.ax.tick_params(labelsize=colorbar_fontsize) + fig.set_tight_layout(tight) + if save: + if show_time: + dt = datetime.now() + CurTime = "_%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + fp = path + "%s" % (file_name) + CurTime + "." + save_format + else: + fp = path + "%s" % (image_name) + "." 
+ save_format + if dpi == None: + dpi = fig.dpi + plt.savefig(fp, dpi=dpi) + # fig.set_tight_layout(tight) + if return_fig: + return im # fig + + +def plot1D( + y, + x=None, + yerr=None, + ax=None, + return_fig=False, + ls="-", + figsize=None, + legend=None, + legend_size=None, + lw=None, + markersize=None, + tick_size=8, + *argv, + **kwargs, +): + """a simple function to plot two-column data by using matplotlib.plot + pass *argv,**kwargs to plot + + Parameters + ---------- + y: column-y + x: column-x, by default x=None, the plot will use index of y as x-axis + the other paramaters are defined same as plt.plot + Returns + ------- + None + """ + if ax == None: + if RUN_GUI: + fig = Figure() + ax = fig.add_subplot(111) + else: + if figsize != None: + fig, ax = plt.subplots(figsize=figsize) + else: + fig, ax = plt.subplots() + + if legend == None: + legend = " " + try: + logx = kwargs["logx"] + except: + logx = False + try: + logy = kwargs["logy"] + except: + logy = False + + try: + logxy = kwargs["logxy"] + except: + logxy = False + + if logx == True and logy == True: + logxy = True + + try: + marker = kwargs["marker"] + except: + try: + marker = kwargs["m"] + except: + marker = next(markers_) + try: + color = kwargs["color"] + except: + try: + color = kwargs["c"] + except: + color = next(colors_) + + if x == None: + x = range(len(y)) + if yerr == None: + ax.plot( + x, + y, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) + else: + ax.errorbar( + x, + y, + yerr, + marker=marker, + color=color, + ls=ls, + label=legend, + lw=lw, + markersize=markersize, + ) # ,*argv,**kwargs) + if logx: + ax.set_xscale("log") + if logy: + ax.set_yscale("log") + if logxy: + ax.set_xscale("log") + ax.set_yscale("log") + + ax.tick_params(axis="both", which="major", labelsize=tick_size) + ax.tick_params(axis="both", which="minor", labelsize=tick_size) + + if "xlim" in kwargs.keys(): + ax.set_xlim(kwargs["xlim"]) + if "ylim" in kwargs.keys(): + ax.set_ylim(kwargs["ylim"]) + if "xlabel" in kwargs.keys(): + ax.set_xlabel(kwargs["xlabel"]) + if "ylabel" in kwargs.keys(): + ax.set_ylabel(kwargs["ylabel"]) + + if "title" in kwargs.keys(): + title = kwargs["title"] + else: + title = "plot" + ax.set_title(title) + # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') + if (legend != "") and (legend != None): + ax.legend(loc="best", fontsize=legend_size) + if "save" in kwargs.keys(): + if kwargs["save"]: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + # fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' + fp = kwargs["path"] + "%s" % (title) + ".png" + plt.savefig(fp, dpi=fig.dpi) + if return_fig: + return fig + + +### + + +def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): + """Check the first frame with shutter open + + Parameters + ---------- + data_series: a image series + min_inten: the total intensity lower than min_inten is defined as shtter close + time_edge: the searching frame number range + + return: + shutter_open_frame: a integer, the first frame number with open shutter + + Usuage: + good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False ) + + """ + imgsum = np.array([np.sum(img) for img in data_series[time_edge[0] : time_edge[1] : 1]]) + if plot_: + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid=%s--imgsum" % uid) + ax.set_xlabel("Frame") + ax.set_ylabel("Total_Intensity") + # plt.show() + 
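+    # the first frame whose summed intensity exceeds min_inten is taken as the first
+    # frame with the shutter open (raises IndexError if no such frame exists in time_edge)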
shutter_open_frame = np.where(np.array(imgsum) > min_inten)[0][0] + print("The first frame with open shutter is : %s" % shutter_open_frame) + return shutter_open_frame + + +def get_each_frame_intensity( + data_series, sampling=50, bad_pixel_threshold=1e10, plot_=False, save=False, *argv, **kwargs +): + """Get the total intensity of each frame by sampling every N frames + Also get bad_frame_list by check whether above bad_pixel_threshold + + Usuage: + imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, + bad_pixel_threshold=1e10, plot_ = True) + """ + + # print ( argv, kwargs ) + imgsum = np.array([np.sum(img) for img in tqdm(data_series[::sampling], leave=True)]) + if plot_: + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + fig, ax = plt.subplots() + ax.plot(imgsum, "bo") + ax.set_title("uid= %s--imgsum" % uid) + ax.set_xlabel("Frame_bin_%s" % sampling) + ax.set_ylabel("Total_Intensity") + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "Uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--imgsum-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + bad_frame_list = np.where(np.array(imgsum) > bad_pixel_threshold)[0] + if len(bad_frame_list): + print("Bad frame list are: %s" % bad_frame_list) + else: + print("No bad frames are involved.") + return imgsum, bad_frame_list + + +def create_time_slice(N, slice_num, slice_width, edges=None): + """create a ROI time regions""" + if edges != None: + time_edge = edges + else: + if slice_num == 1: + time_edge = [[0, N]] + else: + tstep = N // slice_num + te = np.arange(0, slice_num + 1) * tstep + tc = np.int_((te[:-1] + te[1:]) / 2)[1:-1] + if slice_width % 2: + sw = slice_width // 2 + 1 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw + 1, s + sw] for s in tc] + + [[N - slice_width, N]] + ) + else: + sw = slice_width // 2 + time_edge = ( + [ + [0, slice_width], + ] + + [[s - sw, s + sw] for s in tc] + + [[N - slice_width, N]] + ) + + return np.array(time_edge) + + +def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nearest", **kwargs): + """ + YG. Sep 26, 2017 + Modified show_label_array(ax, label_array, cmap=None, **kwargs) + from https://github.com/Nikea/xray-vision/blob/master/xray_vision/mpl_plotting/roi.py + Display a labeled array nicely + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + label_array: ndarray + Expected to be an unsigned integer array. 
0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use, defaults to 'Paired' + Returns + ------- + img : AxesImage + The artist added to the axes + """ + if cmap == None: + cmap = "viridis" + # print(cmap) + _cmap = copy.copy((mcm.get_cmap(cmap))) + _cmap.set_under("w", 0) + vmin = max(0.5, kwargs.pop("vmin", 0.5)) + im = ax.imshow(label_array, cmap=cmap, interpolation=interpolation, vmin=vmin, **kwargs) + if aspect == None: + ax.set_aspect(aspect="auto") + # ax.set_aspect('equal') + return im + + +def show_label_array_on_image( + ax, + image, + label_array, + cmap=None, + norm=None, + log_img=True, + alpha=0.3, + vmin=0.1, + vmax=5, + imshow_cmap="gray", + **kwargs, +): # norm=LogNorm(), + """ + This will plot the required ROI's(labeled array) on the image + + Additional kwargs are passed through to `ax.imshow`. + If `vmin` is in kwargs, it is clipped to minimum of 0.5. + Parameters + ---------- + ax : Axes + The `Axes` object to add the artist too + image : array + The image array + label_array : array + Expected to be an unsigned integer array. 0 is background, + positive integers label region of interest + cmap : str or colormap, optional + Color map to use for plotting the label_array, defaults to 'None' + imshow_cmap : str or colormap, optional + Color map to use for plotting the image, defaults to 'gray' + norm : str, optional + Normalize scale data, defaults to 'Lognorm()' + Returns + ------- + im : AxesImage + The artist added to the axes + im_label : AxesImage + The artist added to the axes + """ + ax.set_aspect("equal") + + # print (vmin, vmax ) + if log_img: + im = ax.imshow( + image, cmap=imshow_cmap, interpolation="none", norm=LogNorm(vmin, vmax), **kwargs + ) # norm=norm, + else: + im = ax.imshow(image, cmap=imshow_cmap, interpolation="none", vmin=vmin, vmax=vmax, **kwargs) # norm=norm, + + im_label = mpl_plot.show_label_array( + ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha, **kwargs + ) # norm=norm, + + return im, im_label + + +def show_ROI_on_image( + image, + ROI, + center=None, + rwidth=400, + alpha=0.3, + label_on=True, + save=False, + return_fig=False, + rect_reqion=None, + log_img=True, + vmin=0.01, + vmax=5, + show_ang_cor=False, + cmap=cmap_albula, + fig_ax=None, + uid="uid", + path="", + aspect=1, + show_colorbar=True, + show_roi_edge=False, + *argv, + **kwargs, +): + """show ROI on an image + image: the data frame + ROI: the interested region + center: the plot center + rwidth: the plot range around the center + + """ + + if RUN_GUI: + fig = Figure(figsize=(8, 8)) + axes = fig.add_subplot(111) + elif fig_ax != None: + fig, axes = fig_ax + else: + fig, axes = plt.subplots() # plt.subplots(figsize=(8,8)) + + # print( vmin, vmax) + # norm=LogNorm(vmin, vmax) + + axes.set_title("%s_ROI_on_Image" % uid) + if log_img: + if vmin == 0: + vmin += 1e-10 + + vmax = max(1, vmax) + if not show_roi_edge: + # print('here') + im, im_label = show_label_array_on_image( + axes, + image, + ROI, + imshow_cmap="viridis", + cmap=cmap, + alpha=alpha, + log_img=log_img, + vmin=vmin, + vmax=vmax, + origin="lower", + ) + else: + edg = get_image_edge(ROI) + image_ = get_image_with_roi(image, ROI, scale_factor=2) + # fig, axes = plt.subplots( ) + show_img( + image_, + ax=[fig, axes], + vmin=vmin, + vmax=vmax, + logs=log_img, + image_name="%s_ROI_on_Image" % uid, + cmap=cmap, + ) + + if rect_reqion == None: + if center != None: + x1, x2 = [center[1] - rwidth, center[1] + rwidth] + y1, y2 = [center[0] - rwidth, 
center[0] + rwidth] + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) + else: + x1, x2, y1, y2 = rect_reqion + axes.set_xlim([x1, x2]) + axes.set_ylim([y1, y2]) + + if label_on: + num_qzr = len(np.unique(ROI)) - 1 + for i in range(1, num_qzr + 1): + ind = np.where(ROI == i)[1] + indz = np.where(ROI == i)[0] + c = "%i" % i + y_val = int(indz.mean()) + x_val = int(ind.mean()) + # print (xval, y) + axes.text(x_val, y_val, c, color="b", va="center", ha="center") + if show_ang_cor: + axes.text(-0.0, 0.5, "-/+180" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(1.0, 0.5, "0" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, -0.0, "-90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + axes.text(0.5, 1.0, "90" + r"$^0$", color="r", va="center", ha="center", transform=axes.transAxes) + + axes.set_aspect(aspect) + # fig.colorbar(im_label) + if show_colorbar: + if not show_roi_edge: + fig.colorbar(im) + if save: + fp = path + "%s_ROI_on_Image" % uid + ".png" + plt.savefig(fp, dpi=fig.dpi) + # plt.show() + if return_fig: + return fig, axes, im + + +def crop_image(image, crop_mask): + """Crop the non_zeros pixels of an image to a new image""" + from skimage.util import crop, pad + + pxlst = np.where(crop_mask.ravel())[0] + dims = crop_mask.shape + imgwidthy = dims[1] # dimension in y, but in plot being x + imgwidthx = dims[0] # dimension in x, but in plot being y + # x and y are flipped??? + # matrix notation!!! + pixely = pxlst % imgwidthy + pixelx = pxlst // imgwidthy + + minpixelx = np.min(pixelx) + minpixely = np.min(pixely) + maxpixelx = np.max(pixelx) + maxpixely = np.max(pixely) + crops = crop_mask * image + img_crop = crop(crops, ((minpixelx, imgwidthx - maxpixelx - 1), (minpixely, imgwidthy - maxpixely - 1))) + return img_crop + + +def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs): + """Get average imagef from a data_series by every sampling number to save time""" + if img_samp_index == None: + avg_img = np.average(data_series[::sampling], axis=0) + else: + avg_img = np.zeros_like(data_series[0]) + n = 0 + for i in img_samp_index: + avg_img += data_series[i] + n += 1 + avg_img = np.array(avg_img) / n + + if plot_: + fig, ax = plt.subplots() + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + + im = ax.imshow(avg_img, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e2)) + # ax.set_title("Masked Averaged Image") + ax.set_title("uid= %s--Masked Averaged Image" % uid) + fig.colorbar(im) + + if save: + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + path = kwargs["path"] + if "uid" in kwargs: + uid = kwargs["uid"] + else: + uid = "uid" + # fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png' + fp = path + "uid=%s--avg-img-" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + # plt.show() + + return avg_img + + +def check_ROI_intensity(avg_img, ring_mask, ring_number=3, save=False, plot=True, *argv, **kwargs): + """plot intensity versus pixel of a ring + Parameters + ---------- + avg_img: 2D-array, the image + ring_mask: 2D-array + ring_number: which ring to plot + + Returns + ------- + + + """ + # print('here') + + uid = "uid" + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number]) + + if plot: + fig, ax = plt.subplots() + ax.set_title("%s--check-RIO-%s-intensity" % (uid, ring_number)) + 
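+        # roi.roi_pixel_values returns (list_of_value_arrays, index_list), so
+        # pixel[0][0] is the array of pixel values belonging to the requested ring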
ax.plot(pixel[0][0], "bo", ls="-") + ax.set_ylabel("Intensity") + ax.set_xlabel("pixel") + if save: + path = kwargs["path"] + fp = path + "%s_Mean_intensity_of_one_ROI" % uid + ".png" + fig.savefig(fp, dpi=fig.dpi) + if save: + path = kwargs["path"] + save_lists( + [range(len(pixel[0][0])), pixel[0][0]], + label=["pixel_list", "roi_intensity"], + filename="%s_Mean_intensity_of_one_ROI" % uid, + path=path, + ) + # plt.show() + return pixel[0][0] + + +# from tqdm import tqdm + + +def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good_start=0, num_buf=8, num_lev=None): + """calculation g2 by using a multi-tau algorithm""" + + noframes = len(image_series) # number of frames, not "no frames" + # num_buf = 8 # number of buffers + + if bad_image_process: + import skbeam.core.mask as mask_image + + bad_img_list = np.array(bad_frame_list) - good_start + new_imgs = mask_image.bad_to_nan_gen(image_series, bad_img_list) + + if num_lev == None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + print("Bad Frames involved!") + + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(new_imgs)) + print("G2 calculation DONE!") + + else: + + if num_lev == None: + num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 + print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) + print("%s frames will be processed..." % (noframes)) + g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series)) + print("G2 calculation DONE!") + + return g2, lag_steps + + +def run_time(t0): + """Calculate running time of a program + Parameters + ---------- + t0: time_string, t0=time.time() + The start time + Returns + ------- + Print the running time + + One usage + --------- + t0=time.time() + .....(the running code) + run_time(t0) + """ + + elapsed_time = time.time() - t0 + if elapsed_time < 60: + print("Total time: %.3f sec" % (elapsed_time)) + else: + print("Total time: %.3f min" % (elapsed_time / 60.0)) + + +def trans_data_to_pd(data, label=None, dtype="array"): + """ + convert data into pandas.DataFrame + Input: + data: list or np.array + label: the coloum label of the data + dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]] + Output: + a pandas.DataFrame + """ + # lists a [ list1, list2...] all the list have the same length + import sys + + import pandas as pd + from numpy import arange, array + + if dtype == "list": + data = array(data).T + N, M = data.shape + elif dtype == "array": + data = array(data) + N, M = data.shape + else: + print("Wrong data type! 
Now only support 'list' and 'array' tpye") + + index = arange(N) + if label == None: + label = ["data%s" % i for i in range(M)] + # print label + df = pd.DataFrame(data, index=index, columns=label) + return df + + +def save_lists(data, label=None, filename=None, path=None, return_res=False, verbose=False): + """ + save_lists( data, label=None, filename=None, path=None) + + save lists to a CSV file with filename in path + Parameters + ---------- + data: list + label: the column name, the length should be equal to the column number of list + filename: the filename to be saved + path: the filepath to be saved + + Example: + save_arrays( [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir ) + """ + + M, N = len(data[0]), len(data) + d = np.zeros([N, M]) + for i in range(N): + d[i] = data[i] + + df = trans_data_to_pd(d.T, label, "array") + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = "data" + filename = os.path.join(path, filename) # +'.csv') + df.to_csv(filename) + if verbose: + print("The data was saved in: %s." % filename) + if return_res: + return df + + +def get_pos_val_overlap(p1, v1, p2, v2, Nl): + """get the overlap of v1 and v2 + p1: the index of array1 in array with total length as Nl + v1: the corresponding value of p1 + p2: the index of array2 in array with total length as Nl + v2: the corresponding value of p2 + Return: + The values in v1 with the position in overlap of p1 and p2 + The values in v2 with the position in overlap of p1 and p2 + + An example: + Nl =10 + p1= np.array( [1,3,4,6,8] ) + v1 = np.array( [10,20,30,40,50]) + p2= np.array( [ 0,2,3,5,7,8]) + v2=np.array( [10,20,30,40,50,60,70]) + + get_pos_val_overlap( p1, v1, p2,v2, Nl) + + """ + ind = np.zeros(Nl, dtype=np.int32) + ind[p1] = np.arange(len(p1)) + 1 + w2 = np.where(ind[p2])[0] + w1 = ind[p2[w2]] - 1 + return v1[w1], v2[w2] + + +def save_arrays(data, label=None, dtype="array", filename=None, path=None, return_res=False, verbose=False): + """ + July 10, 2016, Y.G.@CHX + save_arrays( data, label=None, dtype='array', filename=None, path=None): + save data to a CSV file with filename in path + Parameters + ---------- + data: arrays + label: the column name, the length should be equal to the column number of data + dtype: array or list + filename: the filename to be saved + path: the filepath to be saved + + Example: + + save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir ) + + + """ + df = trans_data_to_pd(data, label, dtype) + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + if filename == None: + filename = "data" + filename_ = os.path.join(path, filename) # +'.csv') + df.to_csv(filename_) + if verbose: + print("The file: %s is saved in %s" % (filename, path)) + # print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime)) + if return_res: + return df + + +def cal_particle_g2(radius, viscosity, qr, taus, beta=0.2, T=298): + """YG Dev Nov 20, 2017@CHX + calculate particle g2 fucntion by giving particle radius, Q , and solution viscosity using a simple + exponetional model + Input: + radius: m + qr, list, in A-1 + visocity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + T: temperture, in K + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(-123)*298 / ( 6*np.pi * 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10**5 A2/s + 
taus: time + beta: contrast + + cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4) + + """ + D0 = get_diffusion_coefficient(viscosity, radius, T=T) + g2_q1 = np.zeros(len(qr), dtype=object) + for i, q1 in enumerate(qr): + relaxation_rate = D0 * q1**2 + g2_q1[i] = simple_exponential(taus, beta=beta, relaxation_rate=relaxation_rate, baseline=1) + return g2_q1 + + +def get_Reynolds_number(flow_rate, flow_radius, fluid_density, fluid_viscosity): + """May 10, 2019, Y.G.@CHX + get Reynolds_number , the ratio of the inertial to viscous forces, V*Dia*density/eta + Reynolds_number << 1000 gives a laminar flow + flow_rate: ul/s + flow_radius: mm + fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 ) + fliud_viscosity: N*s/m^2 ( Kg /(s*m) ) + + return Reynolds_number + """ + return flow_rate * 1e-6 * flow_radius * 1e-3 * 2 * fluid_density / fluid_viscosity + + +def get_Deborah_number(flow_rate, beam_size, q_vector, diffusion_coefficient): + """May 10, 2019, Y.G.@CHX + get Deborah_number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2) + flow_rate: ul/s + beam_size: ul + q_vector: A-1 + diffusion_coefficient: A^2/s + + return Deborah_number + """ + return (flow_rate / beam_size) / (diffusion_coefficient * q_vector**2) + + +def get_viscosity(diffusion_coefficient, radius, T=298): + """May 10, 2019, Y.G.@CHX + get visocity of a Brownian motion particle with radius in fuild with diffusion_coefficient + diffusion_coefficient in unit of A^2/s + radius: m + T: K + k: 1.38064852(79)*10**(−23) J/T, Boltzmann constant + + return visosity: N*s/m^2 (water at 25K = 8.9*10**(-4) ) + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * diffusion_coefficient * radius) * 10**20 + + +def get_diffusion_coefficient(viscosity, radius, T=298): + """July 10, 2016, Y.G.@CHX + get diffusion_coefficient of a Brownian motion particle with radius in fuild with visocity + viscosity: N*s/m^2 (water at 25K = 8.9*10^(-4) ) + radius: m + T: K + k: 1.38064852(79)×10−23 J/T, Boltzmann constant + + return diffusion_coefficient in unit of A^2/s + e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298K) gives: + 1.38064852*10**(−23) *298 / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 /1e5 = 4.18*10^5 A2/s + + get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298) + + """ + + k = 1.38064852 * 10 ** (-23) + return k * T / (6 * np.pi * viscosity * radius) * 10**20 + + +def ring_edges(inner_radius, width, spacing=0, num_rings=None): + """ + Aug 02, 2016, Y.G.@CHX + ring_edges(inner_radius, width, spacing=0, num_rings=None) + + Calculate the inner and outer radius of a set of rings. + + The number of rings, their widths, and any spacing between rings can be + specified. They can be uniform or varied. + + LW 04/02/2024: fixed checking whether width and spacing are iterable + + Parameters + ---------- + inner_radius : float + inner radius of the inner-most ring + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + spacing : float or list of floats, optional + margin between rings, 0 by default + If a float, all rings will have the same spacing. If a list, + the length of the list must be one less than the number of + rings. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
+ + Returns + ------- + edges : array + inner and outer radius for each ring + + Example + ------- + # Make two rings starting at r=1px, each 5px wide + >>> ring_edges(inner_radius=1, width=5, num_rings=2) + [(1, 6), (6, 11)] + # Make three rings of different widths and spacings. + # Since the width and spacings are given individually, the number of + # rings here is simply inferred. + >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2)) + [(1, 6), (7, 11), (13, 16)] + + """ + # All of this input validation merely checks that width, spacing, and + # num_rings are self-consistent and complete. + try: + iter(width) + width_is_list = True + except: + width_is_list = False + try: + iter(spacing) + spacing_is_list = True + except: + spacing_is_list = False + + # width_is_list = isinstance(width, collections.Iterable) + # spacing_is_list = isinstance(spacing, collections.Iterable) + if width_is_list and spacing_is_list: + if len(width) != len(spacing) + 1: + raise ValueError("List of spacings must be one less than list " "of widths.") + if num_rings == None: + try: + num_rings = len(width) + except TypeError: + try: + num_rings = len(spacing) + 1 + except TypeError: + raise ValueError( + "Since width and spacing are constant, " + "num_rings cannot be inferred and must be " + "specified." + ) + else: + if width_is_list: + if num_rings != len(width): + raise ValueError("num_rings does not match width list") + if spacing_is_list: + if num_rings - 1 != len(spacing): + raise ValueError("num_rings does not match spacing list") + # Now regularlize the input. + if not width_is_list: + width = np.ones(num_rings) * width + + if spacing == None: + spacing = [] + else: + if not spacing_is_list: + spacing = np.ones(num_rings - 1) * spacing + # The inner radius is the first "spacing." + all_spacings = np.insert(spacing, 0, inner_radius) + steps = np.array([all_spacings, width]).T.ravel() + edges = np.cumsum(steps).reshape(-1, 2) + return edges + + +def get_non_uniform_edges( + centers, + width=4, + number_rings=1, + spacing=0, +): + """ + YG CHX Spe 6 + get_non_uniform_edges( centers, width = 4, number_rings=3 ) + + Calculate the inner and outer radius of a set of non uniform distributed + rings by giving ring centers + For each center, there are number_rings with each of width + + LW 04/02/2024: fixed checking whether 'width' is iterable + + Parameters + ---------- + centers : float + the center of the rings + + width : float or list of floats + ring thickness + If a float, all rings will have the same thickness. + + num_rings : int, optional + number of rings + Required if width and spacing are not lists and number + cannot thereby be inferred. If it is given and can also be + inferred, input is checked for consistency. 
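+
+    Example (illustrative call):
+        # three rings, each 4 pixels wide, centered on each of the given centers
+        edges = get_non_uniform_edges(centers=[10, 20], width=4, number_rings=3)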
+ + Returns + ------- + edges : array + inner and outer radius for each ring + """ + + if number_rings == None: + number_rings = 1 + edges = np.zeros([len(centers) * number_rings, 2]) + + try: + iter(width) + except: + width = np.ones_like(centers) * width + for i, c in enumerate(centers): + edges[i * number_rings : (i + 1) * number_rings, :] = ring_edges( + inner_radius=c - width[i] * number_rings / 2, width=width[i], spacing=spacing, num_rings=number_rings + ) + return edges + + +def trans_tf_to_td(tf, dtype="dframe"): + """July 02, 2015, Y.G.@CHX + Translate epoch time to string + """ + from datetime import datetime + + import numpy as np + import pandas as pd + + """translate time.float to time.date, + td.type dframe: a dataframe + td.type list, a list + """ + if dtype == "dframe": + ind = tf.index + else: + ind = range(len(tf)) + td = np.array([datetime.fromtimestamp(tf[i]) for i in ind]) + return td + + +def trans_td_to_tf(td, dtype="dframe"): + """July 02, 2015, Y.G.@CHX + Translate string to epoch time + + """ + import time + + import numpy as np + + """translate time.date to time.float, + td.type dframe: a dataframe + td.type list, a list + """ + if dtype == "dframe": + ind = td.index + else: + ind = range(len(td)) + # tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ]) + tf = np.array([time.mktime(td[i].timetuple()) for i in ind]) + return tf + + +def get_averaged_data_from_multi_res( + multi_res, keystr="g2", different_length=True, verbose=False, cal_errorbar=False +): + """Y.G. Dec 22, 2016 + get average data from multi-run analysis result + Parameters: + multi_res: dict, generated by function run_xpcs_xsvs_single + each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al. + keystr: string, get the averaged keystr + different_length: if True, do careful average for different length results + return: + array, averaged results + + """ + maxM = 0 + mkeys = multi_res.keys() + if not different_length: + n = 0 + for i, key in enumerate(list(mkeys)): + keystri = multi_res[key][keystr] + if i == 0: + keystr_average = keystri + else: + keystr_average += keystri + n += 1 + keystr_average /= n + + else: + length_dict = {} + D = 1 + for i, key in enumerate(list(mkeys)): + if verbose: + print(i, key) + shapes = multi_res[key][keystr].shape + M = shapes[0] + if i == 0: + if len(shapes) == 2: + D = 2 + maxN = shapes[1] + elif len(shapes) == 3: + D = 3 + maxN = shapes[2] # in case of two-time correlation + if (M) not in length_dict: + length_dict[(M)] = 1 + else: + length_dict[(M)] += 1 + maxM = max(maxM, M) + # print( length_dict ) + avg_count = {} + sk = np.array(sorted(length_dict)) + for i, k in enumerate(sk): + avg_count[k] = np.sum(np.array([length_dict[k] for k in sk[i:]])) + # print(length_dict, avg_count) + if D == 2: + # print('here') + keystr_average = np.zeros([maxM, maxN]) + elif D == 3: + keystr_average = np.zeros([maxM, maxM, maxN]) + else: + keystr_average = np.zeros([maxM]) + for i, key in enumerate(list(mkeys)): + keystri = multi_res[key][keystr] + Mi = keystri.shape[0] + if D != 3: + keystr_average[:Mi] += keystri + else: + keystr_average[:Mi, :Mi, :] += keystri + if D != 3: + keystr_average[: sk[0]] /= avg_count[sk[0]] + else: + keystr_average[: sk[0], : sk[0], :] /= avg_count[sk[0]] + for i in range(0, len(sk) - 1): + if D != 3: + keystr_average[sk[i] : sk[i + 1]] /= avg_count[sk[i + 1]] + else: + keystr_average[sk[i] : sk[i + 1], sk[i] : sk[i + 1], :] /= avg_count[sk[i + 1]] + + return keystr_average + + +def save_g2_general(g2, 
taus, qr=None, qz=None, uid="uid", path=None, return_res=False): + """Y.G. Dec 29, 2016 + + save g2 results, + res_pargs should contain + g2: one-time correlation function + taus, lags of g2 + qr: the qr center, same length as g2 + qz: the qz or angle center, same length as g2 + path: + uid: + + """ + + df = DataFrame(np.hstack([(taus).reshape(len(g2), 1), g2])) + t, qs = g2.shape + if qr is None: + qr = range(qs) + if qz is None: + df.columns = ["tau"] + [str(qr_) for qr_ in qr] + else: + df.columns = ["tau"] + [str(qr_) + "_" + str(qz_) for (qr_, qz_) in zip(qr, qz)] + + # dt =datetime.now() + # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) + + # if filename is None: + + filename = uid + # filename = 'uid=%s--g2.csv' % (uid) + # filename += '-uid=%s-%s.csv' % (uid,CurTime) + # filename += '-uid=%s.csv' % (uid) + filename1 = os.path.join(path, filename) + df.to_csv(filename1) + print("The correlation function is saved in %s with filename as %s" % (path, filename)) + if return_res: + return df + + +########### +# *for g2 fit and plot + + +def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1): + return beta * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + + +def simple_exponential(x, beta, relaxation_rate, baseline=1): + """relation_rate: unit 1/s""" + return beta * np.exp(-2 * relaxation_rate * x) + baseline + + +def simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * relaxation_rate * x) + baseline + + +def stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1): + return beta * (1 + amp * np.cos(2 * np.pi * freq * x)) * np.exp(-2 * (relaxation_rate * x) ** alpha) + baseline + + +def flow_para_function_with_vibration(x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): + vibration_part = 1 + amp * np.cos(2 * np.pi * freq * x) + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * vibration_part * Diff_part * Flow_part + baseline + + +def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): + """flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )""" + + Diff_part = np.exp(-2 * relaxation_rate * x) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): + """Nov 9, 2017 Basically, make q vector to (qr, angle), + ###relaxation_rate is actually a diffusion rate + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + Diffusion part: np.exp( -2*D q^2 *tau ) + q_ang: would be np.radians( ang - 90 ) + + """ + + Diff_part = np.exp(-2 * (diffusion * qr**2 * x) ** alpha) + if flow_velocity != 0: + if np.cos(q_ang) >= 1e-8: + Flow_part = ( + np.pi**2 + / (16 * x * flow_velocity * qr * abs(np.cos(q_ang))) + * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity * qr * abs(np.cos(q_ang))))) ** 2 + ) + else: + Flow_part = 1 + else: + Flow_part = 1 + return beta * Diff_part * Flow_part + baseline + + +def get_flow_velocity(average_velocity, shape_factor): + + return average_velocity * (1 - shape_factor) / (1 + shape_factor) + + +def stretched_flow_para_function(x, beta, relaxation_rate, alpha, flow_velocity, 
baseline=1): + """ + flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) + """ + Diff_part = np.exp(-2 * (relaxation_rate * x) ** alpha) + Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity))) ** 2 + return beta * Diff_part * Flow_part + baseline + + +def get_g2_fit_general_two_steps( + g2, taus, function="simple_exponential", second_fit_range=[0, 20], sequential_fit=False, *argv, **kwargs +): + """ + Fit g2 in two steps, + i) Using the "function" to fit whole g2 to get baseline and beta (contrast) + ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function + """ + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(g2, taus, function, sequential_fit, *argv, **kwargs) + guess_values = {} + for k in list(g2_fit_result[0].params.keys()): + guess_values[k] = np.array([g2_fit_result[i].params[k].value for i in range(g2.shape[1])]) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + else: + guess_limits = dict(baseline=[1, 1.8], alpha=[0, 2], beta=[0.0, 1], relaxation_rate=[0.001, 10000]) + + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function="simple_exponential", + sequential_fit=sequential_fit, + fit_range=second_fit_range, + fit_variables={"baseline": False, "beta": False, "alpha": False, "relaxation_rate": True}, + guess_values=guess_values, + guess_limits=guess_limits, + ) + + return g2_fit_result, taus_fit, g2_fit + + +def get_g2_fit_general( + g2, taus, function="simple_exponential", sequential_fit=False, qval_dict=None, ang_init=90, *argv, **kwargs +): + """ + Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq + qval_dict: a dict with qr and ang (in unit of degrees).") + + + Dec 29,2016, Y.G.@CHX + + Fit one-time correlation function + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2: one-time correlation function for fit, with shape as [taus, qs] + taus: the time delay + sequential_fit: if True, will use the low-q fit result as initial value to fit the higher Qs + function: + supported function include: + 'simple_exponential' (or 'simple'): fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential'(or 'streched'): fit by a streched exponential function, defined as + beta * ( np.exp( -2 * ( relaxation_rate * tau )**alpha ) + baseline + 'stretched_vibration': fit by a streched exponential function with vibration, defined as + beta * (1 + amp*np.cos( 2*np.pi*60* x) )* np.exp(-2 * (relaxation_rate * x)**alpha) + baseline + 'flow_para_function' (or flow): fit by a flow function + + + kwargs: + could contains: + 'fit_variables': a dict, for vary or not, + keys are fitting para, including + beta, relaxation_rate , alpha ,baseline + values: a False or True, False for not vary + 'guess_values': a dict, for initial value of the fitting para, + the defalut values are + dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) + + 'guess_limits': a dict, for the limits of the fittting para, for example: + dict( beta=[0, 10],, alpha=[0,100] ) + the default is: + dict( baseline =[0.5, 2.5], alpha=[0, inf] ,beta = [0, 1], relaxation_rate= [0.0,1000] ) + Returns + ------- + fit resutls: a instance in limfit + tau_fit + fit_data by the model, it has the q number of g2 + + an example: + fit_g2_func = 'stretched' + g2_fit_result, taus_fit, g2_fit = 
get_g2_fit_general( g2, taus, + function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, + fit_variables={'baseline':True, 'beta':True, 'alpha':True,'relaxation_rate':True}, + guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) + + g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) + + + """ + + if "fit_range" in kwargs.keys(): + fit_range = kwargs["fit_range"] + else: + fit_range = None + + num_rings = g2.shape[1] + if "fit_variables" in kwargs: + additional_var = kwargs["fit_variables"] + _vars = [k for k in list(additional_var.keys()) if additional_var[k] == False] + else: + _vars = [] + if function == "simple_exponential" or function == "simple": + _vars = np.unique(_vars + ["alpha"]) + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= list( _vars) ) + elif function == "stretched_exponential" or function == "stretched": + mod = Model(stretched_auto_corr_scat_factor) # , independent_vars= _vars) + elif function == "stretched_vibration": + mod = Model(stretched_auto_corr_scat_factor_with_vibration) # , independent_vars= _vars) + elif function == "flow_para_function" or function == "flow_para": + mod = Model(flow_para_function) # , independent_vars= _vars) + elif function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod = Model(flow_para_function_explicitq) # , independent_vars= _vars) + elif function == "flow_para_function_with_vibration" or function == "flow_vibration": + mod = Model(flow_para_function_with_vibration) + + else: + print( + "The %s is not supported.The supported functions include simple_exponential and stretched_exponential" + % function + ) + + mod.set_param_hint("baseline", min=0.5, max=2.5) + mod.set_param_hint("beta", min=0.0, max=1.0) + mod.set_param_hint("alpha", min=0.0) + mod.set_param_hint("relaxation_rate", min=0.0, max=1000) + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + + if "guess_limits" in kwargs: + guess_limits = kwargs["guess_limits"] + for k in list(guess_limits.keys()): + mod.set_param_hint(k, min=guess_limits[k][0], max=guess_limits[k][1]) + + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + mod.set_param_hint("flow_velocity", min=0) + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + mod.set_param_hint("flow_velocity", min=0) + mod.set_param_hint("diffusion", min=0.0, max=2e8) + if function == "stretched_vibration" or function == "flow_vibration": + mod.set_param_hint("freq", min=0) + mod.set_param_hint("amp", min=0) + + _guess_val = dict(beta=0.1, alpha=1.0, relaxation_rate=0.005, baseline=1.0) + if "guess_values" in kwargs: + guess_values = kwargs["guess_values"] + _guess_val.update(guess_values) + + _beta = _guess_val["beta"] + _alpha = _guess_val["alpha"] + _relaxation_rate = _guess_val["relaxation_rate"] + _baseline = _guess_val["baseline"] + if isinstance(_beta, (np.ndarray, list)): + _beta_ = _beta[0] + else: + _beta_ = _beta + if isinstance(_baseline, (np.ndarray, list)): + _baseline_ = _baseline[0] + else: + _baseline_ = _baseline + if isinstance(_relaxation_rate, (np.ndarray, list)): + _relaxation_rate_ = _relaxation_rate[0] + else: + _relaxation_rate_ = _relaxation_rate + if isinstance(_alpha, (np.ndarray, list)): + _alpha_ = _alpha[0] + else: + _alpha_ = _alpha + pars = mod.make_params(beta=_beta_, alpha=_alpha_, relaxation_rate=_relaxation_rate_, baseline=_baseline_) + + if function == 
"flow_para_function" or function == "flow_para": + _flow_velocity = _guess_val["flow_velocity"] + if isinstance(_flow_velocity, (np.ndarray, list)): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + relaxation_rate=_relaxation_rate_, + baseline=_baseline_, + ) + + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + _flow_velocity = _guess_val["flow_velocity"] + _diffusion = _guess_val["diffusion"] + _guess_val["qr"] = 1 + _guess_val["q_ang"] = 0 + if isinstance(_flow_velocity, (np.ndarray, list)): + _flow_velocity_ = _flow_velocity[0] + else: + _flow_velocity_ = _flow_velocity + if isinstance(_diffusion, (np.ndarray, list)): + _diffusion_ = _diffusion[0] + else: + _diffusion_ = _diffusion + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=1, + q_ang=0, + ) + + if function == "stretched_vibration": + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, alpha=_alpha, freq=_freq, amp=_amp, relaxation_rate=_relaxation_rate, baseline=_baseline + ) + + if function == "flow_vibration": + _flow_velocity = _guess_val["flow_velocity"] + _freq = _guess_val["freq"] + _amp = _guess_val["amp"] + pars = mod.make_params( + beta=_beta, + freq=_freq, + amp=_amp, + flow_velocity=_flow_velocity, + relaxation_rate=_relaxation_rate, + baseline=_baseline, + ) + for v in _vars: + pars["%s" % v].vary = False + # print( pars ) + fit_res = [] + model_data = [] + for i in range(num_rings): + if fit_range != None: + y_ = g2[1:, i][fit_range[0] : fit_range[1]] + lags_ = taus[1:][fit_range[0] : fit_range[1]] + else: + y_ = g2[1:, i] + lags_ = taus[1:] + + mm = ~np.isnan(y_) + y = y_[mm] + lags = lags_[mm] + # print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape ) + # y=y_ + # lags=lags_ + # print( _relaxation_rate ) + for k in list(pars.keys()): + # print(k, _guess_val[k] ) + try: + if isinstance(_guess_val[k], (np.ndarray, list)): + pars[k].value = _guess_val[k][i] + except: + pass + + if True: + if isinstance(_beta, (np.ndarray, list)): + # pars['beta'].value = _guess_val['beta'][i] + _beta_ = _guess_val["beta"][i] + if isinstance(_baseline, (np.ndarray, list)): + # pars['baseline'].value = _guess_val['baseline'][i] + _baseline_ = _guess_val["baseline"][i] + if isinstance(_relaxation_rate, (np.ndarray, list)): + # pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] + _relaxation_rate_ = _guess_val["relaxation_rate"][i] + if isinstance(_alpha, (np.ndarray, list)): + # pars['alpha'].value = _guess_val['alpha'][i] + _alpha_ = _guess_val["alpha"][i] + # for k in list(pars.keys()): + # print(k, _guess_val[k] ) + # pars[k].value = _guess_val[k][i] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + if qval_dict == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + + pars = mod.make_params( + beta=_beta_, + alpha=_alpha_, + flow_velocity=_flow_velocity_, + diffusion=_diffusion_, + baseline=_baseline_, + qr=qval_dict[i][0], + q_ang=abs(np.radians(qval_dict[i][1] - ang_init)), + ) + + pars["qr"].vary = False + pars["q_ang"].vary = False + for v in _vars: + pars["%s" % v].vary = False + + # if i==20: + # print(pars) + # print( pars ) + result1 = mod.fit(y, pars, x=lags) + # print(qval_dict[i][0], qval_dict[i][1], y) + if sequential_fit: + for k in 
list(pars.keys()): + # print( pars ) + if k in list(result1.best_values.keys()): + pars[k].value = result1.best_values[k] + fit_res.append(result1) + # model_data.append( result1.best_fit ) + yf = result1.model.eval(params=result1.params, x=lags_) + model_data.append(yf) + return fit_res, lags_, np.array(model_data).T + + +def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): + """Y.G. 2016, Dec 26 + Get short/long labels from a qval_dict + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + """ + + Nqs = len(qval_dict.keys()) + len_qrz = len(list(qval_dict.values())[0]) + # qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] ) + qr_label = np.array(list(qval_dict.values()))[:, 0] + if geometry == "gi_saxs" or geometry == "ang_saxs": # or geometry=='gi_waxs': + if len_qrz < 2: + print("please give qz or qang for the q-label") + else: + # qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] ) + qz_label = np.array(list(qval_dict.values()))[:, 1] + else: + qz_label = np.array([0]) + + uqz_label = np.unique(qz_label) + num_qz = len(uqz_label) + + uqr_label = np.unique(qr_label) + num_qr = len(uqr_label) + + # print( uqr_label, uqz_label ) + if len(uqr_label) >= len(uqz_label): + master_plot = "qz" # one qz for many sub plots of each qr + else: + master_plot = "qr" + + mastp = master_plot + if geometry == "ang_saxs": + mastp = "ang" + num_short = min(num_qz, num_qr) + num_long = max(num_qz, num_qr) + + # print( mastp, num_short, num_long) + if num_qz != num_qr: + short_label = [qz_label, qr_label][np.argmin([num_qz, num_qr])] + long_label = [qz_label, qr_label][np.argmax([num_qz, num_qr])] + short_ulabel = [uqz_label, uqr_label][np.argmin([num_qz, num_qr])] + long_ulabel = [uqz_label, uqr_label][np.argmax([num_qz, num_qr])] + else: + short_label = qz_label + long_label = qr_label + short_ulabel = uqz_label + long_ulabel = uqr_label + # print( long_ulabel ) + # print( qz_label,qr_label ) + # print( short_label, long_label ) + + if geometry == "saxs" or geometry == "gi_waxs": + ind_long = [range(num_long)] + else: + ind_long = [np.where(short_label == i)[0] for i in short_ulabel] + + if Nqs == 1: + long_ulabel = list(qval_dict.values())[0] + long_label = list(qval_dict.values())[0] + return ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) + + +############################################ +##a good func to plot g2 for all types of geogmetries +############################################ + + +def plot_g2_general( + g2_dict, + taus_dict, + qval_dict, + g2_err_dict=None, + fit_res=None, + geometry="saxs", + filename="g2", + path=None, + function="simple_exponential", + g2_labels=None, + fig_ysize=12, + qth_interest=None, + ylabel="g2", + return_fig=False, + append_name="", + outsize=(2000, 2400), + max_plotnum_fig=16, + figsize=(10, 12), + show_average_ang_saxs=True, + qphi_analysis=False, + fontsize_sublabel=12, + *argv, + **kwargs, +): + """ + Jan 10, 2018 add g2_err_dict option to plot g2 with error bar + Oct31, 2017 add qth_interest option + + Dec 26,2016, Y.G.@CHX + + Plot one/four-time correlation function (with fit) for different 
geometry + + The support functions include simple exponential and stretched/compressed exponential + Parameters + ---------- + g2_dict: dict, format as {1: g2_1, 2: g2_2, 3: g2_3...} one-time correlation function, g1,g2, g3,...must have the same shape + taus_dict, dict, format {1: tau_1, 2: tau_2, 3: tau_3...}, tau1,tau2, tau3,...must have the same shape + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + + fit_res: give all the fitting parameters for showing in the plot + qth_interest: if not None: should be a list, and will only plot the qth_interest qs + filename: for the title of plot + append_name: if not None, will save as filename + append_name as filename + path: the path to save data + outsize: for gi/ang_saxs, will combine all the different qz images together with outsize + function: + 'simple_exponential': fit by a simple exponential function, defined as + beta * np.exp(-2 * relaxation_rate * lags) + baseline + 'streched_exponential': fit by a streched exponential function, defined as + beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline + geometry: + 'saxs': a saxs with Qr partition + 'ang_saxs': a saxs with Qr and angular partition + 'gi_saxs': gisaxs with Qz, Qr + + one_plot: if True, plot all images in one pannel + kwargs: + + Returns + ------- + None + + ToDoList: plot an average g2 for ang_saxs for each q + + """ + + if ylabel == "g2": + ylabel = "g_2" + if ylabel == "g4": + ylabel = "g_4" + + if geometry == "saxs": + if qphi_analysis: + geometry = "ang_saxs" + if qth_interest != None: + if not isinstance(qth_interest, list): + print("Please give a list for qth_interest") + else: + # g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res + qth_interest = np.array(qth_interest) - 1 + g2_dict_ = {} + # taus_dict_ = {} + for k in list(g2_dict.keys()): + g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] + # for k in list(taus_dict.keys()): + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + taus_dict_ = taus_dict + qval_dict_ = {k: qval_dict[k] for k in qth_interest} + if fit_res != None: + fit_res_ = [fit_res[k] for k in qth_interest] + else: + fit_res_ = None + else: + g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res + + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) + fps = [] + + # $print( num_short, num_long ) + + for s_ind in range(num_short): + ind_long_i = ind_long[s_ind] + num_long_i = len(ind_long_i) + # if show_average_ang_saxs: + # if geometry=='ang_saxs': + # num_long_i += 1 + if RUN_GUI: + fig = Figure(figsize=(10, 12)) + else: + # fig = plt.figure( ) + if num_long_i <= 4: + if master_plot != "qz": + fig = plt.figure(figsize=(8, 6)) + else: + if num_short > 1: + fig = plt.figure(figsize=(8, 4)) + else: + fig = plt.figure(figsize=(10, 6)) + # print('Here') + elif num_long_i > max_plotnum_fig: + num_fig = int(np.ceil(num_long_i / max_plotnum_fig)) # num_long_i //16 + fig = [plt.figure(figsize=figsize) for i in range(num_fig)] + # print( figsize ) + else: + # print('Here') + if master_plot != "qz": + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure(figsize=(10, 10)) + + if master_plot == "qz": + if geometry == 
"ang_saxs": + title_short = "Angle= %.2f" % (short_ulabel[s_ind]) + r"$^\circ$" + elif geometry == "gi_saxs": + title_short = r"$Q_z= $" + "%.4f" % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + else: # qr + if geometry == "ang_saxs" or geometry == "gi_saxs": + title_short = r"$Q_r= $" + "%.5f " % (short_ulabel[s_ind]) + r"$\AA^{-1}$" + else: + title_short = "" + # print(geometry) + # filename ='' + til = "%s:--->%s" % (filename, title_short) + if num_long_i <= 4: + plt.title(til, fontsize=14, y=1.15) + # plt.title( til,fontsize=20, y =1.06) + # print('here') + else: + plt.title(til, fontsize=20, y=1.06) + # print( num_long ) + if num_long != 1: + # print( 'here') + plt.axis("off") + # sy = min(num_long_i,4) + sy = min(num_long_i, int(np.ceil(min(max_plotnum_fig, num_long_i) / 4))) + # fig.set_size_inches(10, 12) + # fig.set_size_inches(10, fig_ysize ) + else: + sy = 1 + # fig.set_size_inches(8,6) + # plt.axis('off') + sx = min(4, int(np.ceil(min(max_plotnum_fig, num_long_i) / float(sy)))) + + temp = sy + sy = sx + sx = temp + + # print( num_long_i, sx, sy ) + # print( master_plot ) + # print(ind_long_i, len(ind_long_i) ) + + for i, l_ind in enumerate(ind_long_i): + if num_long_i <= max_plotnum_fig: + # if s_ind ==2: + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + ax = fig.add_subplot(sx, sy, i + 1) + if sx == 1: + if sy == 1: + plt.axis("on") + else: + # fig_subnum = l_ind//max_plotnum_fig + # ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) + fig_subnum = i // max_plotnum_fig + # print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig ) + ax = fig[fig_subnum].add_subplot(sx, sy, i + 1 - fig_subnum * max_plotnum_fig) + + ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + if master_plot == "qz" or master_plot == "angle": + if geometry != "gi_waxs": + title_long = r"$Q_r= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + else: + title_long = r"$Q_r= $" + "%i " % (long_label[l_ind]) + # print( title_long,long_label,l_ind ) + else: + if geometry == "ang_saxs": + # title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\circ$' + '( %d )'%(l_ind) + title_long = "Ang= " + "%.2f" % (long_label[l_ind]) # + r'$^\circ$' + '( %d )'%(l_ind) + elif geometry == "gi_saxs": + title_long = r"$Q_z= $" + "%.5f " % (long_label[l_ind]) + r"$\AA^{-1}$" + else: + title_long = "" + # print( master_plot ) + if master_plot != "qz": + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.1, fontsize=12) + else: + ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.05, fontsize=fontsize_sublabel) + # print( geometry ) + # print( title_long ) + if qth_interest != None: # it might have a bug here, todolist!!! 
+ lab = sorted(list(qval_dict_.keys())) + # print( lab, l_ind) + ax.set_title(title_long + " (%s )" % (lab[l_ind] + 1), y=1.05, fontsize=12) + for ki, k in enumerate(list(g2_dict_.keys())): + if ki == 0: + c = "b" + if fit_res == None: + m = "-o" + else: + m = "o" + elif ki == 1: + c = "r" + if fit_res == None: + m = "s" + else: + m = "-" + elif ki == 2: + c = "g" + m = "-D" + else: + c = colors[ki + 2] + m = "-%s" % markers[ki + 2] + try: + dumy = g2_dict_[k].shape + # print( 'here is the shape' ) + islist = False + except: + islist_n = len(g2_dict_[k]) + islist = True + # print( 'here is the list' ) + if islist: + for nlst in range(islist_n): + m = "-%s" % markers[nlst] + # print(m) + y = g2_dict_[k][nlst][:, l_ind] + x = taus_dict_[k][nlst] + if ki == 0: + ymin, ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + # print('here ki ={} nlst = {}'.format( ki, nlst )) + if nlst == 0: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + else: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + yerr = g2_err_dict[k][nlst][:, l_ind] + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + else: + if nlst == 0: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + ax.set_xscale("log", nonposx="clip") + if nlst == 0: + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + + else: + y = g2_dict_[k][:, l_ind] + x = taus_dict_[k] + if ki == 0: + ymin, ymax = min(y), max(y[1:]) + if g2_err_dict == None: + if g2_labels == None: + ax.semilogx(x, y, m, color=c, markersize=6) + else: + ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) + else: + yerr = g2_err_dict[k][:, l_ind] + # print(x.shape, y.shape, yerr.shape) + # print(yerr) + if g2_labels == None: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) + else: + ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) + ax.set_xscale("log", nonposx="clip") + if l_ind == 0: + ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) + + if fit_res_ != None: + result1 = fit_res_[l_ind] + # print (result1.best_values) + + beta = result1.best_values["beta"] + baseline = result1.best_values["baseline"] + if function == "simple_exponential" or function == "simple": + rate = result1.best_values["relaxation_rate"] + alpha = 1.0 + elif function == "stretched_exponential" or function == "stretched": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + elif function == "stretched_vibration": + rate = result1.best_values["relaxation_rate"] + alpha = result1.best_values["alpha"] + freq = result1.best_values["freq"] + elif function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + freq = result1.best_values["freq"] + if function == "flow_para_function" or function == "flow_para" or function == "flow_vibration": + rate = result1.best_values["relaxation_rate"] + flow = result1.best_values["flow_velocity"] + if function == "flow_para_function_explicitq" or function == "flow_para_qang": + diff = result1.best_values["diffusion"] + qrr = short_ulabel[s_ind] + # print(qrr) + rate = diff * qrr**2 + flow = result1.best_values["flow_velocity"] + if qval_dict_ == None: + print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") + else: + pass + + if rate != 0: + txts = r"$\tau_0$" + r"$ = 
%.3f$" % (1 / rate) + r"$ s$" + else: + txts = r"$\tau_0$" + r"$ = inf$" + r"$ s$" + x = 0.25 + y0 = 0.9 + fontsize = 12 + ax.text(x=x, y=y0, s=txts, fontsize=fontsize, transform=ax.transAxes) + # print(function) + dt = 0 + if ( + function != "flow_para_function" + and function != "flow_para" + and function != "flow_vibration" + and function != "flow_para_qang" + ): + txts = r"$\alpha$" + r"$ = %.3f$" % (alpha) + dt += 0.1 + # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$baseline$" + r"$ = %.3f$" % (baseline) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if ( + function == "flow_para_function" + or function == "flow_para" + or function == "flow_vibration" + or function == "flow_para_qang" + ): + txts = r"$flow_v$" + r"$ = %.3f$" % (flow) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + if function == "stretched_vibration" or function == "flow_vibration": + txts = r"$vibration$" + r"$ = %.1f Hz$" % (freq) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + txts = r"$\beta$" + r"$ = %.3f$" % (beta) + dt += 0.1 + ax.text(x=x, y=y0 - dt, s=txts, fontsize=fontsize, transform=ax.transAxes) + + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + try: + ax.set_ylim([ymin * vmin, ymax * vmax]) + except: + pass + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + if num_short == 1: + fp = path + filename + else: + fp = path + filename + "_%s_%s" % (mastp, s_ind) + + if append_name != "": + fp = fp + append_name + fps.append(fp + ".png") + # if num_long_i <= 16: + if num_long_i <= max_plotnum_fig: + fig.set_tight_layout(True) + # fig.tight_layout() + # print(fig) + try: + plt.savefig(fp + ".png", dpi=fig.dpi) + except: + print("Can not save figure here.") + + else: + fps = [] + for fn, f in enumerate(fig): + f.set_tight_layout(True) + fp = path + filename + "_q_%s_%s" % (fn * 16, (fn + 1) * 16) + if append_name != "": + fp = fp + append_name + fps.append(fp + ".png") + f.savefig(fp + ".png", dpi=f.dpi) + # plt.savefig( fp + '.png', dpi=fig.dpi) + # combine each saved images together + + if (num_short != 1) or (num_long_i > 16): + outputfile = path + filename + ".png" + if append_name != "": + outputfile = path + filename + append_name + "__joint.png" + else: + outputfile = path + filename + "__joint.png" + combine_images(fps, outputfile, outsize=outsize) + if return_fig: + return fig + + +def power_func(x, D0, power=2): + return D0 * x**power + + +def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv, **kwargs): + """ + Dec 26,2016, Y.G.@CHX + + Fit q~rate by a power law function and fit curve pass (0,0) + + Parameters + ---------- + qval_dict, dict, with key as roi number, + format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs + format as {1: [qr1], 2: [qr2] ...} for saxs + format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs + rate: relaxation_rate + + Option: + if power_variable = False, power =2 to fit q^2~rate, + Otherwise, power is variable. 
+    Return:
+    D0
+    qrate_fit_res
+    """
+
+    power_variable = False
+
+    if "fit_range" in kwargs.keys():
+        fit_range = kwargs["fit_range"]
+    else:
+        fit_range = None
+
+    mod = Model(power_func)
+    # mod.set_param_hint( 'power', min=0.5, max= 10 )
+    # mod.set_param_hint( 'D0', min=0 )
+    pars = mod.make_params(power=2, D0=1e-5)
+    if power_variable:
+        pars["power"].vary = True
+    else:
+        pars["power"].vary = False
+
+    (
+        qr_label,
+        qz_label,
+        num_qz,
+        num_qr,
+        num_short,
+        num_long,
+        short_label,
+        long_label,
+        short_ulabel,
+        long_ulabel,
+        ind_long,
+        master_plot,
+        mastp,
+    ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)
+
+    Nqr = num_long
+    Nqz = num_short
+    D0 = np.zeros(Nqz)
+    power = 2  # np.zeros( Nqz )
+    qrate_fit_res = []
+    # print(Nqz)
+    for i in range(Nqz):
+        ind_long_i = ind_long[i]
+        y = np.array(rate)[ind_long_i]
+        x = long_label[ind_long_i]
+        # print(y,x)
+        if fit_range is not None:
+            y = y[fit_range[0] : fit_range[1]]
+            x = x[fit_range[0] : fit_range[1]]
+        # print (i, y,x)
+        _result = mod.fit(y, pars, x=x, weights=weights)
+        qrate_fit_res.append(_result)
+        D0[i] = _result.best_values["D0"]
+        # power[i] = _result.best_values['power']
+        print("The fitted diffusion coefficient D0 is: %.3e A^2 s^-1" % D0[i])
+    return D0, qrate_fit_res
+
+
+def plot_q_rate_fit_general(
+    qval_dict,
+    rate,
+    qrate_fit_res,
+    geometry="saxs",
+    ylim=None,
+    plot_all_range=True,
+    plot_index_range=None,
+    show_text=True,
+    return_fig=False,
+    show_fit=True,
+    *argv,
+    **kwargs,
+):
+    """
+    Dec 26,2016, Y.G.@CHX
+
+    Plot q ~ rate fitted by a power-law function; the fit curve passes through (0, 0).
+
+    Parameters
+    ----------
+    qval_dict, dict, with key as roi number,
+        format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs
+        format as {1: [qr1], 2: [qr2] ...} for saxs
+        format as {1: [qr1, qa1], 2: [qr2,qa2], ...} for ang-saxs
+    rate: relaxation_rate
+    plot_index_range:
+    Option:
+    if power_variable = False, power = 2 to fit q^2 ~ rate,
+    Otherwise, power is a free fit parameter.
+ show_fit:, bool, if False, not show the fit + + """ + + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + ( + qr_label, + qz_label, + num_qz, + num_qr, + num_short, + num_long, + short_label, + long_label, + short_ulabel, + long_ulabel, + ind_long, + master_plot, + mastp, + ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) + + power = 2 + fig, ax = plt.subplots() + plt.title(r"$Q^%s$" % (power) + "-Rate-%s_Fit" % (uid), fontsize=20, y=1.06) + Nqz = num_short + if Nqz != 1: + ls = "--" + else: + ls = "" + for i in range(Nqz): + ind_long_i = ind_long[i] + y = np.array(rate)[ind_long_i] + x = long_label[ind_long_i] + D0 = qrate_fit_res[i].best_values["D0"] + # print(i, x, y, D0 ) + if Nqz != 1: + label = r"$q_z=%.5f$" % short_ulabel[i] + else: + label = "" + ax.plot(x**power, y, marker="o", ls=ls, label=label) + yfit = qrate_fit_res[i].best_fit + + if show_fit: + if plot_all_range: + ax.plot(x**power, x**power * D0, "-r") + else: + ax.plot((x**power)[: len(yfit)], yfit, "-r") + + if show_text: + txts = r"$D0: %.3e$" % D0 + r" $A^2$" + r"$s^{-1}$" + dy = 0.1 + ax.text(x=0.15, y=0.65 - dy * i, s=txts, fontsize=14, transform=ax.transAxes) + if Nqz != 1: + legend = ax.legend(loc="best") + + if plot_index_range != None: + d1, d2 = plot_index_range + d2 = min(len(x) - 1, d2) + ax.set_xlim((x**power)[d1], (x**power)[d2]) + ax.set_ylim(y[d1], y[d2]) + if ylim != None: + ax.set_ylim(ylim) + + ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") + ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) + fp = path + "%s_Q_Rate" % (uid) + "_fit.png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + if return_fig: + return fig, ax + + +def save_g2_fit_para_tocsv(fit_res, filename, path): + """Y.G. Dec 29, 2016, + save g2 fitted parameter to csv file + """ + col = list(fit_res[0].best_values.keys()) + m, n = len(fit_res), len(col) + data = np.zeros([m, n]) + for i in range(m): + data[i] = list(fit_res[i].best_values.values()) + df = DataFrame(data) + df.columns = col + filename1 = os.path.join(path, filename) # + '.csv') + df.to_csv(filename1) + print("The g2 fitting parameters are saved in %s" % filename1) + return df + + +def R_2(ydata, fit_data): + """Calculates R squared for a particular fit - by L.W. + usage R_2(ydata,fit_data) + returns R2 + by L.W. Feb. 
2019 + """ + y_ave = np.average(ydata) + SS_tot = np.sum((np.array(ydata) - y_ave) ** 2) + # print('SS_tot: %s'%SS_tot) + SS_res = np.sum((np.array(ydata) - np.array(fit_data)) ** 2) + # print('SS_res: %s'%SS_res) + return 1 - SS_res / SS_tot + + +def is_outlier(points, thresh=3.5, verbose=False): + """MAD test""" + points.tolist() + if len(points) == 1: + points = points[:, None] + if verbose: + print("input to is_outlier is a single point...") + median = np.median(points) * np.ones(np.shape(points)) # , axis=0) + + diff = (points - median) ** 2 + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation + return modified_z_score > thresh + + +def outlier_mask( + avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, plot=False +): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. + verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc = 1 + + for rn in np.arange(1, np.max(roi_mask) + 1, 1): + rm = np.zeros(np.shape(roi_mask)) + rm = rm - 1 + rm[np.where(roi_mask == rn)] = 1 + pixel = roi.roi_pixel_values(avg_img * rm, roi_mask, [rn]) + out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) + if np.nanmax(out_l) > 0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) + if verbose: + print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) + if verbose: + print("upper outlier threshold: %s" % upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: + print("no upper outlier threshold found") + ind1 = (out_l * pixel[0][0]) > 0 + ind2 = (out_l * pixel[0][0]) < ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) + except: + lower_outlier_threshold = False + if verbose: + print("no lower outlier threshold found") + else: + if verbose: + print("ROI #%s: no outliers detected" % rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l) / len(pixel[0][0]) + if verbose: + print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: + print( + "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" + % maximum_outlier_fraction + ) + upper_outlier_threshold = False + 
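+            # too many pixels flagged in this ROI: clear both thresholds (the lower one is
+            # reset on the next line) so that no outlier masking is applied to this ROI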
lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img * rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img * rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: + fig, ax = plt.subplots(1, 5, figsize=(24, 4)) + plt.subplot(1, 5, pc) + pc += 1 + if pc > 5: + pc = 1 + pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) + plt.plot(pixel[0][0], "bo", markersize=1.5) + if upper_outlier_threshold or lower_outlier_threshold: + x = np.arange(len(out_l)) + plt.plot( + [x[0], x[-1]], + [ave_roi_int, ave_roi_int], + "g--", + label="ROI average: %s" % np.round(ave_roi_int, 4), + ) + if upper_outlier_threshold: + ind = (out_l * pixel[0][0]) > upper_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [upper_outlier_threshold, upper_outlier_threshold], + "r--", + label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + if lower_outlier_threshold: + ind = (out_l * pixel[0][0]) < lower_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [lower_outlier_threshold, lower_outlier_threshold], + "r--", + label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + plt.ylabel("Intensity") + plt.xlabel("pixel") + plt.title("ROI #: %s" % rn) + plt.legend(loc="best", fontsize=8) + + if plot: + fig, ax = plt.subplots() + plt.imshow(hhmask) + hot_dark = np.nonzero(hhmask < 1) + cmap = plt.cm.get_cmap("viridis") + plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) + + return hhmask diff --git a/pyCHX/backups/pyCHX-backup/chx_handlers.py b/pyCHX/backups/pyCHX-backup/chx_handlers.py new file mode 100644 index 0000000..998ce9c --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_handlers.py @@ -0,0 +1,48 @@ +###Copied from chxtools/chxtools/handlers.py +###https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py + + +# handler registration and database instantiation should be done +# here and only here! +from databroker import Broker +from databroker.assets.handlers_base import HandlerBase +from eiger_io.fs_handler import EigerHandler as EigerHandlerPIMS +from eiger_io.fs_handler import EigerImages as EigerImagesPIMS + +# from chxtools.pims_readers.eiger import EigerImages +from eiger_io.fs_handler_dask import EigerHandlerDask, EigerImagesDask + +""" +Tried to allow function to change namespace did not work. 
+DO NOT USE +""" + + +# toggle use of dask or no dask +# TODO : eventually choose one of the two +def use_pims(db): + global EigerImages, EigerHandler + EigerImages = EigerImagesPIMS + EigerHandler = EigerHandlerPIMS + db.reg.register_handler("AD_EIGER2", EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER", EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER_SLICE", EigerHandler, overwrite=True) + + +def use_dask(db): + global EigerImages, EigerHandler + + EigerImages = EigerImagesDask + EigerHandler = EigerHandlerDask + db.reg.register_handler("AD_EIGER2", EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER", EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER_SLICE", EigerHandler, overwrite=True) + + +# call use_pims or use_dask +# default is use_dask() +# TODO : This is hard coded +# calling this after import won't change things, need to find a better way +if __name__ == "__main__": + db = Broker.named("chx") + use_pims(db) diff --git a/pyCHX/backups/pyCHX-backup/chx_libs.py b/pyCHX/backups/pyCHX-backup/chx_libs.py new file mode 100644 index 0000000..4440215 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_libs.py @@ -0,0 +1,441 @@ +""" +Dec 10, 2015 Developed by Y.G.@CHX +yuzhang@bnl.gov +This module is for the necessary packages for the XPCS analysis +""" + +## Import all the required packages for Data Analysis +from databroker import Broker +from databroker.assets.path_only_handlers import RawHandler + +# edit handlers here to switch to PIMS or dask +# this does the databroker import +# from chxtools.handlers import EigerHandler +from eiger_io.fs_handler import EigerHandler +from IPython.core.magics.display import Javascript +from modest_image import imshow +from skbeam.core.utils import multi_tau_lags +from skimage.draw import disk, ellipse, line, line_aa, polygon + +db = Broker.named("chx") +import collections +import copy +import getpass +import itertools +import os +import pickle +import random +import sys +import time +import warnings +from datetime import datetime + +import h5py +import matplotlib as mpl +import matplotlib.cm as mcm +import matplotlib.pyplot as plt +import numpy as np +import pims +import skbeam.core.correlation as corr +import skbeam.core.roi as roi +import skbeam.core.utils as utils + +# * scikit-beam - data analysis tools for X-ray science +# - https://github.com/scikit-beam/scikit-beam +# * xray-vision - plotting helper functions for X-ray science +# - https://github.com/Nikea/xray-vision +import xray_vision +import xray_vision.mpl_plotting as mpl_plot +from lmfit import Model, Parameter, Parameters, minimize, report_fit +from matplotlib import gridspec +from matplotlib.colors import LogNorm +from matplotlib.figure import Figure +from mpl_toolkits.axes_grid1 import make_axes_locatable +from pandas import DataFrame +from PIL import Image +from tqdm import tqdm +from xray_vision.mask.manual_mask import ManualMask +from xray_vision.mpl_plotting import speckle + +mcolors = itertools.cycle(["b", "g", "r", "c", "m", "y", "k", "darkgoldenrod", "oldlace", "brown", "dodgerblue"]) +markers = itertools.cycle(list(plt.Line2D.filled_markers)) +lstyles = itertools.cycle(["-", "--", "-.", ".", ":"]) +colors = itertools.cycle( + [ + "blue", + "darkolivegreen", + "brown", + "m", + "orange", + "hotpink", + "darkcyan", + "red", + "gray", + "green", + "black", + "cyan", + "purple", + "navy", + ] +) +colors_copy = itertools.cycle( + [ + "blue", + "darkolivegreen", + "brown", + "m", + "orange", + "hotpink", + "darkcyan", 
+ "red", + "gray", + "green", + "black", + "cyan", + "purple", + "navy", + ] +) +markers = itertools.cycle( + [ + "o", + "2", + "p", + "1", + "s", + "*", + "4", + "+", + "8", + "v", + "3", + "D", + "H", + "^", + ] +) +markers_copy = itertools.cycle( + [ + "o", + "2", + "p", + "1", + "s", + "*", + "4", + "+", + "8", + "v", + "3", + "D", + "H", + "^", + ] +) +RUN_GUI = False # if True for gui setup; else for notebook; the main code difference is the Figure() or plt.figure(figsize=(8, 6)) +markers = [ + "o", + "D", + "v", + "^", + "<", + ">", + "p", + "s", + "H", + "h", + "*", + "d", + "$I$", + "$L$", + "$O$", + "$V$", + "$E$", + "$c$", + "$h$", + "$x$", + "$b$", + "$e$", + "$a$", + "$m$", + "$l$", + "$i$", + "$n$", + "$e$", + "8", + "1", + "3", + "2", + "4", + "+", + "x", + "_", + "|", + ",", + "1", +] +markers = np.array(markers * 100) +markers = [ + "o", + "D", + "v", + "^", + "<", + ">", + "p", + "s", + "H", + "h", + "*", + "d", + "8", + "1", + "3", + "2", + "4", + "+", + "x", + "_", + "|", + ",", + "1", +] +markers = np.array(markers * 100) +colors = np.array( + [ + "darkorange", + "mediumturquoise", + "seashell", + "mediumaquamarine", + "darkblue", + "yellowgreen", + "mintcream", + "royalblue", + "springgreen", + "slategray", + "yellow", + "slateblue", + "darkslateblue", + "papayawhip", + "bisque", + "firebrick", + "burlywood", + "dodgerblue", + "dimgrey", + "chartreuse", + "deepskyblue", + "honeydew", + "orchid", + "teal", + "steelblue", + "limegreen", + "antiquewhite", + "linen", + "saddlebrown", + "grey", + "khaki", + "hotpink", + "darkslategray", + "forestgreen", + "lightsalmon", + "turquoise", + "navajowhite", + "darkgrey", + "darkkhaki", + "slategrey", + "indigo", + "darkolivegreen", + "aquamarine", + "moccasin", + "beige", + "ivory", + "olivedrab", + "whitesmoke", + "paleturquoise", + "blueviolet", + "tomato", + "aqua", + "palegoldenrod", + "cornsilk", + "navy", + "mediumvioletred", + "palevioletred", + "aliceblue", + "azure", + "orangered", + "lightgrey", + "lightpink", + "orange", + "wheat", + "darkorchid", + "mediumslateblue", + "lightslategray", + "green", + "lawngreen", + "mediumseagreen", + "darksalmon", + "pink", + "oldlace", + "sienna", + "dimgray", + "fuchsia", + "lemonchiffon", + "maroon", + "salmon", + "gainsboro", + "indianred", + "crimson", + "mistyrose", + "lightblue", + "darkgreen", + "lightgreen", + "deeppink", + "palegreen", + "thistle", + "lightcoral", + "lightgray", + "lightskyblue", + "mediumspringgreen", + "mediumblue", + "peru", + "lightgoldenrodyellow", + "darkseagreen", + "mediumorchid", + "coral", + "lightyellow", + "chocolate", + "lavenderblush", + "darkred", + "lightseagreen", + "darkviolet", + "lightcyan", + "cadetblue", + "blanchedalmond", + "midnightblue", + "lightsteelblue", + "darkcyan", + "floralwhite", + "darkgray", + "lavender", + "sandybrown", + "cornflowerblue", + "gray", + "mediumpurple", + "lightslategrey", + "seagreen", + "silver", + "darkmagenta", + "darkslategrey", + "darkgoldenrod", + "rosybrown", + "goldenrod", + "darkturquoise", + "plum", + "purple", + "olive", + "gold", + "powderblue", + "peachpuff", + "violet", + "lime", + "greenyellow", + "tan", + "skyblue", + "magenta", + "black", + "brown", + "green", + "cyan", + "red", + "blue", + ] + * 100 +) + +colors = colors[::-1] +colors_ = itertools.cycle(colors) +# colors_ = itertools.cycle(sorted_colors_ ) +markers_ = itertools.cycle(markers) +# Custom colormaps +################################################################################ +# ROYGBVR but with Cyan-Blue instead of Blue 
+color_list_cyclic_spectrum = [ + [1.0, 0.0, 0.0], + [1.0, 165.0 / 255.0, 0.0], + [1.0, 1.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.2, 1.0], + [148.0 / 255.0, 0.0, 211.0 / 255.0], + [1.0, 0.0, 0.0], +] +cmap_cyclic_spectrum = mpl.colors.LinearSegmentedColormap.from_list( + "cmap_cyclic_spectrum", color_list_cyclic_spectrum +) + +# classic jet, slightly tweaked +# (bears some similarity to mpl.cm.nipy_spectral) +color_list_jet_extended = [ + [0, 0, 0], + [0.18, 0, 0.18], + [0, 0, 0.5], + [0, 0, 1], + [0.0, 0.38888889, 1.0], + [0.0, 0.83333333, 1.0], + [0.3046595, 1.0, 0.66308244], + [0.66308244, 1.0, 0.3046595], + [1.0, 0.90123457, 0.0], + [1.0, 0.48971193, 0.0], + [1.0, 0.0781893, 0.0], + [1, 0, 0], + [0.5, 0.0, 0.0], +] +cmap_jet_extended = mpl.colors.LinearSegmentedColormap.from_list("cmap_jet_extended", color_list_jet_extended) + +# Tweaked version of "view.gtk" default color scale +color_list_vge = [ + [0.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0], + [0.0 / 255.0, 0.0 / 255.0, 254.0 / 255.0], + [188.0 / 255.0, 2.0 / 255.0, 107.0 / 255.0], + [254.0 / 255.0, 55.0 / 255.0, 0.0 / 255.0], + [254.0 / 255.0, 254.0 / 255.0, 0.0 / 255.0], + [254.0 / 255.0, 254.0 / 255.0, 254.0 / 255.0], +] +cmap_vge = mpl.colors.LinearSegmentedColormap.from_list("cmap_vge", color_list_vge) + +# High-dynamic-range (HDR) version of VGE +color_list_vge_hdr = [ + [255.0 / 255.0, 255.0 / 255.0, 255.0 / 255.0], + [0.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0], + [0.0 / 255.0, 0.0 / 255.0, 255.0 / 255.0], + [188.0 / 255.0, 0.0 / 255.0, 107.0 / 255.0], + [254.0 / 255.0, 55.0 / 255.0, 0.0 / 255.0], + [254.0 / 255.0, 254.0 / 255.0, 0.0 / 255.0], + [254.0 / 255.0, 254.0 / 255.0, 254.0 / 255.0], +] +cmap_vge_hdr = mpl.colors.LinearSegmentedColormap.from_list("cmap_vge_hdr", color_list_vge_hdr) + +# Simliar to Dectris ALBULA default color-scale +color_list_hdr_albula = [ + [255.0 / 255.0, 255.0 / 255.0, 255.0 / 255.0], + [0.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0], + [255.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0], + [255.0 / 255.0, 255.0 / 255.0, 0.0 / 255.0], + # [ 255.0/255.0, 255.0/255.0, 255.0/255.0], +] +cmap_hdr_albula = mpl.colors.LinearSegmentedColormap.from_list("cmap_hdr_albula", color_list_hdr_albula) +cmap_albula = cmap_hdr_albula +cmap_albula_r = mpl.colors.LinearSegmentedColormap.from_list("cmap_hdr_r", color_list_hdr_albula[::-1]) + +# Ugly color-scale, but good for highlighting many features in HDR data +color_list_cur_hdr_goldish = [ + [255.0 / 255.0, 255.0 / 255.0, 255.0 / 255.0], # white + [0.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0], # black + [100.0 / 255.0, 127.0 / 255.0, 255.0 / 255.0], # light blue + [0.0 / 255.0, 0.0 / 255.0, 127.0 / 255.0], # dark blue + # [ 0.0/255.0, 127.0/255.0, 0.0/255.0], # dark green + [127.0 / 255.0, 60.0 / 255.0, 0.0 / 255.0], # orange + [255.0 / 255.0, 255.0 / 255.0, 0.0 / 255.0], # yellow + [200.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0], # red + [255.0 / 255.0, 255.0 / 255.0, 255.0 / 255.0], # white +] +cmap_hdr_goldish = mpl.colors.LinearSegmentedColormap.from_list("cmap_hdr_goldish", color_list_cur_hdr_goldish) diff --git a/pyCHX/backups/pyCHX-backup/chx_olog.py b/pyCHX/backups/pyCHX-backup/chx_olog.py new file mode 100644 index 0000000..01b8a0b --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_olog.py @@ -0,0 +1,136 @@ +from pyOlog import Attachment, LogEntry, OlogClient, SimpleOlogClient +from pyOlog.OlogDataTypes import Logbook +olog_client = SimpleOlogClient(url='https://epics-services-chx.nsls2.bnl.local:38981/Olog') + +def create_olog_entry(text, logbooks="Data Acquisition"): + """ + Create a log 
entry to xf11id. + + Parameters + ---------- + text : str + the text string to add to the logbook + logbooks : str, optional + the name of the logbook to update + + Returns + ------- + eid : the entry id returned from the Olog server + """ + olog_client = SimpleOlogClient() + eid = olog_client.log(text, logbooks=logbooks) + return eid + + +def update_olog_uid_with_file(uid, text, filename, append_name=""): + """ + Attach text and file (with filename) to CHX olog with entry defined by uid. + + Parameters + ---------- + uid : str + string of unique id + text : str + string to put into olog book + filename : str + file name + append_name : str + first try to attach olog with the file, if there is already a same file + in attached file, copy the file with different filename (append + append_name), and then attach to olog + """ + atch = [Attachment(open(filename, "rb"))] + + try: + update_olog_uid(olog_client, uid=uid, text=text, attachments=atch) + except Exception: + from shutil import copyfile + + npname = f"{filename[:-4]}_{append_name}.pdf" + copyfile(filename, npname) + atch = [Attachment(open(npname, "rb"))] + print(f"Append {append_name} to the filename.") + update_olog_uid(olog_client,uid=uid, text=text, attachments=atch) + +def update_olog_logid_with_file(logid, text, filename=None, verbose=False): + """ + Attach text and file (with filename) to CHX olog with entry defined by + logid. + + Parameters + ---------- + logid : str + the log entry id + text : str + string to put into olog book + filename : str + file name + """ + if filename is not None: + atch = [Attachment(open(filename, "rb"))] + else: + atch = None + try: + update_olog_id(logid=logid, text=text, attachments=atch, verbose=verbose) + except Exception: + pass + + +def update_olog_id(olog_client, logid, text, attachments, verbose=True): + """ + Update olog book logid entry with text and attachments files. + + Parameters + ---------- + logid : integer + the log entry id + text : str + the text to update, will add this text to the old text + attachments : ??? + add new attachment files + + Example + ------- + filename1 = ('/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/' + 'Report_uid=af8f66.pdf') + atch = [Attachment(open(filename1, 'rb'))] + + update_olog_id(logid=29327, text='add_test_atch', attachmenents=atch) + """ + client = olog_client.session # This is an instance of OlogClient + url = client._url + + old_text = olog_client.find(id=logid)[0]["text"] + upd = LogEntry( + text=f"{old_text}\n{text}", + attachments=attachments, + logbooks=[Logbook(name="Operations", owner=None, active=True)], + ) + client.updateLog(logid, upd) + if verbose: + print(f"The url={url} was successfully updated with {text} and with " f"the attachments") + return old_text + +def update_olog_uid(olog_client, uid, text, attachments): + """ + Update olog book logid entry cotaining uid string with text and attachments + files. + + Parameters + ---------- + uid: str + the uid of a scan or a specficial string (only gives one log entry) + text: str + the text to update, will add this text to the old text + attachments: ??? 
+ add new attachment files + + Example + ------- + filename1 = ('/XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/' + 'Report_uid=af8f66.pdf') + atch = [Attachment(open(filename1, 'rb'))] + update_olog_uid(uid='af8f66', text='Add xpcs pdf report', attachments=atch) + """ + logid = olog_client.find(search=f"*{uid}*")[0]["id"] + update_olog_id(olog_client, logid, text, attachments) diff --git a/pyCHX/backups/pyCHX-backup/chx_outlier_detection.py b/pyCHX/backups/pyCHX-backup/chx_outlier_detection.py new file mode 100644 index 0000000..0e62cd8 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_outlier_detection.py @@ -0,0 +1,143 @@ +import numpy as np +try: # some genius moved roi within skbeam.... + from skbeam.core.utils import roi +except: + from skbeam.core import roi + +def is_outlier(points, thresh=3.5, verbose=False): + """MAD test""" + points.tolist() + if len(points) == 1: + points = points[:, None] + if verbose: + print("input to is_outlier is a single point...") + median = np.median(points) * np.ones(np.shape(points)) # , axis=0) + + diff = (points - median) ** 2 + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation + return modified_z_score > thresh + + +def outlier_mask( + avg_img, mask, roi_mask, outlier_threshold=7.5, maximum_outlier_fraction=0.1, verbose=False, plot=False +): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classifed as outliers. If the detected fraction is higher, no outliers will be masked for that ROI. 
+ verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc = 1 + + for rn in np.arange(1, np.max(roi_mask) + 1, 1): + rm = np.zeros(np.shape(roi_mask)) + rm = rm - 1 + rm[np.where(roi_mask == rn)] = 1 + pixel = roi.roi_pixel_values(avg_img * rm, roi_mask, [rn]) + out_l = is_outlier((avg_img * mask * rm)[rm > -1], thresh=outlier_threshold) + if np.nanmax(out_l) > 0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l < 1]) + if verbose: + print("ROI #%s\naverage ROI intensity: %s" % (rn, ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) + if verbose: + print("upper outlier threshold: %s" % upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: + print("no upper outlier threshold found") + ind1 = (out_l * pixel[0][0]) > 0 + ind2 = (out_l * pixel[0][0]) < ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) + except: + lower_outlier_threshold = False + if verbose: + print("no lower outlier threshold found") + else: + if verbose: + print("ROI #%s: no outliers detected" % rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l) / len(pixel[0][0]) + if verbose: + print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: + print( + "fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed" + % maximum_outlier_fraction + ) + upper_outlier_threshold = False + lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img * rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img * rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: + fig, ax = plt.subplots(1, 5, figsize=(24, 4)) + plt.subplot(1, 5, pc) + pc += 1 + if pc > 5: + pc = 1 + pixel = roi.roi_pixel_values(avg_img * rm * mask, roi_mask, [rn]) + plt.plot(pixel[0][0], "bo", markersize=1.5) + if upper_outlier_threshold or lower_outlier_threshold: + x = np.arange(len(out_l)) + plt.plot( + [x[0], x[-1]], + [ave_roi_int, ave_roi_int], + "g--", + label="ROI average: %s" % np.round(ave_roi_int, 4), + ) + if upper_outlier_threshold: + ind = (out_l * pixel[0][0]) > upper_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [upper_outlier_threshold, upper_outlier_threshold], + "r--", + label="upper thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + if lower_outlier_threshold: + ind = (out_l * pixel[0][0]) < lower_outlier_threshold + plt.plot(x[ind], (out_l * pixel[0][0])[ind], "r+") + plt.plot( + [x[0], x[-1]], + [lower_outlier_threshold, lower_outlier_threshold], + "r--", + label="lower thresh.: %s" % np.round(upper_outlier_threshold, 4), + ) + plt.ylabel("Intensity") + plt.xlabel("pixel") + plt.title("ROI #: %s" % rn) + plt.legend(loc="best", fontsize=8) + + if plot: + fig, ax = plt.subplots() + plt.imshow(hhmask) + 
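+        # mark every pixel flagged as an outlier (hhmask < 1) with a '+' on top of the mask image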
hot_dark = np.nonzero(hhmask < 1) + cmap = plt.cm.get_cmap("viridis") + plt.plot(hot_dark[1], hot_dark[0], "+", color=cmap(0)) + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title("masked pixels with outlier threshold: %s" % outlier_threshold) + + return hhmask diff --git a/pyCHX/backups/pyCHX-backup/chx_packages.py b/pyCHX/backups/pyCHX-backup/chx_packages.py new file mode 100644 index 0000000..318e158 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_packages.py @@ -0,0 +1,259 @@ +import pickle as cpk + +import historydict +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +from pyCHX.chx_handlers import use_dask, use_pims +from pyCHX.chx_libs import ( + EigerHandler, + Javascript, + LogNorm, + Model, + cmap_albula, + cmap_vge, + datetime, + db, + getpass, + h5py, + multi_tau_lags, + np, + os, + pims, + plt, + random, + roi, + time, + tqdm, + utils, + warnings, +) + +use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' + +from pyCHX.chx_compress import ( + MultifileBNLCustom, + combine_binary_files, + compress_eigerdata, + create_compress_header, + get_eigerImage_per_file, + init_compress_eigerdata, + para_compress_eigerdata, + para_segment_compress_eigerdata, + read_compressed_eigerdata, + segment_compress_eigerdata, +) +from pyCHX.chx_compress_analysis import ( + Multifile, + cal_each_ring_mean_intensityc, + cal_waterfallc, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + get_each_ring_mean_intensityc, + get_time_edge_avg_img, + mean_intensityc, + plot_each_ring_mean_intensityc, + plot_waterfallc, + read_compressed_eigerdata, +) +from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +from pyCHX.chx_generic_functions import ( + R_2, + apply_mask, + average_array_withNan, + check_bad_uids, + check_lost_metadata, + check_ROI_intensity, + check_shutter_open, + combine_images, + copy_data, + create_cross_mask, + create_fullImg_with_box, + create_hot_pixel_mask, + create_polygon_mask, + create_rectangle_mask, + create_ring_mask, + create_seg_ring, + create_time_slice, + create_user_folder, + delete_data, + extract_data_from_file, + filter_roi_mask, + find_bad_pixels, + find_bad_pixels_FD, + find_good_xpcs_uids, + find_index, + find_uids, + fit_one_peak_curve, + get_averaged_data_from_multi_res, + get_avg_img, + get_bad_frame_list, + get_base_all_filenames, + get_cross_point, + get_current_pipeline_filename, + get_current_pipeline_fullpath, + get_curve_turning_points, + get_detector, + get_detectors, + get_each_frame_intensity, + get_echos, + get_eigerImage_per_file, + get_fit_by_two_linear, + get_fra_num_by_dose, + get_g2_fit_general, + get_image_edge, + get_image_with_roi, + get_img_from_iq, + get_last_uids, + get_mass_center_one_roi, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_non_uniform_edges, + get_print_uids, + get_q_rate_fit_general, + get_qval_dict, + get_qval_qwid_dict, + get_roi_mask_qval_qwid_by_shift, + get_roi_nr, + get_series_g2_taus, + get_SG_norm, + get_sid_filenames, + get_sid_filenames_v2, + get_sid_filenames_v3, + get_today_date, + get_touched_qwidth, + get_waxs_beam_center, + lin2log_g2, + linear_fit, + load_dask_data, + load_data, + load_mask, + load_pilatus, + ls_dir, + mask_badpixels, + 
mask_exclude_badpixel, + move_beamstop, + pad_length, + pload_obj, + plot1D, + plot_fit_two_linear_fit, + plot_g2_general, + plot_q_g2fitpara_general, + plot_q_rate_fit_general, + plot_q_rate_general, + plot_xy_with_fit, + plot_xy_x2, + print_dict, + psave_obj, + read_dict_csv, + refine_roi_mask, + reverse_updown, + ring_edges, + run_time, + save_array_to_tiff, + save_arrays, + save_current_pipeline, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_lists, + save_oavs_tifs, + save_oavs_tifs_v2, + sgolay2d, + shift_mask, + show_img, + show_ROI_on_image, + shrink_image, + trans_data_to_pd, + update_qval_dict, + update_roi_mask, + validate_uid, +) +from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +from pyCHX.chx_specklecp import ( + get_binned_his_std, + get_contrast, + get_his_std_from_pds, + get_xsvs_fit, + plot_g2_contrast, + plot_xsvs_fit, + save_bin_his_std, + save_KM, + xsvsc, + xsvsp, +) +from pyCHX.Create_Report import ( + create_multi_pdf_reports_for_uids, + create_one_pdf_reports_for_uids, + create_pdf_report, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + make_pdf_report, +) +from pyCHX.DataGonio import qphiavg +from pyCHX.SAXS import ( + fit_form_factor, + fit_form_factor2, + form_factor_residuals_bg_lmfit, + form_factor_residuals_lmfit, + get_form_factor_fit_lmfit, + poly_sphere_form_factor_intensity, + show_saxs_qmap, +) +from pyCHX.Two_Time_Correlation_Function import ( + get_aged_g2_from_g12, + get_aged_g2_from_g12q, + get_four_time_from_two_time, + get_one_time_from_two_time, + rotate_g12q_to_rectangle, + show_C12, +) +from pyCHX.XPCS_GiSAXS import ( + cal_1d_qr, + convert_gisaxs_pixel_to_q, + fit_qr_qz_rate, + get_1d_qr, + get_each_box_mean_intensity, + get_gisaxs_roi, + get_qedge, + get_qmap_label, + get_qr_tick_label, + get_qzr_map, + get_qzrmap, + get_reflected_angles, + get_t_qrc, + multi_uids_gisaxs_xpcs_analysis, + plot_gisaxs_g4, + plot_gisaxs_two_g2, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_qrc, + show_qzr_map, + show_qzr_roi, +) +from pyCHX.XPCS_SAXS import ( + cal_g2, + combine_two_roi_mask, + create_hot_pixel_mask, + get_angular_mask, + get_circular_average, + get_cirucular_average_std, + get_each_ring_mean_intensity, + get_QrQw_From_RoiMask, + get_ring_mask, + get_seg_from_ring_mask, + get_t_iq, + get_t_iqc, + get_t_iqc_imstack, + multi_uids_saxs_xpcs_analysis, + plot_circular_average, + plot_qIq_with_ROI, + plot_t_iqc, + recover_img_from_iq, + save_lists, +) diff --git a/pyCHX/backups/pyCHX-backup/chx_packages_local.py b/pyCHX/backups/pyCHX-backup/chx_packages_local.py new file mode 100644 index 0000000..979f9dc --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_packages_local.py @@ -0,0 +1,323 @@ +### This enables local import of pyCHX for testing + +import pickle as cpk + +import historydict + +# from pyCHX.chx_handlers import use_dask, use_pims +from chx_handlers import use_dask, use_pims + +# from pyCHX.chx_libs import ( +from chx_libs import ( + EigerHandler, + Javascript, + LogNorm, + Model, + cmap_albula, + cmap_vge, + datetime, + db, + getpass, + h5py, + multi_tau_lags, + np, + os, + pims, + plt, + random, + roi, + time, + tqdm, + utils, + warnings, +) +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +# changes to current version of chx_packages.py +# added load_dask_data in generic_functions + + +use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' + 
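+# To load Eiger frames lazily with dask instead of pims, the registration above could be
+# swapped for the use_dask helper imported from chx_handlers (a sketch, not enabled here):
+#     use_dask(db)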
+# from pyCHX.chx_compress import ( +from chx_compress import ( + MultifileBNLCustom, + combine_binary_files, + compress_eigerdata, + create_compress_header, + get_eigerImage_per_file, + init_compress_eigerdata, + para_compress_eigerdata, + para_segment_compress_eigerdata, + read_compressed_eigerdata, + segment_compress_eigerdata, +) + +# from pyCHX.chx_compress_analysis import ( +from chx_compress_analysis import ( + Multifile, + cal_each_ring_mean_intensityc, + cal_waterfallc, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + get_each_ring_mean_intensityc, + get_time_edge_avg_img, + mean_intensityc, + plot_each_ring_mean_intensityc, + plot_waterfallc, + read_compressed_eigerdata, +) + +# from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq + +# from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +from chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF + +# from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +from chx_crosscor import CrossCorrelator2, run_para_ccorr_sym + +# from pyCHX.chx_generic_functions import ( +from chx_generic_functions import ( + R_2, + RemoveHot, + apply_mask, + average_array_withNan, + check_bad_uids, + check_lost_metadata, + check_ROI_intensity, + check_shutter_open, + combine_images, + copy_data, + create_cross_mask, + create_fullImg_with_box, + create_hot_pixel_mask, + create_multi_rotated_rectangle_mask, + create_polygon_mask, + create_rectangle_mask, + create_ring_mask, + create_seg_ring, + create_time_slice, + create_user_folder, + delete_data, + extract_data_from_file, + filter_roi_mask, + find_bad_pixels, + find_bad_pixels_FD, + find_good_xpcs_uids, + find_index, + find_uids, + fit_one_peak_curve, + get_averaged_data_from_multi_res, + get_avg_img, + get_bad_frame_list, + get_base_all_filenames, + get_cross_point, + get_current_pipeline_filename, + get_current_pipeline_fullpath, + get_curve_turning_points, + get_detector, + get_detectors, + get_each_frame_intensity, + get_echos, + get_eigerImage_per_file, + get_fit_by_two_linear, + get_fra_num_by_dose, + get_g2_fit_general, + get_image_edge, + get_image_with_roi, + get_img_from_iq, + get_last_uids, + get_mass_center_one_roi, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_non_uniform_edges, + get_print_uids, + get_q_rate_fit_general, + get_qval_dict, + get_qval_qwid_dict, + get_roi_mask_qval_qwid_by_shift, + get_roi_nr, + get_series_g2_taus, + get_SG_norm, + get_sid_filenames, + get_sid_filenames_v2, + get_sid_filenames_v3, + get_today_date, + get_touched_qwidth, + get_waxs_beam_center, + lin2log_g2, + linear_fit, + load_dask_data, + load_data, + load_mask, + load_pilatus, + ls_dir, + mask_badpixels, + mask_exclude_badpixel, + move_beamstop, + pad_length, + pload_obj, + plot1D, + plot_fit_two_linear_fit, + plot_g2_general, + plot_q_g2fitpara_general, + plot_q_rate_fit_general, + plot_q_rate_general, + plot_xy_with_fit, + plot_xy_x2, + print_dict, + psave_obj, + read_dict_csv, + refine_roi_mask, + reverse_updown, + ring_edges, + run_time, + save_array_to_tiff, + save_arrays, + save_current_pipeline, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_lists, + save_oavs_tifs, + save_oavs_tifs_v2, + sgolay2d, + shift_mask, + show_img, + show_ROI_on_image, + shrink_image, + 
trans_data_to_pd, + update_qval_dict, + update_roi_mask, + validate_uid, +) + +# from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +# from chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file + +# from pyCHX.chx_outlier_detection import ( +from chx_outlier_detection import is_outlier, outlier_mask + +# from pyCHX.chx_specklecp import ( +from chx_specklecp import ( + get_binned_his_std, + get_contrast, + get_his_std_from_pds, + get_xsvs_fit, + plot_g2_contrast, + plot_xsvs_fit, + save_bin_his_std, + save_KM, + xsvsc, + xsvsp, +) + +# from pyCH.chx_xpcs_xsvs_jupyter_V1 import( +from chx_xpcs_xsvs_jupyter_V1 import ( + compress_multi_uids, + do_compress_on_line, + get_fra_num_by_dose, + get_iq_from_uids, + get_series_g2_from_g12, + get_series_one_time_mulit_uids, + get_t_iqc_uids, + get_two_time_mulit_uids, + get_uids_by_range, + get_uids_in_time_period, + plot_dose_g2, + plot_entries_from_csvlist, + plot_entries_from_uids, + plot_t_iqc_uids, + plot_t_iqtMq2, + realtime_xpcs_analysis, + run_xpcs_xsvs_single, + wait_data_acquistion_finish, + wait_func, +) + +# from pyCHX.Create_Report import ( +from Create_Report import ( + create_multi_pdf_reports_for_uids, + create_one_pdf_reports_for_uids, + create_pdf_report, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + make_pdf_report, +) + +# from pyCHX.DataGonio import qphiavg +from DataGonio import qphiavg + +# from pyCHX.SAXS import ( +from SAXS import ( + fit_form_factor, + fit_form_factor2, + form_factor_residuals_bg_lmfit, + form_factor_residuals_lmfit, + get_form_factor_fit_lmfit, + poly_sphere_form_factor_intensity, + show_saxs_qmap, +) + +# from pyCHX.Two_Time_Correlation_Function import ( +from Two_Time_Correlation_Function import ( + get_aged_g2_from_g12, + get_aged_g2_from_g12q, + get_four_time_from_two_time, + get_one_time_from_two_time, + rotate_g12q_to_rectangle, + show_C12, +) + +# from pyCHX.XPCS_GiSAXS import ( +from XPCS_GiSAXS import ( + cal_1d_qr, + convert_gisaxs_pixel_to_q, + fit_qr_qz_rate, + get_1d_qr, + get_each_box_mean_intensity, + get_gisaxs_roi, + get_qedge, + get_qmap_label, + get_qr_tick_label, + get_qzr_map, + get_qzrmap, + get_reflected_angles, + get_t_qrc, + multi_uids_gisaxs_xpcs_analysis, + plot_gisaxs_g4, + plot_gisaxs_two_g2, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_qrc, + show_qzr_map, + show_qzr_roi, +) + +# from pyCHX.XPCS_SAXS import ( +from XPCS_SAXS import ( + cal_g2, + combine_two_roi_mask, + create_hot_pixel_mask, + get_angular_mask, + get_circular_average, + get_cirucular_average_std, + get_each_ring_mean_intensity, + get_QrQw_From_RoiMask, + get_ring_mask, + get_seg_from_ring_mask, + get_t_iq, + get_t_iqc, + get_t_iqc_imstack, + multi_uids_saxs_xpcs_analysis, + plot_circular_average, + plot_qIq_with_ROI, + plot_t_iqc, + recover_img_from_iq, + save_lists, +) diff --git a/pyCHX/backups/pyCHX-backup/chx_speckle.py b/pyCHX/backups/pyCHX-backup/chx_speckle.py new file mode 100644 index 0000000..a6eb8f3 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_speckle.py @@ -0,0 +1,1145 @@ +""" +X-ray speckle visibility spectroscopy(XSVS) - Dynamic information of +the speckle patterns are obtained by analyzing the speckle statistics +and calculating the speckle contrast in single scattering patterns. 
+This module will provide XSVS analysis tools +""" + +from __future__ import absolute_import, division, print_function + +import logging +import time + +import six +from skbeam.core import roi +from skbeam.core.utils import bin_edges_to_centers, geometric_series + +logger = logging.getLogger(__name__) + +import sys +from datetime import datetime + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import scipy as sp +import scipy.stats as st +from matplotlib.colors import LogNorm +from scipy.optimize import leastsq, minimize + + +def xsvs( + image_sets, + label_array, + number_of_img, + timebin_num=2, + time_bin=None, + only_first_level=False, + max_cts=None, + bad_images=None, + threshold=None, +): + """ + This function will provide the probability density of detecting photons + for different integration times. + The experimental probability density P(K) of detecting photons K is + obtained by histogramming the speckle counts over an ensemble of + equivalent pixels and over a number of speckle patterns recorded + with the same integration time T under the same condition. + Parameters + ---------- + image_sets : array + sets of images + label_array : array + labeled array; 0 is background. + Each ROI is represented by a distinct label (i.e., integer). + number_of_img : int + number of images (how far to go with integration times when finding + the time_bin, using skxray.utils.geometric function) + timebin_num : int, optional + integration time; default is 2 + max_cts : int, optional + the brightest pixel in any ROI in any image in the image set. + defaults to using skxray.core.roi.roi_max_counts to determine + the brightest pixel in any of the ROIs + + + bad_images: array, optional + the bad images number list, the XSVS will not analyze the binning image groups which involve any bad images + threshold: float, optional + If one image involves a pixel with intensity above threshold, such image will be considered as a bad image. + + + Returns + ------- + prob_k_all : array + probability density of detecting photons + prob_k_std_dev : array + standard deviation of probability density of detecting photons + Notes + ----- + These implementation is based on following references + References: text [1]_, text [2]_ + .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini, + C. Carona and A. Fluerasu , "Photon statistics and speckle visibility + spectroscopy with partially coherent x-rays" J. Synchrotron Rad., + vol 21, p 1288-1295, 2014. + .. [2] R. Bandyopadhyay, A. S. Gittings, S. S. Suh, P.K. Dixon and + D.J. Durian "Speckle-visibilty Spectroscopy: A tool to study + time-varying dynamics" Rev. Sci. Instrum. vol 76, p 093110, 2005. + There is an example in https://github.com/scikit-xray/scikit-xray-examples + It will demonstrate the use of these functions in this module for + experimental data. 
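+    Example
+    -------
+    A minimal sketch (names are illustrative; `imgs` is one image series and `roi_mask`
+    a labeled ROI array as described above):
+        bin_edges, prob_k, prob_k_std = xsvs(
+            (imgs,), roi_mask, number_of_img=len(imgs), timebin_num=2
+        )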
+ """ + if max_cts is None: + max_cts = roi.roi_max_counts(image_sets, label_array) + + # find the label's and pixel indices for ROI's + labels, indices = roi.extract_label_indices(label_array) + nopixels = len(indices) + # number of ROI's + u_labels = list(np.unique(labels)) + num_roi = len(u_labels) + + # create integration times + if time_bin is None: + time_bin = geometric_series(timebin_num, number_of_img) + if only_first_level: + time_bin = [1] + # number of times in the time bin + num_times = len(time_bin) + + # number of pixels per ROI + num_pixels = np.bincount(labels, minlength=(num_roi + 1))[1:] + + # probability density of detecting photons + prob_k_all = np.zeros([num_times, num_roi], dtype=np.object) + + # square of probability density of detecting photons + prob_k_pow_all = np.zeros_like(prob_k_all) + + # standard deviation of probability density of detecting photons + prob_k_std_dev = np.zeros_like(prob_k_all) + + # get the bin edges for each time bin for each ROI + bin_edges = np.zeros(prob_k_all.shape[0], dtype=prob_k_all.dtype) + for i in range(num_times): + bin_edges[i] = np.arange(max_cts * 2**i) + + start_time = time.time() # used to log the computation time (optionally) + + for i, images in enumerate(image_sets): + # print( i, images ) + # Ring buffer, a buffer with periodic boundary conditions. + # Images must be keep for up to maximum delay in buf. + # buf = np.zeros([num_times, timebin_num], dtype=np.object) # matrix of buffers + + buf = np.ma.zeros([num_times, timebin_num, nopixels]) + buf.mask = True + + # to track processing each time level + track_level = np.zeros(num_times) + track_bad_level = np.zeros(num_times) + # to increment buffer + cur = np.int_(np.full(num_times, timebin_num)) + + # to track how many images processed in each level + img_per_level = np.zeros(num_times, dtype=np.int64) + + prob_k = np.zeros_like(prob_k_all) + prob_k_pow = np.zeros_like(prob_k_all) + + try: + noframes = len(images) + except: + noframes = images.length + + # Num= { key: [0]* len( dict_dly[key] ) for key in list(dict_dly.keys()) } + + for n, img in enumerate(images): + cur[0] = 1 + cur[0] % timebin_num + # read each frame + # Put the image into the ring buffer. + + img_ = (np.ravel(img))[indices] + + if threshold is not None: + if img_.max() >= threshold: + print("bad image: %s here!" % n) + img_ = np.ma.zeros(len(img_)) + img_.mask = True + + if bad_images is not None: + if n in bad_images: + print("bad image: %s here!" 
% n) + img_ = np.ma.zeros(len(img_)) + img_.mask = True + + buf[0, cur[0] - 1] = img_ + + # print( n, np.sum(buf[0, cur[0] - 1] ), np.sum( img ) ) + + _process( + num_roi, + 0, + cur[0] - 1, + buf, + img_per_level, + labels, + max_cts, + bin_edges[0], + prob_k, + prob_k_pow, + track_bad_level, + ) + + # print (0, img_per_level) + + # check whether the number of levels is one, otherwise + # continue processing the next level + level = 1 + if number_of_img > 1: + processing = 1 + else: + processing = 0 + # print ('track_level: %s'%track_level) + # while level < num_times: + # if not track_level[level]: + # track_level[level] = 1 + if only_first_level: + processing = 0 + while processing: + if track_level[level]: + prev = 1 + (cur[level - 1] - 2) % timebin_num + cur[level] = 1 + cur[level] % timebin_num + + bufa = buf[level - 1, prev - 1] + bufb = buf[level - 1, cur[level - 1] - 1] + + if (bufa.data == 0).all(): + buf[level, cur[level] - 1] = bufa + elif (bufb.data == 0).all(): + buf[level, cur[level] - 1] = bufb + else: + buf[level, cur[level] - 1] = bufa + bufb + + # print (level, cur[level]-1) + + track_level[level] = 0 + + _process( + num_roi, + level, + cur[level] - 1, + buf, + img_per_level, + labels, + max_cts, + bin_edges[level], + prob_k, + prob_k_pow, + track_bad_level, + ) + level += 1 + if level < num_times: + processing = 1 + else: + processing = 0 + + else: + track_level[level] = 1 + processing = 0 + # print ('track_level: %s'%track_level) + + if noframes >= 10 and n % (int(noframes / 10)) == 0: + sys.stdout.write("#") + sys.stdout.flush() + + prob_k_all += (prob_k - prob_k_all) / (i + 1) + prob_k_pow_all += (prob_k_pow - prob_k_pow_all) / (i + 1) + + prob_k_std_dev = np.power((prob_k_pow_all - np.power(prob_k_all, 2)), 0.5) + + for i in range(num_times): + if isinstance(prob_k_all[i, 0], float): + for j in range(len(u_labels)): + prob_k_all[i, j] = np.array([0] * (len(bin_edges[i]) - 1)) + prob_k_std_dev[i, j] = np.array([0] * (len(bin_edges[i]) - 1)) + + logger.info("Processing time for XSVS took %s seconds." "", (time.time() - start_time)) + elapsed_time = time.time() - start_time + # print (Num) + print("Total time: %.2f min" % (elapsed_time / 60.0)) + + # print (img_per_level - track_bad_level) + # print (buf) + + return bin_edges, prob_k_all, prob_k_std_dev + + +def _process( + num_roi, level, buf_no, buf, img_per_level, labels, max_cts, bin_edges, prob_k, prob_k_pow, track_bad_level +): + """ + Internal helper function. This modifies inputs in place. + This helper function calculate probability of detecting photons for + each integration time. + .. warning :: This function mutates the input values. 
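+
+    For every non-empty buffer the per-ROI photon histogram is folded into
+    prob_k as a running average,
+    prob_k[level, j] += (hist - prob_k[level, j]) / n_good, with
+    n_good = img_per_level[level] - track_bad_level[level], which is
+    equivalent to averaging the histograms of all good frames at that level.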
+ Parameters + ---------- + num_roi : int + number of ROI's + level : int + current time level(integration time) + buf_no : int + current buffer number + buf : array + image data array to use for XSVS + img_per_level : int + to track how many images processed in each level + labels : array + labels of the required region of interests(ROI's) + max_cts: int + maximum pixel count + bin_edges : array + bin edges for each integration times and each ROI + prob_k : array + probability density of detecting photons + prob_k_pow : array + squares of probability density of detecting photons + """ + img_per_level[level] += 1 + data = buf[level, buf_no] + if (data.data == 0).all(): + track_bad_level[level] += 1 + + # print (img_per_level,track_bad_level) + + u_labels = list(np.unique(labels)) + + if not (data.data == 0).all(): + for j, label in enumerate(u_labels): + roi_data = data[labels == label] + spe_hist, bin_edges = np.histogram(roi_data, bins=bin_edges, density=True) + spe_hist = np.nan_to_num(spe_hist) + prob_k[level, j] += (spe_hist - prob_k[level, j]) / (img_per_level[level] - track_bad_level[level]) + + prob_k_pow[level, j] += (np.power(spe_hist, 2) - prob_k_pow[level, j]) / ( + img_per_level[level] - track_bad_level[level] + ) + + +def normalize_bin_edges(num_times, num_rois, mean_roi, max_cts): + """ + This will provide the normalized bin edges and bin centers for each + integration time. + Parameters + ---------- + num_times : int + number of integration times for XSVS + num_rois : int + number of ROI's + mean_roi : array + mean intensity of each ROI + shape (number of ROI's) + max_cts : int + maximum pixel counts + Returns + ------- + norm_bin_edges : array + normalized speckle count bin edges + shape (num_times, num_rois) + norm_bin_centers :array + normalized speckle count bin centers + shape (num_times, num_rois) + """ + norm_bin_edges = np.zeros((num_times, num_rois), dtype=object) + norm_bin_centers = np.zeros_like(norm_bin_edges) + for i in range(num_times): + for j in range(num_rois): + norm_bin_edges[i, j] = np.arange(max_cts * 2**i) / (mean_roi[j] * 2**i) + norm_bin_centers[i, j] = bin_edges_to_centers(norm_bin_edges[i, j]) + + return norm_bin_edges, norm_bin_centers + + +def get_bin_edges(num_times, num_rois, mean_roi, max_cts): + """ + This will provide the normalized bin edges and bin centers for each + integration time. 
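+    For time level i the raw edges are the integer photon counts
+    0, 1, ..., max_cts * 2**i - 1; the normalized edges divide these by
+    mean_roi[j] * 2**i, so that histograms taken at different integration
+    times can be overlaid on a common K/<K> axis.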
+    Parameters
+    ----------
+    num_times : int
+        number of integration times for XSVS
+    num_rois : int
+        number of ROI's
+    mean_roi : array
+        mean intensity of each ROI
+        shape (number of ROI's)
+    max_cts : int
+        maximum pixel counts
+    Returns
+    -------
+    bin_edges : array
+        raw speckle count bin edges
+        shape (num_times, num_rois)
+    bin_centers : array
+        raw speckle count bin centers
+        shape (num_times, num_rois)
+    norm_bin_edges : array
+        normalized speckle count bin edges
+        shape (num_times, num_rois)
+    norm_bin_centers : array
+        normalized speckle count bin centers
+        shape (num_times, num_rois)
+    """
+    norm_bin_edges = np.zeros((num_times, num_rois), dtype=object)
+    norm_bin_centers = np.zeros_like(norm_bin_edges)
+
+    bin_edges = np.zeros((num_times, num_rois), dtype=object)
+    bin_centers = np.zeros_like(bin_edges)
+
+    for i in range(num_times):
+        for j in range(num_rois):
+            bin_edges[i, j] = np.arange(max_cts * 2**i)
+            bin_centers[i, j] = bin_edges_to_centers(bin_edges[i, j])
+            norm_bin_edges[i, j] = bin_edges[i, j] / (mean_roi[j] * 2**i)
+            norm_bin_centers[i, j] = bin_edges_to_centers(norm_bin_edges[i, j])
+
+    return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers
+
+
+#################
+##for fit
+###################
+
+from scipy import stats
+from scipy.special import gamma, gammaln
+
+
+def gammaDist(x, params):
+    """Gamma distribution function
+    K, M = params, where K is the average photon count and
+    M is the number of coherent modes.
+    In the case of high intensity the beam behaves like a wave, and
+    the probability density of the photon counts, P(x), satisfies this gamma function.
+    """
+
+    K, M = params
+    K = float(K)
+    M = float(M)
+    coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x) - gammaln(M) - M * np.log(K))
+    Gd = coeff * np.exp(-M * x / K)
+    return Gd
+
+
+def gamma_dist(bin_values, K, M):
+    r"""
+    Gamma distribution function
+    Parameters
+    ----------
+    bin_values : array
+        scattering intensities
+    K : int
+        average number of photons
+    M : int
+        number of coherent modes
+    Returns
+    -------
+    gamma_dist : array
+        Gamma distribution
+    Notes
+    -----
+    These implementations are based on the references under the
+    nbinom_dist() function Notes
+
+    .. math::
+        P(K) = \left(\frac{M}{\langle K \rangle}\right)^M
+               \frac{K^{M-1}}{\Gamma(M)}
+               \exp\left(-M \frac{K}{\langle K \rangle}\right)
+    """
+
+    # gamma_dist = (stats.gamma(M, 0., K/M)).pdf(bin_values)
+    x = bin_values
+    coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x) - gammaln(M) - M * np.log(K))
+    gamma_dist = coeff * np.exp(-M * x / K)
+    return gamma_dist
+
+
+def nbinom_dist(bin_values, K, M):
+    r"""
+    Negative Binomial (Poisson-Gamma) distribution function
+    Parameters
+    ----------
+    bin_values : array
+        scattering bin values
+    K : int
+        average number of photons
+    M : int
+        number of coherent modes
+    Returns
+    -------
+    nbinom : array
+        Negative Binomial (Poisson-Gamma) distribution function
+    Notes
+    -----
+    The negative-binomial distribution function
+
+    .. math::
+        P(K) = \frac{\Gamma(K + M)}{\Gamma(K + 1)\Gamma(M)}
+               \left(\frac{M}{M + \langle K \rangle}\right)^M
+               \left(\frac{\langle K \rangle}{M + \langle K \rangle}\right)^K
+
+    This implementation is based on the following references
+
+    References: text [1]_
+    .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini,
+       C. Carona and A. Fluerasu , "Photon statistics and speckle visibility
+       spectroscopy with partially coherent x-rays" J. Synchrotron Rad.,
+       vol 21, p 1288-1295, 2014.
+
+    """
+    co_eff = np.exp(gammaln(bin_values + M) - gammaln(bin_values + 1) - gammaln(M))
+
+    nbinom = co_eff * np.power(M / (K + M), M) * np.power(K / (M + K), bin_values)
+
+    return nbinom
+
+
+#########poisson
+def poisson(x, K):
+    """Poisson distribution function.
+    K is the average photon count.
+    In the case of low intensity the beam behaves like particles, and
+    the probability density of the photon counts, P(x), satisfies this Poisson function.
+    """
+    K = float(K)
+    Pk = np.exp(-K) * np.power(K, x) / gamma(x + 1)
+    return Pk
+
+
+def poisson_dist(bin_values, K):
+    r"""
+    Poisson Distribution
+    Parameters
+    ----------
+    K : int
+        average counts of photons
+    bin_values : array
+        scattering bin values
+    Returns
+    -------
+    poisson_dist : array
+        Poisson Distribution
+    Notes
+    -----
+    These implementations are based on the references under the
+    nbinom_dist() function Notes
+
+    .. math::
+        P(K) = \frac{\langle K \rangle^K}{K!} \exp(-\langle K \rangle)
+    """
+    # poisson_dist = stats.poisson.pmf(K, bin_values)
+    K = float(K)
+    poisson_dist = np.exp(-K) * np.power(K, bin_values) / gamma(bin_values + 1)
+    return poisson_dist
+
+
+def diff_mot_con_factor(times, relaxation_rate, contrast_factor, cf_baseline=0):
+    """
+    This will provide the speckle contrast factor of samples undergoing
+    a diffusive motion.
+
+    Parameters
+    ----------
+    times : array
+        integration times
+
+    relaxation_rate : float
+        relaxation rate
+
+    contrast_factor : float
+        contrast factor
+
+    cf_baseline : float, optional
+        the baseline for the contrast factor
+
+    Return
+    ------
+    diff_contrast_factor : array
+        speckle contrast factor for samples undergoing a diffusive motion
+
+    Notes
+    -----
+    integration times more information - geometric_series function in
+    skxray.core.utils module
+
+    These implementations are based on the references under the
+    nbinom_dist() function Notes
+
+    """
+    co_eff = (np.exp(-2 * relaxation_rate * times) - 1 + 2 * relaxation_rate * times) / (
+        2 * (relaxation_rate * times) ** 2
+    )
+
+    return contrast_factor * co_eff + cf_baseline
+
+
+def get_roi(data, threshold=1e-3):
+    roi = np.where(data > threshold)
+    if len(roi[0]) > len(data) - 3:
+        roi = (np.array(roi[0][:-3]),)
+    elif len(roi[0]) < 3:
+        roi = np.where(data >= 0)
+    return roi[0]
+
+
+def plot_sxvs(Knorm_bin_edges, spe_cts_all, uid=None, q_ring_center=None, xlim=[0, 3.5], time_steps=None):
+    """a convenient function to plot xsvs results"""
+    num_rings = spe_cts_all.shape[1]
+    num_times = Knorm_bin_edges.shape[0]
+    sx = int(round(np.sqrt(num_rings)))
+    if num_rings % sx == 0:
+        sy = int(num_rings / sx)
+    else:
+        sy = int(num_rings / sx + 1)
+    fig = plt.figure(figsize=(10, 6))
+    plt.title("uid= %s" % uid, fontsize=20, y=1.02)
+    plt.axes(frameon=False)
+    plt.xticks([])
+    plt.yticks([])
+    if time_steps is None:
+        time_steps = [2**i for i in range(num_times)]
+    for i in range(num_rings):
+        for j in range(num_times):
+            axes = fig.add_subplot(sx, sy, i + 1)
+            axes.set_xlabel("K/<K>")
+            axes.set_ylabel("P(K)")
+            (art,) = axes.plot(
+                Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i], "-o", label=str(time_steps[j]) + " ms"
+            )
+            axes.set_xlim(xlim)
+            axes.set_title("Q " + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$")
+            axes.legend(loc="best", fontsize=6)
+    # plt.show()
+    fig.tight_layout()
+
+
+def fit_xsvs1(
+    Knorm_bin_edges,
+    bin_edges,
+    spe_cts_all,
+    K_mean=None,
+    func="bn",
+    threshold=1e-7,
+    uid=None,
+    q_ring_center=None,
+    xlim=[0, 3.5],
+    ylim=None,
+    time_steps=None,
+):
+    """a convenient function to fit and plot xsvs results
+    supported fit functions include:
+        'bn': Negative Binomial Distribution
+        'gm': Gamma Distribution
+        'ps': Poisson Distribution
+
+    """
+    from lmfit import Model
+    from scipy.interpolate import UnivariateSpline
+
+    if func == "bn":
+        mod = Model(nbinom_dist)
+    elif func == "gm":
+        mod = Model(gamma_dist, independent_vars=["bin_values"])
+ elif func == "ps": + mod = Model(poisson_dist) + else: + print("the current supporting function include 'bn', 'gm','ps'") + + # g_mod = Model(gamma_dist, indepdent_vars=['K']) + # g_mod = Model( gamma_dist ) + # n_mod = Model(nbinom_dist) + # p_mod = Model(poisson_dist) + # dc_mod = Model(diff_mot_con_factor) + + num_rings = spe_cts_all.shape[1] + num_times = Knorm_bin_edges.shape[0] + + M_val = {} + K_val = {} + sx = int(round(np.sqrt(num_rings))) + if num_rings % sx == 0: + sy = int(num_rings / sx) + else: + sy = int(num_rings / sx + 1) + fig = plt.figure(figsize=(10, 6)) + plt.title("uid= %s" % uid + " Fitting with Negative Binomial Function", fontsize=20, y=1.02) + plt.axes(frameon=False) + plt.xticks([]) + plt.yticks([]) + if time_steps is None: + time_steps = [2**i for i in range(num_times)] + + for i in range(num_rings): + M_val[i] = [] + K_val[i] = [] + for j in range(num_times): + # find the best values for K and M from fitting + if threshold is not None: + rois = get_roi(data=spe_cts_all[j, i], threshold=threshold) + else: + rois = range(len(spe_cts_all[j, i])) + + # print ( rois ) + if func == "bn": + result = mod.fit(spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12) + elif func == "gm": + result = mod.fit( + spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j, M=20 + ) + elif func == "ps": + result = mod.fit( + spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j + ) + else: + pass + + if func == "bn": + K_val[i].append(result.best_values["K"]) + M_val[i].append(result.best_values["M"]) + elif func == "gm": + M_val[i].append(result.best_values["M"]) + elif func == "ps": + K_val[i].append(result.best_values["K"]) + else: + pass + + axes = fig.add_subplot(sx, sy, i + 1) + axes.set_xlabel("K/") + axes.set_ylabel("P(K)") + + # Using the best K and M values interpolate and get more values for fitting curve + fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) + fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) + if func == "bn": + fity = nbinom_dist(fitx, K_val[i][j], M_val[i][j]) # M and K are fitted best values + label = "nbinom" + txt = "K=" + "%.3f" % (K_val[i][0]) + "," + "M=" + "%.3f" % (M_val[i][0]) + elif func == "gm": + fity = gamma_dist(fitx, K_mean[i] * 2**j, M_val[i][j]) + label = "gamma" + txt = "M=" + "%.3f" % (M_val[i][0]) + elif func == "ps": + fity = poisson_dist(fitx, K_val[i][j]) + label = "poisson" + txt = "K=" + "%.3f" % (K_val[i][0]) + else: + pass + + if j == 0: + (art,) = axes.plot(fitx_, fity, "-b", label=label) + else: + (art,) = axes.plot(fitx_, fity, "-b") + + if i == 0: + (art,) = axes.plot( + Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i], "o", label=str(time_steps[j]) + " ms" + ) + else: + (art,) = axes.plot( + Knorm_bin_edges[j, i][:-1], + spe_cts_all[j, i], + "o", + ) + + axes.set_xlim(0, 3.5) + if ylim is not None: + axes.set_ylim(ylim) + # Annotate the best K and M values on the plot + + axes.annotate( + r"%s" % txt, + xy=(1, 0.25), + xycoords="axes fraction", + fontsize=10, + horizontalalignment="right", + verticalalignment="bottom", + ) + axes.set_title("Q " + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$") + axes.legend(loc="best", fontsize=6) + # plt.show() + fig.tight_layout() + + return M_val, K_val + + +def plot_xsvs_g2(g2, taus, res_pargs=None, *argv, **kwargs): + """plot g2 results, + g2: one-time correlation function + taus: the time delays + res_pargs, a dict, can contains + uid/path/qr_center/qz_center/ + kwargs: can contains + 
vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)] + ylim/xlim: the limit of y and x + + e.g. + plot_gisaxs_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, vlim=[.99, 1.01] ) + + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + q_ring_center = res_pargs["q_ring_center"] + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + + if "q_ring_center" in kwargs.keys(): + q_ring_center = kwargs["q_ring_center"] + else: + q_ring_center = np.arange(g2.shape[1]) + + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + + num_rings = g2.shape[1] + sx = int(round(np.sqrt(num_rings))) + if num_rings % sx == 0: + sy = int(num_rings / sx) + else: + sy = int(num_rings / sx + 1) + + # print (num_rings) + if num_rings != 1: + # fig = plt.figure(figsize=(14, 10)) + fig = plt.figure(figsize=(12, 10)) + plt.axis("off") + # plt.axes(frameon=False) + # print ('here') + plt.xticks([]) + plt.yticks([]) + + else: + fig = plt.figure(figsize=(8, 8)) + + plt.title("uid= %s" % uid, fontsize=20, y=1.06) + for i in range(num_rings): + ax = fig.add_subplot(sx, sy, i + 1) + ax.set_ylabel("beta") + ax.set_title(" Q= " + "%.5f " % (q_ring_center[i]) + r"$\AA^{-1}$") + y = g2[:, i] + # print (y) + ax.semilogx(taus, y, "-o", markersize=6) + # ax.set_ylim([min(y)*.95, max(y[1:])*1.05 ]) + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + dt = datetime.now() + CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + + fp = path + "g2--uid=%s" % (uid) + CurTime + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + +###########################3 + +# + + +def nbinomlog(p, hist, x, N): + """Residuals for maximum likelihood fit to nbinom distribution. + Vary M (shape param) and mu (count rate) vary (using leastsq)""" + mu, M = p + mu = abs(mu) + M = abs(M) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np - hist) + err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + return np.sqrt(np.abs(err)) + # return err + + +def nbinomlog1(p, hist, x, N, mu): + """Residuals for maximum likelihood fit to nbinom distribution. + Vary M (shape param) but mu (count rate) fixed (using leastsq)""" + M = abs(p[0]) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np - hist) + err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + return np.sqrt(np.abs(err)) + + +def nbinomlog1_notworknow(p, hist, x, N, mu): + """Residuals for maximum likelihood fit to nbinom distribution. 
+ Vary M (shape param) but mu (count rate) fixed (using leastsq)""" + M = abs(p[0]) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np - hist) + err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + # return np.sqrt(err) + return err + + +def nbinomres(p, hist, x, N): + """residuals to leastsq() to fit normal chi-square""" + mu, M = p + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = (hist - Np) / np.sqrt(Np) + return err + + +def get_xsvs_fit(spe_cts_all, K_mean, varyK=True, max_bins=None, qth=None, g2=None, times=None, taus=None): + """ + Fit the xsvs by Negative Binomial Function using max-likelihood chi-squares + """ + + max_cts = spe_cts_all[0][0].shape[0] - 1 + num_times, num_rings = spe_cts_all.shape + if max_bins is not None: + num_times = min(num_times, max_bins) + + bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges( + num_times, num_rings, K_mean, int(max_cts + 2) + ) + + if g2 is not None: + g2c = g2.copy() + g2c[0] = g2[1] + ML_val = {} + KL_val = {} + K_ = [] + if qth is not None: + range_ = range(qth, qth + 1) + else: + range_ = range(num_rings) + for i in range_: + N = 1 + ML_val[i] = [] + KL_val[i] = [] + + if g2 is not None: + mi_g2 = 1 / (g2c[:, i] - 1) + m_ = np.interp(times, taus, mi_g2) + for j in range(num_times): + x_, x, y = bin_edges[j, i][:-1], Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i] + if g2 is not None: + m0 = m_[j] + else: + m0 = 10 + # resultL = minimize(nbinom_lnlike, [K_mean[i] * 2**j, m0], args=(x_, y) ) + # the normal leastsq + # result_n = leastsq(nbinomres, [K_mean[i] * 2**j, m0], args=(y,x_,N),full_output=1) + # not vary K + if not varyK: + resultL = leastsq( + nbinomlog1, + [m0], + args=(y, x_, N, K_mean[i] * 2**j), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + ML_val[i].append(abs(resultL[0][0])) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + + else: + # vary M and K + resultL = leastsq( + nbinomlog, + [K_mean[i] * 2**j, m0], + args=(y, x_, N), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + + ML_val[i].append(abs(resultL[0][1])) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) + if j == 0: + K_.append(KL_val[i][0]) + return ML_val, KL_val, np.array(K_) + + +def plot_xsvs_fit( + spe_cts_all, + ML_val, + KL_val, + K_mean, + xlim=[0, 15], + ylim=[1e-8, 1], + q_ring_center=None, + uid="uid", + qth=None, + times=None, + fontsize=3, +): + fig = plt.figure(figsize=(9, 6)) + plt.title("uid= %s" % uid + " Fitting with Negative Binomial Function", fontsize=20, y=1.02) + plt.axes(frameon=False) + plt.xticks([]) + plt.yticks([]) + + max_cts = spe_cts_all[0][0].shape[0] - 1 + num_times, num_rings = spe_cts_all.shape + + bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges( + num_times, num_rings, K_mean, int(max_cts + 2) + ) + + if qth is not None: + range_ = range(qth, qth + 1) + num_times = len(ML_val[qth]) + else: + range_ = range(num_rings) + num_times = len(ML_val[0]) + # for i in range(num_rings): + + sx = int(round(np.sqrt(len(range_)))) + + if len(range_) % sx == 0: + sy = int(len(range_) / sx) + else: + sy = int(len(range_) / sx + 1) + n = 1 + for i in range_: + axes = fig.add_subplot(sx, sy, n) + axes.set_xlabel("K/") + axes.set_ylabel("P(K)") + n += 1 + for j in range(num_times): + # print( i, j ) + x_, x, y = bin_edges[j, i][:-1], Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i] 
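+            # x_ : raw photon-count bin edges, x : the same edges normalized by
+            # the mean count <K> (the K/<K> axis), y : measured P(K) of ROI i at
+            # time level j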
+ # Using the best K and M values interpolate and get more values for fitting curve + + xscale = bin_edges[j, i][:-1][1] / Knorm_bin_edges[j, i][:-1][1] + fitx = np.linspace(0, max_cts * 2**j, 5000) + fitx_ = fitx / xscale + + # fity = nbinom_dist( fitx, K_val[i][j], M_val[i][j] ) + fitL = nbinom_dist(fitx, KL_val[i][j], ML_val[i][j]) + + if j == 0: + (art,) = axes.semilogy(fitx_, fitL, "-r", label="nbinom_L") + # art, = axes.semilogy( fitx_,fity, '--b', label="nbinom") + else: + (art,) = axes.plot(fitx_, fitL, "-r") + # art, = axes.plot( fitx_,fity, '--b') + if i == 0: + if times is not None: + label = str(times[j] * 1000) + " ms" + else: + label = "Bin_%s" % (2**j) + + (art,) = axes.plot(x, y, "o", label=label) + else: + (art,) = axes.plot( + x, + y, + "o", + ) + + axes.set_xlim(xlim) + axes.set_ylim(ylim) + + axes.set_title("Q=" + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$") + axes.legend(loc="best", fontsize=fontsize) + # plt.show() + fig.tight_layout() + + +def get_max_countc(FD, labeled_array): + """Compute the max intensity of ROIs in the compressed file (FD) + + Parameters + ---------- + FD: Multifile class + compressed file + labeled_array : array + labeled array; 0 is background. + Each ROI is represented by a nonzero integer. It is not required that + the ROI labels are contiguous + index : int, list, optional + The ROI's to use. If None, this function will extract averages for all + ROIs + + Returns + ------- + max_intensity : a float + index : list + The labels for each element of the `mean_intensity` list + """ + + qind, pixelist = roi.extract_label_indices(labeled_array) + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + + if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]): + raise ValueError( + " `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" + % (FD.md["ncols"], FD.md["nrows"], labeled_array.shape[0], labeled_array.shape[1]) + ) + + max_inten = 0 + for i in tqdm(range(FD.beg, FD.end, 1), desc="Get max intensity of ROIs in all frames"): + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + + max_inten = max(max_inten, np.max(v[w])) + return max_inten + + +def get_contrast(ML_val): + nq, nt = len(ML_val.keys()), len(ML_val[list(ML_val.keys())[0]]) + contrast_factorL = np.zeros([nq, nt]) + for i in range(nq): + for j in range(nt): + contrast_factorL[i, j] = 1 / ML_val[i][j] + return contrast_factorL + + +def plot_g2_contrast(contrast_factorL, g2, times, taus, q_ring_center=None, uid=None, vlim=[0.8, 1.2], qth=None): + nq, nt = contrast_factorL.shape + + if qth is not None: + range_ = range(qth, qth + 1) + else: + range_ = range(nq) + num_times = nt + nr = len(range_) + sx = int(round(np.sqrt(nr))) + if nr % sx == 0: + sy = int(nr / sx) + else: + sy = int(nr / sx + 1) + # fig = plt.figure(figsize=(14, 10)) + + fig = plt.figure() + plt.title("uid= %s_" % uid + "Contrast Factor for Each Q Rings", fontsize=14, y=1.08) + if qth is None: + plt.axis("off") + n = 1 + for sn in range_: + # print( sn ) + ax = fig.add_subplot(sx, sy, n) + n += 1 + yL = contrast_factorL[sn, :] + g = g2[1:, sn] - 1 + ax.semilogx(times[:nt], yL, "-bs", label="vis") + ax.semilogx(taus[1:], g, "-rx", label="xpcs") + ax.set_title(" Q=" + "%.5f " % (q_ring_center[sn]) + r"$\AA^{-1}$") + + # ym = np.mean( g ) + ax.set_ylim([g.min() * vlim[0], g.max() * vlim[1]]) + + fig.tight_layout() diff --git a/pyCHX/backups/pyCHX-backup/chx_specklecp.py b/pyCHX/backups/pyCHX-backup/chx_specklecp.py new file mode 
100644 index 0000000..d03ea3b --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_specklecp.py @@ -0,0 +1,2036 @@ +""" +X-ray speckle visibility spectroscopy(XSVS) - Dynamic information of +the speckle patterns are obtained by analyzing the speckle statistics +and calculating the speckle contrast in single scattering patterns. +This module will provide XSVS analysis tools +""" + +from __future__ import absolute_import, division, print_function + +import logging +import time + +import six +from skbeam.core import roi +from skbeam.core.utils import bin_edges_to_centers, geometric_series + +logger = logging.getLogger(__name__) + +import itertools +import os +import sys +from datetime import datetime +from multiprocessing import Pool + +import dill +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import scipy as sp +import scipy.stats as st +from matplotlib.colors import LogNorm +from scipy.optimize import leastsq, minimize +from tqdm import tqdm + +from pyCHX.chx_compress import apply_async, go_through_FD, map_async, pass_FD, run_dill_encoded +from pyCHX.chx_generic_functions import trans_data_to_pd + + +def xsvsp( + FD, + label_array, + only_two_levels=True, + only_first_level=False, + timebin_num=2, + time_bin=None, + max_cts=None, + bad_images=None, + threshold=1e8, + imgsum=None, + norm=None, +): + """ + FD: a list of FD or a single FD + See other parameters in xsvsc funcs + """ + + if not isinstance(FD, list): + bin_edges, prob_k, prob_k_std_dev, his_sum = xsvsp_single( + FD, + label_array, + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norm, + ) + else: + bin_edges, prob_k, prob_k_std_dev, his_sum = xsvsp_multi( + FD, + label_array, + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norm, + ) + + return bin_edges, prob_k, prob_k_std_dev, his_sum + + +def xsvsp_multi( + FD_set, + label_array, + only_two_levels=True, + only_first_level=False, + timebin_num=2, + time_bin=None, + max_cts=None, + bad_images=None, + threshold=1e8, + imgsum=None, + norm=None, +): + """ + FD_set: a list of FD + See other parameters in xsvsc funcs + """ + N = len(FD_set) + for n in range(N): + bin_edges, prob_k, prob_k_std_dev, his_sum = xsvsp_single( + FD_set[n], + label_array, + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norm, + ) + if n == 0: + prob_k_all = prob_k + his_sum_all = his_sum + prob_k_std_dev_all = prob_k_std_dev + else: + prob_k_all += (prob_k - prob_k_all) / (n + 1) + his_sum_all += (his_sum - his_sum_all) / (n + 1) + prob_k_std_dev_all += (prob_k_std_dev - prob_k_std_dev_all) / (n + 1) + + return bin_edges, prob_k_all, prob_k_std_dev_all, his_sum_all + + +def xsvsp_single( + FD, + label_array, + only_two_levels=True, + only_first_level=False, + timebin_num=2, + time_bin=None, + max_cts=None, + bad_images=None, + threshold=1e8, + imgsum=None, + norm=None, +): + """ + Calculate probability density of detecting photons using parallel algorithm + """ + + noframes = FD.end - FD.beg + 1 # number of frames, not "no frames" + number_of_img = noframes + for i in range(FD.beg, FD.end): + pass_FD(FD, i) + label_arrays = [np.array(label_array == i, dtype=np.int64) for i in np.unique(label_array)[1:]] + qind, pixelist = roi.extract_label_indices(label_array) + if norm is not None: + norms = [ + norm[np.in1d(pixelist, extract_label_indices(np.array(label_array == i, 
dtype=np.int64))[1])] + for i in np.unique(label_array)[1:] + ] + + inputs = range(len(label_arrays)) + + pool = Pool(processes=len(inputs)) + print("Starting assign the tasks...") + results = {} + progress_bar = False + if norm is not None: + for i in tqdm(inputs): + results[i] = apply_async( + pool, + xsvsc_single, + ( + FD, + label_arrays[i], + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norms[i], + progress_bar, + ), + ) + else: + for i in tqdm(inputs): + results[i] = apply_async( + pool, + xsvsc_single, + ( + FD, + label_arrays[i], + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + None, + progress_bar, + ), + ) + pool.close() + + print("Starting running the tasks...") + res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] + + u_labels = list(np.unique(qind)) + num_roi = len(u_labels) + if time_bin is None: + time_bin = geometric_series(timebin_num, number_of_img) + if only_first_level: + time_bin = [1] + elif only_two_levels: + time_bin = [1, 2] + # print(time_bin) + # number of times in the time bin + num_times = len(time_bin) + prob_k = np.zeros([num_times, num_roi], dtype=np.object) + prob_k_std_dev = np.zeros_like(prob_k) + his_sum = np.zeros([num_times, num_roi]) + # print( len(res) ) + # print( prob_k.shape ) + + for i in inputs: + # print( i) + bin_edges, prob_k[:, i], prob_k_std_dev[:, i], his_sum[:, i] = ( + res[i][0], + res[i][1][:, 0], + res[i][2][:, 0], + res[i][3][:, 0], + ) + + print("Histogram calculation DONE!") + del results + del res + return bin_edges, prob_k, prob_k_std_dev, his_sum + + +def xsvsc( + FD, + label_array, + only_two_levels=True, + only_first_level=False, + timebin_num=2, + time_bin=None, + max_cts=None, + bad_images=None, + threshold=1e8, + imgsum=None, + norm=None, +): + """ + FD: a list of FD or a single FD + See other parameters in xsvsc funcs + """ + + if not isinstance(FD, list): + bin_edges, prob_k, prob_k_std_dev, his_sum = xsvsc_single( + FD, + label_array, + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norm, + ) + else: + bin_edges, prob_k, prob_k_std_dev, his_sum = xsvsc_multi( + FD, + label_array, + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norm, + ) + + return bin_edges, prob_k, prob_k_std_dev, his_sum + + +def xsvsc_multi( + FD_set, + label_array, + only_two_levels=True, + only_first_level=False, + timebin_num=2, + time_bin=None, + max_cts=None, + bad_images=None, + threshold=1e8, + imgsum=None, + norm=None, +): + """ + FD_set: a list of FD + See other parameters in xsvsc funcs + """ + N = len(FD_set) + for n in range(N): + bin_edges, prob_k, prob_k_std_dev, his_sum = xsvsc_single( + FD_set[n], + label_array, + only_two_levels, + only_first_level, + timebin_num, + time_bin, + max_cts, + bad_images, + threshold, + imgsum, + norm, + ) + if n == 0: + prob_k_all = prob_k + prob_k_std_dev_all = prob_k_std_dev + his_sum_all = his_sum + else: + prob_k_all += (prob_k - prob_k_all) / (n + 1) + his_sum_all += (his_sum - his_sum_all) / (n + 1) + prob_k_std_dev_all += (prob_k_std_dev - prob_k_std_dev_all) / (n + 1) + return bin_edges, prob_k_all, prob_k_std_dev_all, his_sum_all + + +def xsvsc_single( + FD, + label_array, + only_two_levels=True, + only_first_level=False, + timebin_num=2, + time_bin=None, + max_cts=None, + bad_images=None, + threshold=1e8, + 
imgsum=None, + norm=None, + progress_bar=True, +): + """YG MOD@Octo 12, 2017, Change photon statistic error bar from sampling statistic bar to error bar with phisical meaning, + photon_number@one_particular_count = photon_tolal_number * photon_distribution@one_particular_count +/- + sqrt( photon_number@one_particular_count ) + + + This function will provide the probability density of detecting photons + for different integration times. + The experimental probability density P(K) of detecting photons K is + obtained by histogramming the speckle counts over an ensemble of + equivalent pixels and over a number of speckle patterns recorded + with the same integration time T under the same condition. + Parameters + ---------- + image_iterable : FD, a compressed eiger file by Multifile class + + label_array : array + labeled array; 0 is background. + Each ROI is represented by a distinct label (i.e., integer). + number_of_img : int + number of images (how far to go with integration times when finding + the time_bin, using skxray.utils.geometric function) + timebin_num : int, optional + integration time; default is 2 + max_cts : int, optional + the brightest pixel in any ROI in any image in the image set. + defaults to using skxray.core.roi.roi_max_counts to determine + the brightest pixel in any of the ROIs + + + bad_images: array, optional + the bad images number list, the XSVS will not analyze the binning image groups which involve any bad images + threshold: float, optional + If one image involves a pixel with intensity above threshold, such image will be considered as a bad image. + + + Returns + ------- + prob_k_all : array + probability density of detecting photons + prob_k_std_dev : array + standard deviation of probability density of detecting photons + Notes + ----- + These implementation is based on following references + References: text [1]_, text [2]_ + .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini, + C. Carona and A. Fluerasu , "Photon statistics and speckle visibility + spectroscopy with partially coherent x-rays" J. Synchrotron Rad., + vol 21, p 1288-1295, 2014. + .. [2] R. Bandyopadhyay, A. S. Gittings, S. S. Suh, P.K. Dixon and + D.J. Durian "Speckle-visibilty Spectroscopy: A tool to study + time-varying dynamics" Rev. Sci. Instrum. vol 76, p 093110, 2005. + There is an example in https://github.com/scikit-xray/scikit-xray-examples + It will demonstrate the use of these functions in this module for + experimental data. 
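+
+    A minimal usage sketch (illustrative only; ``FD`` is an opened
+    compressed-file handle and ``rois`` a labeled ROI array matching the
+    detector shape; both are placeholders):
+
+    >>> bin_edges, prob_k, prob_k_std, his_sum = xsvsc_single(
+    ...     FD, rois, only_two_levels=True, timebin_num=2
+    ... )
+    >>> # prob_k[t, q] is the normalized histogram P(K) of ROI q at time
+    >>> # level t; his_sum[t, q] is the total number of counts used to
+    >>> # normalize it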
+ """ + + label_array = np.int_(label_array) + if max_cts is None: + max_cts = roi.roi_max_counts(FD, label_array) + # find the label's and pixel indices for ROI's + labels, indices = roi.extract_label_indices(label_array) + nopixels = len(indices) + # number of ROI's + u_labels = list(np.unique(labels)) + num_roi = len(u_labels) + # create integration times + noframes = FD.end - FD.beg + 1 + number_of_img = noframes + if time_bin is None: + time_bin = geometric_series(timebin_num, noframes) + if only_first_level: + time_bin = [1] + elif only_two_levels: + time_bin = [1, 2] + # print(time_bin) + # number of times in the time bin + num_times = len(time_bin) + # number of pixels per ROI + num_pixels = np.bincount(labels, minlength=(num_roi + 1))[1:] + # probability density of detecting photons + prob_k = np.zeros([num_times, num_roi], dtype=np.object) + his_sum = np.zeros([num_times, num_roi]) + # square of probability density of detecting photons + prob_k_pow = np.zeros_like(prob_k) + # standard deviation of probability density of detecting photons + prob_k_std_dev = np.zeros_like(prob_k) + # get the bin edges for each time bin for each ROI + bin_edges = np.zeros(prob_k.shape[0], dtype=prob_k.dtype) + for i in range(num_times): + bin_edges[i] = np.arange(max_cts * timebin_num**i) + # start_time = time.time() # used to log the computation time (optionally) + bad_frame_list = bad_images + if bad_frame_list is None: + bad_frame_list = [] + pixelist = indices + fra_pix = np.zeros_like(pixelist, dtype=np.float64) + + timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32) + timg[pixelist] = np.arange(1, len(pixelist) + 1) + # print( n, FD, num_times, timebin_num, nopixels ) + buf = np.zeros([num_times, timebin_num, nopixels], dtype=np.float64) + # to track processing each time level + track_level = np.zeros(num_times) # , dtype=bool) + track_bad_level = np.zeros(num_times) # , dtype=np.int32 ) + # to increment buffer + cur = np.int_(np.full(num_times, timebin_num, dtype=np.int64)) + # cur = np.full(num_times, timebin_num) + # to track how many images processed in each level + img_per_level = np.zeros(num_times, dtype=np.int64) + + if progress_bar: + xr = tqdm(range(FD.beg, FD.end)) + else: + xr = range(FD.beg, FD.end) + + for i in xr: + # Ring buffer, a buffer with periodic boundary conditions. + # Images must be keep for up to maximum delay in buf. + # buf = np.zeros([num_times, timebin_num], dtype=np.object) # matrix of buffers + if i in bad_frame_list: + fra_pix[:] = np.nan + # print( 'here is a bad frmae--%i'%i ) + else: + fra_pix[:] = 0 + (p, v) = FD.rdrawframe(i) + w = np.where(timg[p])[0] + pxlist = timg[p[w]] - 1 + if imgsum is None: + if norm is None: + fra_pix[pxlist] = v[w] + else: + fra_pix[pxlist] = v[w] / norm[pxlist] # -1.0 + else: + if norm is None: + fra_pix[pxlist] = v[w] / imgsum[i] + else: + fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist] + # level =0 + cur[0] = 1 + cur[0] % timebin_num + # read each frame + # Put the image into the ring buffer. + img_ = fra_pix + # Put the ROI pixels into the ring buffer. + # fra_pix[:]=0 + if threshold is not None: + if img_.max() >= threshold: + print("bad image: %s here!" 
% n) + img_[:] = np.nan + buf[0, cur[0] - 1] = img_ + _process( + num_roi, + 0, + cur[0] - 1, + buf, + img_per_level, + labels, + max_cts, + bin_edges[0], + prob_k, + prob_k_pow, + track_bad_level, + ) + # check whether the number of levels is one, otherwise + # continue processing the next level + level = 1 + if number_of_img > 1: + processing = 1 + else: + processing = 0 + if only_first_level: + processing = 0 + + while processing: + # print( 'here') + if track_level[level]: + prev = 1 + (cur[level - 1] - 2) % timebin_num + cur[level] = 1 + cur[level] % timebin_num + bufa = buf[level - 1, prev - 1] + bufb = buf[level - 1, cur[level - 1] - 1] + buf[level, cur[level] - 1] = bufa + bufb + # print( buf[level, cur[level]-1] ) + track_level[level] = 0 + _process( + num_roi, + level, + cur[level] - 1, + buf, + img_per_level, + labels, + max_cts, + bin_edges[level], + prob_k, + prob_k_pow, + track_bad_level, + ) + level += 1 + if level < num_times: + processing = 1 + else: + processing = 0 + else: + track_level[level] = 1 + processing = 0 + # print( level ) + # prob_k_std_dev = np.power((prob_k_pow - + # np.power(prob_k, 2)), .5) + + for i in range(num_times): + for j in range(num_roi): + # print( prob_k[i,j] ) + his_sum[i, j] = np.sum(np.array(prob_k[i, j])) / 1.0 + # print(his_sum[i,j]) + prob_k_std_dev[i, j] = np.sqrt(prob_k[i, j]) / his_sum[i, j] + prob_k[i, j] = prob_k[i, j] / his_sum[i, j] + + # for i in range(num_times): + # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): + # pass + + return bin_edges, prob_k, prob_k_std_dev, his_sum + + +def _process( + num_roi, level, buf_no, buf, img_per_level, labels, max_cts, bin_edges, prob_k, prob_k_pow, track_bad_level +): + """ + Internal helper function. This modifies inputs in place. + This helper function calculate probability of detecting photons for + each integration time. + .. warning :: This function mutates the input values. 
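+
+    Unlike the running-average version in chx_speckle.py, this variant adds
+    the raw per-ROI photon histogram (np.bincount of the counts) into prob_k;
+    the caller (xsvsc_single) later divides by his_sum to obtain the
+    normalized P(K) and uses sqrt(prob_k) / his_sum as the error bar.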
+ Parameters + ---------- + num_roi : int + number of ROI's + level : int + current time level(integration time) + buf_no : int + current buffer number + buf : array + image data array to use for XSVS + img_per_level : int + to track how many images processed in each level + labels : array + labels of the required region of interests(ROI's) + max_cts: int + maximum pixel count + bin_edges : array + bin edges for each integration times and each ROI + prob_k : array + probability density of detecting photons + prob_k_pow : array + squares of probability density of detecting photons + """ + img_per_level[level] += 1 + data = buf[level, buf_no] + if np.isnan(data).any(): + track_bad_level[level] += 1 + # print (img_per_level,track_bad_level) + u_labels = list(np.unique(labels)) + ############## + ##To Do list here, change histogram to bincount + ##Change error bar calculation + if not (np.isnan(data).any()): + for j, label in enumerate(u_labels): + roi_data = data[labels == label] + # print( np.max( bin_edges), prob_k[level, j] ) + # print( level, j, bin_edges ) + spe_hist = np.bincount(np.int_(roi_data), minlength=np.max(bin_edges)) + # spe_hist, bin_edges = np.histogram(roi_data, bins=bin_edges, density=True) + spe_hist = np.nan_to_num(spe_hist) + # print( spe_hist.shape ) + # prob_k[level, j] += (spe_hist - + # prob_k[level, j])/( img_per_level[level] - track_bad_level[level] ) + # print( prob_k[level, j] ) + prob_k[level, j] += spe_hist + # print( spe_hist.shape, prob_k[level, j] ) + # prob_k_pow[level, j] += (np.power(spe_hist, 2) - + # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) + + +def normalize_bin_edges(num_times, num_rois, mean_roi, max_cts): + """ + This will provide the normalized bin edges and bin centers for each + integration time. + Parameters + ---------- + num_times : int + number of integration times for XSVS + num_rois : int + number of ROI's + mean_roi : array + mean intensity of each ROI + shape (number of ROI's) + max_cts : int + maximum pixel counts + Returns + ------- + norm_bin_edges : array + normalized speckle count bin edges + shape (num_times, num_rois) + norm_bin_centers :array + normalized speckle count bin centers + shape (num_times, num_rois) + """ + norm_bin_edges = np.zeros((num_times, num_rois), dtype=object) + norm_bin_centers = np.zeros_like(norm_bin_edges) + for i in range(num_times): + for j in range(num_rois): + norm_bin_edges[i, j] = np.arange(max_cts * 2**i) / (mean_roi[j] * 2**i) + norm_bin_centers[i, j] = bin_edges_to_centers(norm_bin_edges[i, j]) + + return norm_bin_edges, norm_bin_centers + + +def get_his_std_qi(data_pixel_qi, max_cts=None): + """ + YG. Dev 16, 2016 + Calculate the photon histogram for one q by giving + Parameters: + data_pixel_qi: one-D array, for the photon counts + max_cts: for bin max, bin will be [0,1,2,..., max_cts] + Return: + bins + his + std + """ + if max_cts is None: + max_cts = np.max(data_pixel_qi) + 1 + bins = np.arange(max_cts) + dqn, dqm = data_pixel_qi.shape + # get histogram here + H = np.apply_along_axis(np.bincount, 1, np.int_(data_pixel_qi), minlength=max_cts) / dqm + # do average for different frame + his = np.average(H, axis=0) + std = np.std(H, axis=0) + # cal average photon counts + kmean = np.average(data_pixel_qi) + return bins, his, std, kmean + + +def get_his_std(data_pixel, rois, max_cts=None): + """ + YG. 
Dev 16, 2016 + Calculate the photon histogram for multi-q by giving + Parameters: + data_pixel: multi-D array, for the photon counts + max_cts: for bin max, bin will be [0,1,2,..., max_cts] + Return: + bins + his + std + """ + if max_cts is None: + max_cts = np.max(data_pixel) + 1 + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + his = np.zeros([noqs], dtype=np.object) + std = np.zeros_like(his, dtype=np.object) + kmean = np.zeros_like(his, dtype=np.object) + for qi in range(noqs): + pixelist_qi = np.where(qind == qi + 1)[0] + # print(qi, max_cts) + bins, his[qi], std[qi], kmean[qi] = get_his_std_qi(data_pixel[:, pixelist_qi], max_cts) + return bins, his, std, kmean + + +def save_bin_his_std(spec_bins, spec_his, spec_std, filename, path): + """YG. Dec 23, 2016 + save spec_bin, spec_his, spec_std to a csv file + """ + ql = spec_his.shape[1] + tl = spec_bins[-1].shape[0] - 1 + # print( tl, ql) + mhis, nhis = spec_his.shape + mstd, nstd = spec_std.shape + spec_data = np.zeros([tl, 1 + ql * (mhis + mstd)]) + spec_data[:, :] = np.nan + spec_data[:, 0] = spec_bins[-1][:-1] + for i in range(mhis): + max_m = spec_his[i, 0].shape[0] + for j in range(nhis): + spec_data[:max_m, 1 + i * ql + j] = spec_his[i, j] + for i in range(mstd): + max_m = spec_std[i, 0].shape[0] + for j in range(nstd): + spec_data[:max_m, 1 + mhis * ql + i * ql + j] = spec_std[i, j] + label = ["count"] + for l in range(mhis): + for q in range(nhis): + label += ["his_level_%s_q_%s" % (l, q)] + for l in range(mstd): + for q in range(nstd): + label += ["std_level_%s_q_%s" % (l, q)] + spec_pds = trans_data_to_pd(spec_data, label, "array") + filename_ = os.path.join(path, filename) + spec_pds.to_csv(filename_) + print("The file: %s is saved in %s" % (filename, path)) + return spec_pds + + +def reshape_array(array, new_len): + """ + array: shape= [M,N] + new_len: the length of the new array, the reshaped shape will be [M//new_len, new_len, N] + + """ + M, N = array.shape + m = M // new_len + return array[: m * new_len, :].reshape([m, new_len, N]) + + +def get_binned_his_std_qi(data_pixel_qi, lag_steps, max_cts=None): + """ + YG. Dev 16, 2016 + Calculate the binned photon histogram for one q by giving + Parameters: + data_pixel_qi: one-D array, for the photon counts + lag_steps: the binned number + max_cts: for bin max, bin will be [0,1,2,..., max_cts] + Return: + bins + his + std + """ + if max_cts is None: + max_cts = np.max(data_pixel_qi) + 1 + lag_steps = np.array(lag_steps) + lag_steps = lag_steps[np.nonzero(lag_steps)] + nologs = len(lag_steps) + his = np.zeros([nologs], dtype=np.object) + bins = np.zeros_like(his, dtype=np.object) + std = np.zeros_like(his, dtype=np.object) + kmean = np.zeros_like(his, dtype=np.object) + i = 0 + for lag in lag_steps: + data_pixel_qi_ = np.sum(reshape_array(data_pixel_qi, lag), axis=1) + bins[i], his[i], std[i], kmean[i] = get_his_std_qi(data_pixel_qi_, max_cts * lag) + i += 1 + return bins, his, std, kmean + + +def get_binned_his_std(data_pixel, rois, lag_steps, max_cts=None): + """ + YG. 
Dev 16, 2016 + Calculate the binned photon histogram qs by giving + Parameters: + data_pixel: one-D array, for the photon counts + lag_steps: the binned number + max_cts: for bin max, bin will be [0,1,2,..., max_cts] + Return: + bins + his + std + """ + if max_cts is None: + max_cts = np.max(data_pixel) + 1 + qind, pixelist = roi.extract_label_indices(rois) + noqs = len(np.unique(qind)) + + lag_steps = np.array(lag_steps) + lag_steps = lag_steps[np.nonzero(lag_steps)] + + nologs = len(lag_steps) + his = np.zeros([nologs, noqs], dtype=np.object) + bins = np.zeros([nologs], dtype=np.object) + std = np.zeros_like(his, dtype=np.object) + kmean = np.zeros_like(his, dtype=np.object) + i = 0 + for lag in tqdm(lag_steps): + data_pixel_ = np.sum(reshape_array(data_pixel, lag), axis=1) + # print( data_pixel_.shape) + for qi in range(noqs): + pixelist_qi = np.where(qind == qi + 1)[0] + bins[i], his[i, qi], std[i, qi], kmean[i, qi] = get_his_std_qi( + data_pixel_[:, pixelist_qi], max_cts * lag + ) + i += 1 + + return bins, his, std, kmean + + +def get_bin_edges(num_times, num_rois, mean_roi, max_cts): + """ + This will provide the normalized bin edges and bin centers for each + integration time. + Parameters + ---------- + num_times : int + number of integration times for XSVS + num_rois : int + number of ROI's + mean_roi : array + mean intensity of each ROI + shape (number of ROI's) + max_cts : int + maximum pixel counts + Returns + ------- + norm_bin_edges : array + normalized speckle count bin edges + shape (num_times, num_rois) + norm_bin_centers :array + normalized speckle count bin centers + shape (num_times, num_rois) + """ + norm_bin_edges = np.zeros((num_times, num_rois), dtype=object) + norm_bin_centers = np.zeros_like(norm_bin_edges) + + bin_edges = np.zeros((num_times, num_rois), dtype=object) + bin_centers = np.zeros_like(bin_edges) + + for i in range(num_times): + for j in range(num_rois): + bin_edges[i, j] = np.arange(max_cts * 2**i) + bin_centers[i, j] = bin_edges_to_centers(bin_edges[i, j]) + norm_bin_edges[i, j] = bin_edges[i, j] / (mean_roi[j] * 2**i) + norm_bin_centers[i, j] = bin_edges_to_centers(norm_bin_edges[i, j]) + + return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers + + +################# +##for fit +################### + +from scipy import stats +from scipy.special import gamma, gammaln + +###########################3 +##Dev at Nov 18, 2016 +# + + +def nbinomres_old(p, hist, x, hist_err=None, N=1): + """residuals to leastsq() to fit normal chi-square""" + mu, M = p + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = (hist - Np) / np.sqrt(Np) + return err + + +def nbinomlog_old(p, hist, x, hist_err=None, N=1): + """Residuals for maximum likelihood fit to nbinom distribution. + Vary M (shape param) and mu (count rate) vary (using leastsq)""" + mu, M = p + mu = abs(mu) + M = abs(M) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np - hist) + err = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + return np.sqrt(np.abs(err)) + # return err + + +def nbinomlog1_old(p, hist, x, hist_err=None, N=1, mu=1): + """Residuals for maximum likelihood fit to nbinom distribution. 
+ Vary M (shape param) but mu (count rate) fixed (using leastsq)""" + M = abs(p[0]) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np - hist) + err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + return np.sqrt(np.abs(err)) + + +def nbinomres(p, hist, x, hist_err=None, N=1): + """residuals to leastsq() to fit normal chi-square""" + mu, M = p + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + w = np.where(hist > 0.0) + if hist_err is None: + hist_err = np.ones_like(Np) + scale = np.sqrt(Np[w] / hist_err[w]) + # scale = 1 + err = (hist[w] - Np[w]) / scale + return err + + +########### +##Dev at Octo 12, 2017 + + +def nbinom(p, x, mu): + """Residuals for maximum likelihood fit to nbinom distribution. + Vary M (shape param) but mu (count rate) fixed (using leastsq)""" + M = abs(p[0]) + Np = st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + return Np + + +def nbinomlog1(p, hist, x, N, mu): + """Residuals for maximum likelihood fit to nbinom distribution. + Vary M (shape param) but mu (count rate) fixed (using leastsq) + + p: fitting parameter, in this case is M, coherent mode number + hist: histogram of photon count for each bin (is a number not probablity) + x: photon count + N: total photons count in the statistics, ( probablity = hist / N ) + mu: average photon count for each bin + + """ + M = abs(p[0]) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np[w] - hist[w]) + err = err - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + return np.sqrt(np.abs(err)) + + +def nbinomlog(p, hist, x, N): + """Residuals for maximum likelihood fit to nbinom distribution. + Vary M (shape param) and mu (count rate) vary (using leastsq)""" + mu, M = p + mu = abs(mu) + M = abs(M) + w = np.where(hist > 0.0) + Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M)) + err = 2 * (Np[w] - hist[w]) + err = err - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0 + return np.sqrt(np.abs(err)) + + +def get_roi(data, threshold=1e-3): + ind = np.where(data > threshold)[0] + if len(ind) > len(data) - 3: + ind = (np.array(ind[:-3]),) + elif len(ind) < 3: + ind = np.where(data >= 0)[0] + return ind + + +def get_xsvs_fit( + spe_cts_all, + spec_sum, + K_mean, + spec_std=None, + spec_bins=None, + lag_steps=None, + varyK=True, + qth=None, + max_bins=None, + min_cutoff_count=None, + g2=None, + times=None, + taus=None, + fit_range=None, +): + """ + Fit the xsvs by Negative Binomial Function using max-likelihood chi-squares + """ + + max_cts = spe_cts_all[0][0].shape[0] - 1 + num_times, num_rings = spe_cts_all.shape + if max_bins is not None: + num_times = min(num_times, max_bins) + if spec_bins is None: + # print( num_times, num_rings, K_mean[0], int(max_cts+2) ) + bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges( + num_times, num_rings, K_mean[0], int(max_cts + 2) + ) + + else: + bin_edges = spec_bins + if lag_steps is None: + print("Please give lag_steps") + lag_steps = [1, 2] + print("The lag_steps is changed to %s" % lag_steps) + lag_steps = np.array(lag_steps) + lag_steps = lag_steps[np.nonzero(lag_steps)] + + if g2 is not None: + g2c = g2.copy() + g2c[0] = g2[1] + ML_val = {} + KL_val = {} + # K_ = np.zeros_like( K_mean ) + K_ = [] + if qth is not None: + range_ = range(qth, qth + 1) + else: + range_ = range(num_rings) + for i in range_: + ML_val[i] = [] + KL_val[i] = [] + if g2 is not None: + mi_g2 = 1 / (g2c[:, i] - 1) + m_ = np.interp(times, taus, mi_g2) + 
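+        # Fit each time level j: the measured probability y is rescaled by the
+        # total counts N = spec_sum[j, i] and fit with the negative-binomial
+        # maximum-likelihood residuals (nbinomlog / nbinomlog1) via leastsq.
+        # The fitted M is the number of coherent modes (contrast = 1/M); K is
+        # fitted as well, or pinned to kmean_guess = K_mean[j, i] when varyK is False.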
for j in range(num_times): + kmean_guess = K_mean[j, i] + N = spec_sum[j, i] + if spec_bins is None: + x_, x, y = bin_edges[j, i][:-1], Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i] + else: + x_, x, y = bin_edges[j], bin_edges[j] / kmean_guess, spe_cts_all[j, i] + + if spec_std is not None: + yerr = spec_std[j, i] + else: + yerr = None + if g2 is not None: + m0 = m_[j] + else: + m0 = 10 + if min_cutoff_count is not None: + ind = get_roi(y, threshold=min_cutoff_count) + y = y[ind] + x_ = x_[ind] + yerr = yerr[ind] + # print(y) + if fit_range is not None: + f1, f2 = fit_range + y, x_, yerr = y[f1:f2], x_[f1:f2], yerr[f1:f2] + + if not varyK: + fit_func = nbinomlog1 + # print(i,j,m0,) + # print(y,N, x_, kmean_guess) + resultL = leastsq( + fit_func, + [m0], + args=(y * N, x_, N, kmean_guess), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + ML_val[i].append(abs(resultL[0][0])) + KL_val[i].append(kmean_guess) # resultL[0][0] ) + else: + # vary M and K + fit_func = nbinomlog + resultL = leastsq( + fit_func, + [kmean_guess, m0], + args=(y * N, x_, N), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + + ML_val[i].append(abs(resultL[0][1])) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) + if j == 0: + K_.append(KL_val[i][0]) + return ML_val, KL_val, np.array(K_) + + +def plot_xsvs_fit( + spe_cts_all, + ML_val, + KL_val, + K_mean, + spec_std=None, + xlim=[0, 15], + vlim=[0.9, 1], + q_ring_center=None, + max_bins=None, + uid="uid", + qth=None, + times=None, + fontsize=3, + path=None, + logy=True, + spec_bins=None, + lag_steps=None, + figsize=[10, 10], +): + """ + Plot visibility with fit + if qth is not None, should be an integer, starting from 1 + """ + + # if qth is None: + # fig = plt.figure(figsize=(10,12)) + # else: + # fig = plt.figure(figsize=(8,8)) + + max_cts = spe_cts_all[0][0].shape[0] - 1 + num_times, num_rings = spe_cts_all.shape + + if spec_bins is None: + bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges( + num_times, num_rings, K_mean[0], int(max_cts + 2) + ) + + else: + bin_edges = spec_bins + if lag_steps is None: + # lag_steps = times[:num_times] + print("Please give lag_steps") + + lag_steps = np.array(lag_steps) + lag_steps = lag_steps[np.nonzero(lag_steps)] + + if qth is not None: + range_ = range(qth - 1, qth) + num_times = len(ML_val[qth - 1]) + else: + range_ = range(num_rings) + num_times = len(ML_val[0]) + # for i in range(num_rings): + + if max_bins is not None: + num_times = min(num_times, max_bins) + + sx = int(round(np.sqrt(len(range_)))) + + if len(range_) % sx == 0: + sy = int(len(range_) / sx) + else: + sy = int(len(range_) / sx + 1) + n = 1 + if qth is not None: + fontsize = 14 + + if qth is None: + fig = plt.figure(figsize=figsize) + else: + fig = plt.figure() + title = "%s" % uid + "-NB-Fit" + plt.title(title, fontsize=16, y=1.08) + # plt.axes(frameon=False) + flag = True + if (qth is None) and (num_rings != 1): + plt.axis("off") + plt.xticks([]) + plt.yticks([]) + flag = False + + for i in range_: + axes = fig.add_subplot(sx, sy, n) + axes.set_xlabel("K/") + axes.set_ylabel("P(K)") + n += 1 + for j in range(num_times): + kmean_guess = K_mean[j, i] + L = len(spe_cts_all[j, i]) + if spec_bins is None: + max_cts_ = max_cts * 2**j + x_, x, y = bin_edges[j, i][:L], Knorm_bin_edges[j, i][:L], spe_cts_all[j, i] + xscale = (x_ / x)[1] # bin_edges[j, i][:-1][1]/ Knorm_bin_edges[j, i][:-1][1] + # print( xscale ) + 
else: + max_cts_ = max_cts * lag_steps[j] + x_, x, y = bin_edges[j][:L], bin_edges[j][:L] / kmean_guess, spe_cts_all[j, i] + xscale = kmean_guess + # Using the best K and M values interpolate and get more values for fitting curve + + fitx = np.linspace(0, max_cts_, 5000) + fitx_ = fitx / xscale + + # print (j,i,kmean_guess, xscale, fitx.shape,KL_val[i][j], ML_val[i][j] ) + # fity = nbinom_dist( fitx, K_val[i][j], M_val[i][j] ) + fitL = nbinom_dist(fitx, KL_val[i][j], ML_val[i][j]) + + if qth is None: + ith = 0 + else: + ith = qth + + # print( i, ith, qth ) + if i == ith: + if times is not None: + label = "Data--" + str(round(times[j] * 1000, 3)) + " ms" + else: + label = "Bin_%s" % (2**j) + else: + if qth is not None: + if times is not None: + label = "Data--" + str(round(times[j] * 1000, 3)) + " ms" + else: + label = "Bin_%s" % (2**j) + else: + label = "" + + if spec_std is None: + (art,) = axes.plot(x, y, "o", label=label) + else: + yerr = spec_std[j][i] + # print(x.shape, y.shape, yerr.shape) + axes.errorbar(x, y, yerr, marker="o", label=label) + + # if j == 0: + if j < 2: + label = "nbinom_L" + txts = r"$M=%s$" % round(ML_val[i][j], 2) + "," + r"$K=%s$" % round(KL_val[i][j], 2) + # print( ML_val[i] ) + x = 0.05 + y0 = 0.2 - j * 0.1 + if qth is None: + fontsize_ = fontsize * 2 + else: + fontsize_ = 18 + axes.text(x=x, y=y0, s=txts, fontsize=fontsize_, transform=axes.transAxes) + else: + label = "" + (art,) = axes.plot(fitx_, fitL, "-r", label=label) + if logy: + axes.set_yscale("log") + ny = y[np.nonzero(y)] + axes.set_ylim(vlim[0] * ny.min(), vlim[1] * ny.max()) + + axes.set_xlim(xlim) + til = "Q=" + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$" + if flag: + til = title + "--" + til + axes.set_title(til, fontsize=12, y=1.0) + axes.legend(loc="best", fontsize=fontsize, fancybox=True, framealpha=0.5) + + if qth is None: + file_name = "%s_xsvs_fit" % (uid) + else: + file_name = "%s_xsvs_fit_q=%s" % (uid, qth) + fp = path + file_name + ".png" + fig.tight_layout() + plt.savefig(fp, dpi=fig.dpi) + + # plt.show() + + +def get_contrast(ML_val): + nq, nt = len(ML_val.keys()), len(ML_val[list(ML_val.keys())[0]]) + contrast_factorL = np.zeros([nq, nt]) + for i in range(nq): + for j in range(nt): + contrast_factorL[i, j] = 1 / ML_val[i][j] + return contrast_factorL + + +def get_K(KL_val): + nq, nt = len(KL_val.keys()), len(KL_val[list(KL_val.keys())[0]]) + K_ = np.zeros([nq, nt]) + for i in range(nq): + for j in range(nt): + K_[i, j] = KL_val[i][j] + return K_ + + +def save_KM(K_mean, KL_val, ML_val, qs=None, level_time=None, uid=None, path=None): + """save Kmean, K_val, M_val as dataframe""" + + import os + + from pandas import DataFrame + + kl = get_K(KL_val) + ml = 1 / get_contrast(ML_val) + L, n = kl.shape + m2, m1 = K_mean.shape + # print(L,n,m2,m1) + if level_time is None: + l = ( + ["K_mean_%d" % i for i in range(m2)] + + ["K_fit_Bin_%i" % s for s in range(1, n + 1)] + + ["M_Fit_Bin_%i" % s for s in range(1, n + 1)] + + ["Contrast_Fit_Bin_%i" % s for s in range(1, n + 1)] + ) + else: + l = ( + ["K_mean_%s" % i for i in level_time] + + ["K_fit_%s" % s for s in level_time] + + ["M_Fit_%s" % s for s in level_time] + + ["Contrast_Fit_%s" % s for s in level_time] + ) + data = np.hstack([(K_mean).T, kl.reshape(L, n), ml.reshape(L, n), (1 / ml).reshape(L, n)]) + if qs is not None: + qs = np.array(qs) + l = ["q"] + l + # print( (K_mean).T, (K_mean).T.shape ) + # print( qs ) + data = np.hstack( + [qs.reshape(L, 1), (K_mean).T, kl.reshape(L, n), ml.reshape(L, n), (1 / ml).reshape(L, n)] + ) + + df = 
DataFrame(data) + df.columns = (x for x in l) + filename = "%s_xsvs_fitted_KM.csv" % (uid) + filename1 = os.path.join(path, filename) + print("The K-M values are saved as %s in %s." % (filename, path)) + df.to_csv(filename1) + return df + + +def get_his_std_from_pds(spec_pds, his_shapes=None): + """Y.G.Dec 22, 2016 + get spec_his, spec_std from a pandas.dataframe file + Parameters: + spec_pds: pandas.dataframe, contains columns as 'count', + spec_his (as 'his_level_0_q_0'), spec_std (as 'std_level_0_q_0') + his_shapes: the shape of the returned spec_his, if None, shapes = (2, (len(spec_pds.keys)-1)/4) ) + Return: + spec_his: array, shape as his_shapes + spec_std, array, shape as his_shapes + """ + spkeys = list(spec_pds.keys()) + if his_shapes is None: + M, N = 2, int((len(spkeys) - 1) / 4) + # print(M,N) + spec_his = np.zeros([M, N], dtype=np.object) + spec_std = np.zeros([M, N], dtype=np.object) + for i in range(M): + for j in range(N): + spec_his[i, j] = np.array(spec_pds[spkeys[1 + i * N + j]][~np.isnan(spec_pds[spkeys[1 + i * N + j]])]) + spec_std[i, j] = np.array( + spec_pds[spkeys[1 + 2 * N + i * N + j]][~np.isnan(spec_pds[spkeys[1 + 2 * N + i * N + j]])] + ) + return spec_his, spec_std + + +def plot_g2_contrast( + contrast_factorL, + g2, + times, + taus, + q_ring_center=None, + uid=None, + vlim=[0.8, 1.2], + qth=None, + path=None, + legend_size=16, + figsize=[10, 10], +): + nq, nt = contrast_factorL.shape + + if qth is not None: + range_ = range(qth, qth + 1) + else: + range_ = range(nq) + num_times = nt + nr = len(range_) + sx = int(round(np.sqrt(nr))) + if nr % sx == 0: + sy = int(nr / sx) + else: + sy = int(nr / sx + 1) + # fig = plt.figure(figsize=(14, 10)) + if qth is not None: + fig = plt.figure() + else: + if len(contrast_factorL) == 1: + fig = plt.figure() + else: + fig = plt.figure(figsize=figsize) + # print('here') + title = "%s_" % uid + "Contrast" + plt.title(title, fontsize=14, y=1.08) + if qth is None: + if len(contrast_factorL) != 1: + plt.axis("off") + + n = 1 + for sn in range_: + # print( sn ) + ax = fig.add_subplot(sx, sy, n) + n += 1 + yL = contrast_factorL[sn, :] + ax.semilogx(times[:nt], yL, "-bs", label="xsvs") + ylim = [yL.min() * vlim[0], yL.max() * vlim[1]] + if (g2 != []) and (g2 is not None): + g = g2[1:, sn] - 1 + ax.semilogx(taus[1:], g, "-rx", label="xpcs") + ylim = [g.min() * vlim[0], g.max() * vlim[1]] + # ax.semilogx([times[:nt][-1], taus[1:][0]], [yL[-1],g[0]], "--bs", label='') + til = " Q=" + "%.5f " % (q_ring_center[sn]) + r"$\AA^{-1}$" + if qth is not None: + til = title + til + ax.set_title(til) + # ym = np.mean( g ) + ax.set_ylim(ylim) + # if qth is not None:legend_size=12 + if n == 2: + ax.legend(loc="best", fontsize=legend_size) + + if qth is None: + file_name = "%s_contrast" % (uid) + else: + file_name = "%s_contrast_q=%s" % (uid, qth) + + fig.tight_layout() + fp = path + file_name + ".png" + plt.savefig(fp, dpi=fig.dpi) + + # plt.show() + + +def get_xsvs_fit_old(spe_cts_all, K_mean, varyK=True, qth=None, max_bins=2, g2=None, times=None, taus=None): + """ + Fit the xsvs by Negative Binomial Function using max-likelihood chi-squares + """ + + max_cts = spe_cts_all[0][0].shape[0] - 1 + num_times, num_rings = spe_cts_all.shape + if max_bins is not None: + num_times = min(num_times, max_bins) + + bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges( + num_times, num_rings, K_mean, int(max_cts + 2) + ) + + if g2 is not None: + g2c = g2.copy() + g2c[0] = g2[1] + ML_val = {} + KL_val = {} + K_ = [] + if qth is not None: + 
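+ # --- Illustrative sketch (added comment, not original code) ------------------
+ # plot_g2_contrast above compares the XSVS speckle contrast, beta(T) = 1/M(T),
+ # obtained from the negative-binomial fits, with the XPCS quantity g2(tau) - 1.
+ # A minimal usage sketch, assuming ML_val, g2, taus and times already exist:
+ #
+ #   contrast = get_contrast(ML_val)          # shape (n_q, n_times), beta = 1/M
+ #   plot_g2_contrast(contrast, g2, times, taus,
+ #                    q_ring_center=q_ring_center, uid=uid, path=data_dir)
+ # ------------------------------------------------------------------------------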
range_ = range(qth, qth + 1) + else: + range_ = range(num_rings) + for i in range_: + N = 1 + ML_val[i] = [] + KL_val[i] = [] + + if g2 is not None: + mi_g2 = 1 / (g2c[:, i] - 1) + m_ = np.interp(times, taus, mi_g2) + for j in range(num_times): + x_, x, y = bin_edges[j, i][:-1], Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i] + if g2 is not None: + m0 = m_[j] + else: + m0 = 10 + # resultL = minimize(nbinom_lnlike, [K_mean[i] * 2**j, m0], args=(x_, y) ) + # the normal leastsq + # result_n = leastsq(nbinomres, [K_mean[i] * 2**j, m0], args=(y,x_,N),full_output=1) + # not vary K + if not varyK: + resultL = leastsq( + nbinomlog1, + [m0], + args=(y, x_, N, K_mean[i] * 2**j), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + ML_val[i].append(abs(resultL[0][0])) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + + else: + # vary M and K + resultL = leastsq( + nbinomlog, + [K_mean[i] * 2**j, m0], + args=(y, x_, N), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + + ML_val[i].append(abs(resultL[0][1])) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) + if j == 0: + K_.append(KL_val[i][0]) + # if max_bins==2: + # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) + # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) + + return ML_val, KL_val, np.array(K_) + + +def gammaDist(x, params): + """Gamma distribution function + M,K = params, where K is average photon counts , + M is the number of coherent modes, + In case of high intensity, the beam behavors like wave and + the probability density of photon, P(x), satify this gamma function. + """ + + K, M = params + K = float(K) + M = float(M) + coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x) - gammaln(M) - M * np.log(K)) + Gd = coeff * np.exp(-M * x / K) + return Gd + + +def gamma_dist(bin_values, K, M): + """ + Gamma distribution function + Parameters + ---------- + bin_values : array + scattering intensities + K : int + average number of photons + M : int + number of coherent modes + Returns + ------- + gamma_dist : array + Gamma distribution + Notes + ----- + These implementations are based on the references under + nbinom_distribution() function Notes + + : math :: + P(K) =(\frac{M}{})^M \frac{K^(M-1)}{\Gamma(M)}\exp(-M\frac{K}{}) + """ + + # gamma_dist = (stats.gamma(M, 0., K/M)).pdf(bin_values) + x = bin_values + coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x) - gammaln(M) - M * np.log(K)) + gamma_dist = coeff * np.exp(-M * x / K) + return gamma_dist + + +def nbinom_dist(bin_values, K, M): + """ + Negative Binomial (Poisson-Gamma) distribution function + Parameters + ---------- + bin_values : array + scattering bin values + K : int + number of photons + M : int + number of coherent modes + Returns + ------- + nbinom : array + Negative Binomial (Poisson-Gamma) distribution function + Notes + ----- + The negative-binomial distribution function + :math :: + P(K) = \frac{\\Gamma(K + M)} {\\Gamma(K + 1) ||Gamma(M)}(\frac {M} {M + })^M (\frac {}{M + })^K + + These implementation is based on following references + + References: text [1]_ + .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini, + C. Carona and A. Fluerasu , "Photon statistics and speckle visibility + spectroscopy with partially coherent x-rays" J. Synchrotron Rad., + vol 21, p 1288-1295, 2014. 
+ + """ + co_eff = np.exp(gammaln(bin_values + M) - gammaln(bin_values + 1) - gammaln(M)) + + nbinom = co_eff * np.power(M / (K + M), M) * np.power(K / (M + K), bin_values) + + return nbinom + + +#########poisson +def poisson(x, K): + """Poisson distribution function. + K is average photon counts + In case of low intensity, the beam behavors like particle and + the probability density of photon, P(x), satify this poisson function. + """ + K = float(K) + Pk = np.exp(-K) * power(K, x) / gamma(x + 1) + return Pk + + +def poisson_dist(bin_values, K): + """ + Poisson Distribution + Parameters + --------- + K : int + average counts of photons + bin_values : array + scattering bin values + Returns + ------- + poisson_dist : array + Poisson Distribution + Notes + ----- + These implementations are based on the references under + nbinom_distribution() function Notes + :math :: + P(K) = \frac{^K}{K!}\exp(-) + """ + # poisson_dist = stats.poisson.pmf(K, bin_values) + K = float(K) + poisson_dist = np.exp(-K) * np.power(K, bin_values) / gamma(bin_values + 1) + return poisson_dist + + +def diff_mot_con_factor(times, relaxation_rate, contrast_factor, cf_baseline=0): + """ + This will provide the speckle contrast factor of samples undergoing + a diffusive motion. + + Parameters + ---------- + times : array + integration times + + relaxation_rate : float + relaxation rate + + contrast_factor : float + contrast factor + + cf_baseline : float, optional + the baseline for the contrast factor + + Return + ------ + diff_contrast_factor : array + speckle contrast factor for samples undergoing a diffusive motion + + Notes + ----- + integration times more information - geometric_series function in + skxray.core.utils module + + These implementations are based on the references under + negative_binom_distribution() function Notes + + """ + co_eff = (np.exp(-2 * relaxation_rate * times) - 1 + 2 * relaxation_rate * times) / ( + 2 * (relaxation_rate * times) ** 2 + ) + + return contrast_factor * co_eff + cf_baseline + + +def plot_sxvs(Knorm_bin_edges, spe_cts_all, uid=None, q_ring_center=None, xlim=[0, 3.5], time_steps=None): + """a convinent function to plot sxvs results""" + num_rings = spe_cts_all.shape[1] + num_times = Knorm_bin_edges.shape[0] + sx = int(round(np.sqrt(num_rings))) + if num_rings % sx == 0: + sy = int(num_rings / sx) + else: + sy = int(num_rings / sx + 1) + fig = plt.figure(figsize=(10, 6)) + plt.title("uid= %s" % uid, fontsize=20, y=1.02) + plt.axes(frameon=False) + plt.xticks([]) + plt.yticks([]) + if time_steps is None: + time_steps = [2**i for i in range(num_times)] + for i in range(num_rings): + for j in range(num_times): + axes = fig.add_subplot(sx, sy, i + 1) + axes.set_xlabel("K/") + axes.set_ylabel("P(K)") + (art,) = axes.plot( + Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i], "-o", label=str(time_steps[j]) + " ms" + ) + axes.set_xlim(xlim) + axes.set_title("Q " + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$") + axes.legend(loc="best", fontsize=6) + # plt.show() + fig.tight_layout() + + +def fit_xsvs1( + Knorm_bin_edges, + bin_edges, + spe_cts_all, + K_mean=None, + func="bn", + threshold=1e-7, + uid=None, + q_ring_center=None, + xlim=[0, 3.5], + ylim=None, + time_steps=None, +): + """a convinent function to plot sxvs results + supporting fit function include: + 'bn': Negative Binomaial Distribution + 'gm': Gamma Distribution + 'ps': Poission Distribution + + """ + from lmfit import Model + from scipy.interpolate import UnivariateSpline + + if func == "bn": + mod = Model(nbinom_dist) + elif 
func == "gm": + mod = Model(gamma_dist, indepdent_vars=["K"]) + elif func == "ps": + mod = Model(poisson_dist) + else: + print("the current supporting function include 'bn', 'gm','ps'") + + # g_mod = Model(gamma_dist, indepdent_vars=['K']) + # g_mod = Model( gamma_dist ) + # n_mod = Model(nbinom_dist) + # p_mod = Model(poisson_dist) + # dc_mod = Model(diff_mot_con_factor) + + num_rings = spe_cts_all.shape[1] + num_times = Knorm_bin_edges.shape[0] + + M_val = {} + K_val = {} + sx = int(round(np.sqrt(num_rings))) + if num_rings % sx == 0: + sy = int(num_rings / sx) + else: + sy = int(num_rings / sx + 1) + fig = plt.figure(figsize=(10, 6)) + plt.title("uid= %s" % uid + " Fitting with Negative Binomial Function", fontsize=20, y=1.02) + plt.axes(frameon=False) + plt.xticks([]) + plt.yticks([]) + if time_steps is None: + time_steps = [2**i for i in range(num_times)] + + for i in range(num_rings): + M_val[i] = [] + K_val[i] = [] + for j in range(num_times): + # find the best values for K and M from fitting + if threshold is not None: + rois = get_roi(data=spe_cts_all[j, i], threshold=threshold) + else: + rois = range(len(spe_cts_all[j, i])) + + # print ( rois ) + if func == "bn": + result = mod.fit(spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=5 * 2**j, M=12) + elif func == "gm": + result = mod.fit( + spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j, M=20 + ) + elif func == "ps": + result = mod.fit( + spe_cts_all[j, i][rois], bin_values=bin_edges[j, i][:-1][rois], K=K_mean[i] * 2**j + ) + else: + pass + + if func == "bn": + K_val[i].append(result.best_values["K"]) + M_val[i].append(result.best_values["M"]) + elif func == "gm": + M_val[i].append(result.best_values["M"]) + elif func == "ps": + K_val[i].append(result.best_values["K"]) + else: + pass + + axes = fig.add_subplot(sx, sy, i + 1) + axes.set_xlabel("K/") + axes.set_ylabel("P(K)") + + # Using the best K and M values interpolate and get more values for fitting curve + fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) + fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) + if func == "bn": + fity = nbinom_dist(fitx, K_val[i][j], M_val[i][j]) # M and K are fitted best values + label = "nbinom" + txt = "K=" + "%.3f" % (K_val[i][0]) + "," + "M=" + "%.3f" % (M_val[i][0]) + elif func == "gm": + fity = gamma_dist(fitx, K_mean[i] * 2**j, M_val[i][j]) + label = "gamma" + txt = "M=" + "%.3f" % (M_val[i][0]) + elif func == "ps": + fity = poisson_dist(fitx, K_val[i][j]) + label = "poisson" + txt = "K=" + "%.3f" % (K_val[i][0]) + else: + pass + + if j == 0: + (art,) = axes.plot(fitx_, fity, "-b", label=label) + else: + (art,) = axes.plot(fitx_, fity, "-b") + + if i == 0: + (art,) = axes.plot( + Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i], "o", label=str(time_steps[j]) + " ms" + ) + else: + (art,) = axes.plot( + Knorm_bin_edges[j, i][:-1], + spe_cts_all[j, i], + "o", + ) + + axes.set_xlim(0, 3.5) + if ylim is not None: + axes.set_ylim(ylim) + # Annotate the best K and M values on the plot + + axes.annotate( + r"%s" % txt, + xy=(1, 0.25), + xycoords="axes fraction", + fontsize=10, + horizontalalignment="right", + verticalalignment="bottom", + ) + axes.set_title("Q " + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$") + axes.legend(loc="best", fontsize=6) + # plt.show() + fig.tight_layout() + + return M_val, K_val + + +def plot_xsvs_g2(g2, taus, res_pargs=None, *argv, **kwargs): + """plot g2 results, + g2: one-time correlation function + taus: the time delays + res_pargs, a dict, can 
contains + uid/path/qr_center/qz_center/ + kwargs: can contains + vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)] + ylim/xlim: the limit of y and x + + e.g. + plot_gisaxs_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, vlim=[.99, 1.01] ) + + """ + + if res_pargs is not None: + uid = res_pargs["uid"] + path = res_pargs["path"] + q_ring_center = res_pargs["q_ring_center"] + + else: + if "uid" in kwargs.keys(): + uid = kwargs["uid"] + else: + uid = "uid" + + if "q_ring_center" in kwargs.keys(): + q_ring_center = kwargs["q_ring_center"] + else: + q_ring_center = np.arange(g2.shape[1]) + + if "path" in kwargs.keys(): + path = kwargs["path"] + else: + path = "" + + num_rings = g2.shape[1] + sx = int(round(np.sqrt(num_rings))) + if num_rings % sx == 0: + sy = int(num_rings / sx) + else: + sy = int(num_rings / sx + 1) + + # print (num_rings) + if num_rings != 1: + # fig = plt.figure(figsize=(14, 10)) + fig = plt.figure(figsize=(12, 10)) + plt.axis("off") + # plt.axes(frameon=False) + # print ('here') + plt.xticks([]) + plt.yticks([]) + + else: + fig = plt.figure(figsize=(8, 8)) + + plt.title("uid= %s" % uid, fontsize=20, y=1.06) + for i in range(num_rings): + ax = fig.add_subplot(sx, sy, i + 1) + ax.set_ylabel("beta") + ax.set_title(" Q= " + "%.5f " % (q_ring_center[i]) + r"$\AA^{-1}$") + y = g2[:, i] + # print (y) + ax.semilogx(taus, y, "-o", markersize=6) + # ax.set_ylim([min(y)*.95, max(y[1:])*1.05 ]) + if "ylim" in kwargs: + ax.set_ylim(kwargs["ylim"]) + elif "vlim" in kwargs: + vmin, vmax = kwargs["vlim"] + ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax]) + else: + pass + if "xlim" in kwargs: + ax.set_xlim(kwargs["xlim"]) + + dt = datetime.now() + CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + + fp = path + "g2--uid=%s" % (uid) + CurTime + ".png" + fig.savefig(fp, dpi=fig.dpi) + fig.tight_layout() + # plt.show() + + +def get_xsvs_fit_old1( + spe_cts_all, K_mean, spec_std=None, varyK=True, qth=None, max_bins=None, g2=None, times=None, taus=None +): + """ + Fit the xsvs by Negative Binomial Function using max-likelihood chi-squares + """ + + max_cts = spe_cts_all[0][0].shape[0] - 1 + num_times, num_rings = spe_cts_all.shape + if max_bins is not None: + num_times = min(num_times, max_bins) + + bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges( + num_times, num_rings, K_mean, int(max_cts + 2) + ) + + if g2 is not None: + g2c = g2.copy() + g2c[0] = g2[1] + ML_val = {} + KL_val = {} + K_ = [] + if qth is not None: + range_ = range(qth, qth + 1) + else: + range_ = range(num_rings) + for i in range_: + N = 1 + ML_val[i] = [] + KL_val[i] = [] + + if g2 is not None: + mi_g2 = 1 / (g2c[:, i] - 1) + m_ = np.interp(times, taus, mi_g2) + for j in range(num_times): + x_, x, y = bin_edges[j, i][:-1], Knorm_bin_edges[j, i][:-1], spe_cts_all[j, i] + if spec_std is not None: + yerr = spec_std[j, i] + else: + yerr = None + if g2 is not None: + m0 = m_[j] + else: + m0 = 10 + # resultL = minimize(nbinom_lnlike, [K_mean[i] * 2**j, m0], args=(x_, y) ) + # the normal leastsq + # result_n = leastsq(nbinomres, [K_mean[i] * 2**j, m0], args=(y,x_,N),full_output=1) + # not vary K + + if not varyK: + if yerr is None: + fit_func = nbinomlog1 # _old + else: + fit_func = nbinomlog1 + # print(j,i,m0, y.shape, x_.shape, yerr.shape, N, K_mean[i] * 2**j) + resultL = leastsq( + fit_func, + [m0], + args=(y, x_, yerr, N, K_mean[i] * 2**j), + ftol=1.49012e-38, + xtol=1.49012e-38, + 
factor=100, + full_output=1, + ) + + ML_val[i].append(abs(resultL[0][0])) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + + else: + # vary M and K + if yerr is None: + fit_func = nbinomlog # _old + else: + fit_func = nbinomlog + resultL = leastsq( + fit_func, + [K_mean[i] * 2**j, m0], + args=(y, x_, yerr, N), + ftol=1.49012e-38, + xtol=1.49012e-38, + factor=100, + full_output=1, + ) + + ML_val[i].append(abs(resultL[0][1])) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) + if j == 0: + K_.append(KL_val[i][0]) + + return ML_val, KL_val, np.array(K_) diff --git a/pyCHX/backups/pyCHX-backup/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/backups/pyCHX-backup/chx_xpcs_xsvs_jupyter_V1.py new file mode 100644 index 0000000..31ec64e --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/chx_xpcs_xsvs_jupyter_V1.py @@ -0,0 +1,2464 @@ +import pandas as pds + +from pyCHX.chx_libs import colors, markers +from pyCHX.chx_packages import * + +# from pyCHX.chx_generic_functions import get_short_long_labels_from_qval_dict +# RUN_GUI = False +# from pyCHX.chx_libs import markers + + +# from IPython import get_ipython +# ip = get_ipython() +# ip.run_line_magic( +# "run", "/nsls2/data/chx/shared/CHX_Software/packages/environment_management/chx_analysis_setup.ipynb" +# ) + + +def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1): + """Get Iq at different time edge (difined by slice_num and slice_width) for a list of uids + Input: + uid_list: list of string (uid) + setup_pargs: dict, for caculation of Iq, the key of this dict should include + 'center': beam center + 'dpix': pixel size + 'lambda_': X-ray wavelength + slice_num: slice number of the time edge + slice_edge: the width of the time_edge + Output: + qs: dict, with uid as key, with value as q values + iqsts:dict, with uid as key, with value as iq values + tstamp:dict, with uid as key, with value as time values + + """ + iqsts = {} + tstamp = {} + qs = {} + label = [] + for uid in uid_list: + md = get_meta_data(uid) + luid = md["uid"] + timeperframe = md["cam_acquire_period"] + N = md["cam_num_images"] + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % luid + good_start = 5 + FD = Multifile(filename, good_start, N) + Nimg = FD.end - FD.beg + time_edge = create_time_slice(Nimg, slice_num=slice_num, slice_width=slice_width, edges=None) + time_edge = np.array(time_edge) + good_start + # print( time_edge ) + tstamp[uid] = time_edge[:, 0] * timeperframe + qpt, iqsts[uid], qt = get_t_iqc(FD, time_edge, None, pargs=setup_pargs, nx=1500) + qs[uid] = qt + + return qs, iqsts, tstamp + + +def plot_t_iqtMq2(qt, iqst, tstamp, ax=None, perf=""): + """plot q2~Iq at differnt time""" + if ax is None: + fig, ax = plt.subplots() + q = qt + for i in range(iqst.shape[0]): + yi = iqst[i] * q**2 + time_labeli = perf + "time_%s s" % (round(tstamp[i], 3)) + plot1D( + x=q, + y=yi, + legend=time_labeli, + xlabel="Q (A-1)", + ylabel="I(q)*Q^2", + title="I(q)*Q^2 ~ time", + m=markers[i], + c=colors[i], + ax=ax, + ylim=[-0.001, 0.005], + ) # , xlim=[0.007,0.1] ) + + +def plot_t_iqc_uids(qs, iqsts, tstamps): + """plot q2~Iq at differnt time for a uid list""" + keys = list(qs.keys()) + fig, ax = plt.subplots() + for uid in keys: + qt = qs[uid] + iqst = iqsts[uid] + tstamp = tstamps[uid] + plot_t_iqtMq2(qt, iqst, tstamp, ax=ax, perf=uid + "_") + + +def plot_entries_from_csvlist( + csv_list, + uid_list, + inDir, + key="g2", + qth=1, + legend_size=8, + yshift=0.01, + ymulti=1, + xlim=None, + 
ylim=None, + uid_length=None, + legend=None, + fp_fulluid=True, +): + """ + YG Feb2, 2018, make yshift be also a list + + YG June 9, 2017@CHX + YG Sep 29, 2017@CHX. + plot enteries for a list csvs + Input: + csv_list: list, a list of uid (string) + inDir: string, imported folder for saved analysis results + key: string, plot entry, surport + 'g2' for one-time, + 'iq' for q~iq + 'mean_int_sets' for mean intensity of each roi as a function of frame + TODOLIST:#also can plot the following + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + qth: integer, the intesrest q number + yshift: float, values of shift in y direction + xlim: [x1,x2], for plot x limit + ylim: [y1,y2], for plot y limit + Output: + show the plot + Example: + uid_list = ['5492b9', '54c5e0'] + plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) + """ + + uid_dict = {} + fig, ax = plt.subplots() + for uid in uid_list: + if uid_length is not None: + uid_ = uid[:uid_length] + else: + uid_ = uid + # print(uid_) + uid_dict[uid_] = get_meta_data(uid)["uid"] + # for i, u in enumerate( list( uid_dict.keys() )): + + for i, fp in enumerate(list(csv_list)): + u = uid_list[i] # print(u) + inDiru = inDir + u + "/" + if fp_fulluid: + inDiru = inDir + uid_dict[u] + "/" + else: + inDiru = inDir + u + "/" + d = pds.read_csv(inDiru + fp) + # print(d) + + if key == "g2": + taus = d["tau"][1:] + col = d.columns[qth + 1] + # print( qth+1, col ) + y = d[col][1:] + if legend is None: + leg = u + else: + leg = "uid=%s-->" % u + legend[i] + if isinstance(yshift, list): + yshift_ = yshift[i] + ii = i + 1 + else: + yshift_ = yshift + ii = i + plot1D( + x=taus, + y=y + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=True, + legend=leg, + xlabel="t (sec)", + ylabel="g2", + legend_size=legend_size, + ) + title = "Q = %s" % (col) + ax.set_title(title) + elif key == "imgsum": + y = total_res[key] + plot1D( + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="Frame", + ylabel="imgsum", + ) + + elif key == "iq": + x = total_res["q_saxs"] + y = total_res["iq_saxs"] + plot1D( + x=x, + y=y * ymulti[i] + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + logy=True, + legend=u, + xlabel="Q " r"($\AA^{-1}$)", + ylabel="I(q)", + ) + + else: + d = total_res[key][:, qth] + plot1D( + x=np.arange(len(d)), + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="xx", + ylabel=key, + ) + if key == "mean_int_sets": + ax.set_xlabel("frame ") + if xlim is not None: + ax.set_xlim(xlim) + if ylim is not None: + ax.set_ylim(ylim) + return fig, ax + + +def plot_entries_from_uids( + uid_list, + inDir, + key="g2", + qth=1, + legend_size=8, + yshift=0.01, + ymulti=1, + xlim=None, + ylim=None, + legend=None, + uid_length=None, + filename_list=None, + fp_fulluid=False, + fp_append=None, +): # ,title='' ): + """ + YG Feb2, 2018, make yshift be also a list + + YG June 9, 2017@CHX + YG Sep 29, 2017@CHX. 
+ plot enteries for a list uids + Input: + uid_list: list, a list of uid (string) + inDir: string, imported folder for saved analysis results + key: string, plot entry, surport + 'g2' for one-time, + 'iq' for q~iq + 'mean_int_sets' for mean intensity of each roi as a function of frame + TODOLIST:#also can plot the following + dict_keys(['qt', 'imgsum', 'qval_dict_v', 'bad_frame_list', 'iqst', + 'times_roi', 'iq_saxs', 'g2', 'mask', 'g2_uids', 'taus_uids', + 'g2_fit_paras', 'mean_int_sets', 'roi_mask', 'qval_dict', 'taus', + 'pixel_mask', 'avg_img', 'qval_dict_p', 'q_saxs', 'md']) + qth: integer, the intesrest q number + yshift: float, values of shift in y direction + xlim: [x1,x2], for plot x limit + ylim: [y1,y2], for plot y limit + Output: + show the plot + Example: + uid_list = ['5492b9', '54c5e0'] + plot_entries_from_uids( uid_list, inDir, yshift = 0.01, key= 'g2', ylim=[1, 1.2]) + """ + + uid_dict = {} + fig, ax = plt.subplots() + for uid in uid_list: + if uid_length is not None: + uid_ = uid[:uid_length] + else: + uid_ = uid + # print(uid_) + uid_dict[uid_] = get_meta_data(uid)["uid"] + # for i, u in enumerate( list( uid_dict.keys() )): + for i, u in enumerate(list(uid_list)): + # print(u) + if isinstance(yshift, list): + yshift_ = yshift[i] + ii = i + 1 + else: + yshift_ = yshift + ii = i + if uid_length is not None: + u = u[:uid_length] + inDiru = inDir + u + "/" + if fp_fulluid: + inDiru = inDir + uid_dict[u] + "/" + else: + inDiru = inDir + u + "/" + if filename_list is None: + if fp_append is not None: + filename = "uid=%s%s_Res.h5" % (uid_dict[u], fp_append) + else: + filename = "uid=%s_Res.h5" % uid_dict[u] + else: + filename = filename_list[i] + total_res = extract_xpcs_results_from_h5(filename=filename, import_dir=inDiru, exclude_keys=["g12b"]) + if key == "g2": + d = total_res[key][1:, qth] + taus = total_res["taus"][1:] + if legend is None: + leg = u + else: + leg = "uid=%s-->" % u + legend[i] + plot1D( + x=taus, + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=True, + legend=leg, + xlabel="t (sec)", + ylabel="g2", + legend_size=legend_size, + ) + title = "Q = %s" % (total_res["qval_dict"][qth]) + ax.set_title(title) + elif key == "imgsum": + d = total_res[key] + plot1D( + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="Frame", + ylabel="imgsum", + ) + + elif key == "iq": + + x = total_res["q_saxs"] + y = total_res["iq_saxs"] + plot1D( + x=x, + y=y * ymulti[i] + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + logy=True, + legend=u, + xlabel="Q " r"($\AA^{-1}$)", + ylabel="I(q)", + ) + + else: + d = total_res[key][:, qth] + plot1D( + x=np.arange(len(d)), + y=d + yshift_ * ii, + c=colors[i], + m=markers[i], + ax=ax, + logx=False, + legend=u, + xlabel="xx", + ylabel=key, + ) + if key == "mean_int_sets": + ax.set_xlabel("frame ") + if xlim is not None: + ax.set_xlim(xlim) + if ylim is not None: + ax.set_ylim(ylim) + return fig, ax + + +#################################################################################################### +##For real time analysis## +################################################################################################# + + +def get_iq_from_uids(uids, mask, setup_pargs): + """Y.G. 
developed July 17, 2017 @CHX + Get q-Iq of a uids dict, each uid could corrrespond one frame or a time seriers + uids: dict, val: meaningful decription, key: a list of uids + mask: bool-type 2D array + setup_pargs: dict, at least should contains, the following paramters for calculation of I(q) + + 'Ldet': 4917.50495, + 'center': [988, 1120], + 'dpix': 0.075000003562308848, + 'exposuretime': 0.99998999, + 'lambda_': 1.2845441, + 'path': '/XF11ID/analysis/2017_2/yuzhang/Results/Yang_Pressure/', + + """ + Nuid = len(np.concatenate(np.array(list(uids.values())))) + label = np.zeros([Nuid + 1], dtype=object) + img_data = {} # np.zeros( [ Nuid, avg_img.shape[0], avg_img.shape[1]]) + + n = 0 + for k in list(uids.keys()): + for uid in uids[k]: + + uidstr = "uid=%s" % uid + sud = get_sid_filenames(db[uid]) + # print(sud) + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + md.update(imgs.md) + Nimg = len(imgs) + if Nimg != 1: + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % sud[1] + mask0, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md, + filename, + force_compress=False, + para_compress=True, + bad_pixel_threshold=1e14, + bins=1, + num_sub=100, + num_max_para_process=500, + with_pickle=True, + ) + else: + avg_img = imgs[0] + show_img( + avg_img, + vmin=0.00001, + vmax=1e1, + logs=True, + aspect=1, # save_format='tif', + image_name=uidstr + "_img_avg", + save=True, + path=setup_pargs["path"], + cmap=cmap_albula, + ) + + setup_pargs["uid"] = uidstr + + qp_saxs, iq_saxs, q_saxs = get_circular_average(avg_img, mask, pargs=setup_pargs, save=True) + if n == 0: + iqs = np.zeros([len(q_saxs), Nuid + 1]) + iqs[:, 0] = q_saxs + label[0] = "q" + img_data[k + "_" + uid] = avg_img + iqs[:, n + 1] = iq_saxs + label[n + 1] = k + "_" + uid + n += 1 + plot_circular_average( + qp_saxs, + iq_saxs, + q_saxs, + pargs=setup_pargs, + xlim=[q_saxs.min(), q_saxs.max() * 0.9], + ylim=[iq_saxs.min(), iq_saxs.max()], + ) + if "filename" in list(setup_pargs.keys()): + filename = setup_pargs["filename"] + else: + filename = "qIq.csv" + pd = save_arrays(iqs, label=label, dtype="array", filename=filename, path=setup_pargs["path"], return_res=True) + return pd, img_data + + +def wait_func(wait_time=2): + print("Waiting %s secdons for upcoming data..." % wait_time) + time.sleep(wait_time) + # print( 'Starting to do something here...') + + +def wait_data_acquistion_finish(uid, wait_time=2, max_try_num=3): + """check the completion of a data uid acquistion + Parameter: + uid: + wait_time: the waiting step in unit of second + check_func: the function to check the completion + max_try_num: the maximum number for waiting + Return: + True: completion + False: not completion (include waiting time exceeds the max_wait_time) + + """ + FINISH = False + Fake_FINISH = True + w = 0 + sleep_time = 0 + while not FINISH: + try: + get_meta_data(uid) + FINISH = True + print("The data acquistion finished.") + print("Starting to do something here...") + except: + wait_func(wait_time=wait_time) + w += 1 + print("Try number: %s" % w) + if w > max_try_num: + print("There could be something going wrong with data acquistion.") + print("Force to terminate after %s tries." % w) + FINISH = True + Fake_FINISH = False + sleep_time += wait_time + return FINISH * Fake_FINISH # , sleep_time + + +def get_uids_by_range(start_uidth=-1, end_uidth=0): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end uid number, i.e. 
-10, -1 + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = list([db[n] for n in range(start_uidth, end_uidth)]) + if len(hdrs) != 0: + print("Totally %s uids are found." % (len(hdrs))) + + uids = [] # short uid + fuids = [] # full uid + for hdr in hdrs: + fuid = hdr["start"]["uid"] + uids.append(fuid[:8]) + fuids.append(fuid) + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(uids), np.array(fuids) + + +def get_uids_in_time_period(start_time, stop_time): + """Y.G. Dec 22, 2016 + A wrap funciton to find uids by giving start and end time + Return: + uids: list, uid with 8 character length + fuids: list, uid with full length + + """ + hdrs = list(db(start_time=start_time, stop_time=stop_time)) + if len(hdrs) != 0: + print("Totally %s uids are found." % (len(hdrs))) + + uids = [] # short uid + fuids = [] # full uid + for hdr in hdrs: + fuid = hdr["start"]["uid"] + uids.append(fuid[:8]) + fuids.append(fuid) + uids = uids[::-1] + fuids = fuids[::-1] + return np.array(uids), np.array(fuids) + + +def do_compress_on_line(start_time, stop_time, mask_dict=None, mask=None, wait_time=2, max_try_num=3): + """Y.G. Mar 10, 2017 + Do on-line compress by giving start time and stop time + Parameters: + mask_dict: a dict, e.g., {mask1: mask_array1, mask2:mask_array2} + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + Return: + running time + """ + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + print(fuids) + if len(fuids): + for uid in fuids: + print("*" * 50) + print("Do compress for %s now..." % uid) + if db[uid]["start"]["plan_name"] == "count": + finish = wait_data_acquistion_finish(uid, wait_time, max_try_num) + if finish: + try: + md = get_meta_data(uid) + compress_multi_uids( + [uid], + mask=mask, + mask_dict=mask_dict, + force_compress=False, + para_compress=True, + bin_frame_number=1, + ) + + update_olog_uid(uid=md["uid"], text="Data are on-line sparsified!", attachments=None) + except: + print("There are something wrong with this data: %s..." % uid) + print("*" * 50) + return time.time() - t0 + + +def realtime_xpcs_analysis( + start_time, stop_time, run_pargs, md_update=None, wait_time=2, max_try_num=3, emulation=False, clear_plot=False +): + """Y.G. Mar 10, 2017 + Do on-line xpcs by giving start time and stop time + Parameters: + run_pargs: all the run control parameters, including giving roi_mask + md_update: if not None, a dict, will update all the found uid metadata by this md_update + e.g, + md['beam_center_x'] = 1012 + md['beam_center_y']= 1020 + md['det_distance']= 16718.0 + wait_time: search interval time + max_try_num: for each found uid, will try max_try_num*wait_time seconds + emulation: if True, it will only check dataset and not do real analysis + Return: + running time + """ + + t0 = time.time() + uids, fuids = get_uids_in_time_period(start_time, stop_time) + # print( fuids ) + if len(fuids): + for uid in fuids: + print("*" * 50) + # print('Do compress for %s now...'%uid) + print("Starting analysis for %s now..." 
% uid) + if db[uid]["start"]["plan_name"] == "count" or db[uid]["start"]["plan_name"] == "manual_count": + # if db[uid]['start']['dtype'] =='xpcs': + finish = wait_data_acquistion_finish(uid, wait_time, max_try_num) + if finish: + try: + md = get_meta_data(uid) + ##corect some metadata + if md_update is not None: + md.update(md_update) + # if 'username' in list(md.keys()): + # try: + # md_cor['username'] = md_update['username'] + # except: + # md_cor = None + # uid = uid[:8] + # print(md_cor) + if not emulation: + # suid=uid[:6] + run_xpcs_xsvs_single( + uid, run_pargs=run_pargs, md_cor=None, return_res=False, clear_plot=clear_plot + ) + # update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) + except: + print("There are something wrong with this data: %s..." % uid) + else: + print("\nThis is not a XPCS series. We will simiply ignore it.") + print("*" * 50) + + # print( 'Sleep 10 sec here!!!') + # time.sleep(10) + + return time.time() - t0 + + +#################################################################################################### +##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## +################################################################################################# +def compress_multi_uids( + uids, + mask, + mask_dict=None, + force_compress=False, + para_compress=True, + bin_frame_number=1, + reverse=True, + rot90=False, + use_local_disk=True, +): + """Compress time series data for a set of uids + Parameters: + uids: list, a list of uid + mask: bool array, mask array + force_compress: default is False, just load the compresssed data; + if True, will compress it to overwrite the old compressed data + para_compress: apply the parallel compress algorithm + bin_frame_number: + Return: + None, save the compressed data in, by default, /XF11ID/analysis/Compressed_Data with filename as + '/uid_%s.cmp' uid is the full uid string + + e.g., compress_multi_uids( uids, mask, force_compress= False, bin_frame_number=1 ) + + """ + for uid in uids: + print("UID: %s is in processing..." 
% uid) + if validate_uid(uid): + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=reverse, rot90=rot90) + sud = get_sid_filenames(db[uid]) + for pa in sud[2]: + if "master.h5" in pa: + data_fullpath = pa + print(imgs, data_fullpath) + if mask_dict is not None: + mask = mask_dict[md["detector"]] + print("The detecotr is: %s" % md["detector"]) + md.update(imgs.md) + if not use_local_disk: + cmp_path = "/nsls2/xf11id1/analysis/Compressed_Data" + else: + cmp_path = "/tmp_data/compressed" + cmp_path = "/nsls2/xf11id1/analysis/Compressed_Data" + if bin_frame_number == 1: + cmp_file = "/uid_%s.cmp" % md["uid"] + else: + cmp_file = "/uid_%s_bined--%s.cmp" % (md["uid"], bin_frame_number) + filename = cmp_path + cmp_file + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md, + filename, + force_compress=force_compress, + para_compress=para_compress, + bad_pixel_threshold=1e14, + reverse=reverse, + rot90=rot90, + bins=bin_frame_number, + num_sub=100, + num_max_para_process=500, + with_pickle=True, + direct_load_data=use_local_disk, + data_path=data_fullpath, + ) + + print("Done!") + + +#################################################################################################### +##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## +################################################################################################# + + +def get_two_time_mulit_uids( + uids, + roi_mask, + norm=None, + bin_frame_number=1, + path=None, + force_generate=False, + md=None, + imgs=None, + direct_load_data=False, + compress_path=None, +): + """Calculate two time correlation by using auto_two_Arrayc func for a set of uids, + if the two-time resutls are already created, by default (force_generate=False), just pass + Parameters: + uids: list, a list of uid + roi_mask: bool array, roi mask array + norm: the normalization array + path: string, where to save the two time + force_generate: default, False, if the two-time resutls are already created, just pass + if True, will force to calculate two-time no matter exist or not + + Return: + None, save the two-time in as path + uid + 'uid=%s_g12b'%uid + + e.g., + get_two_time_mulit_uids( guids, roi_mask, norm= norm,bin_frame_number=1, + path= data_dir,force_generate=False ) + + """ + + qind, pixelist = roi.extract_label_indices(roi_mask) + for uid in uids: + print("UID: %s is in processing..." % uid) + if not direct_load_data: + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + else: + pass + N = len(imgs) + # print( N ) + if compress_path is None: + compress_path = "/XF11ID/analysis/Compressed_Data/" + if bin_frame_number == 1: + filename = "%s" % compress_path + "uid_%s.cmp" % md["uid"] + else: + filename = "%s" % compress_path + "uid_%s_bined--%s.cmp" % (md["uid"], bin_frame_number) + + FD = Multifile(filename, 0, N // bin_frame_number) + # print( FD.beg, FD.end) + uid_ = md["uid"] + os.makedirs(path + uid_ + "/", exist_ok=True) + filename = path + uid_ + "/" + "uid=%s_g12b" % uid + doit = True + if not force_generate: + if os.path.exists(filename + ".npy"): + doit = False + print("The two time correlation function for uid=%s is already calculated. Just pass..." 
% uid) + if doit: + data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() + g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None) + np.save(filename, g12b) + del g12b + print("The two time correlation function for uid={} is saved as {}.".format(uid, filename)) + + +def get_series_g2_from_g12( + g12b, fra_num_by_dose=None, dose_label=None, good_start=0, log_taus=True, num_bufs=8, time_step=1 +): + """ + Get a series of one-time function from two-time by giving noframes + Parameters: + g12b: a two time function + good_start: the start frame number + fra_num_by_dose: a list, correlation number starting from index 0, + if this number is larger than g12b length, will give a warning message, and + will use g12b length to replace this number + by default is None, will = [ g12b.shape[0] ] + dose_label: the label of each dose, also is the keys of returned g2, lag + log_taus: if true, will only return a g2 with the correponding tau values + as calculated by multi-tau defined taus + Return: + + g2_series, a dict, with keys as dose_label (corrected on if warning message is given) + lag_steps, the corresponding lags + + """ + g2 = {} + lag_steps = {} + L, L, qs = g12b.shape + if fra_num_by_dose is None: + fra_num_by_dose = [L] + if dose_label is None: + dose_label = fra_num_by_dose + fra_num_by_dose = sorted(fra_num_by_dose) + dose_label = sorted(dose_label) + for i, good_end in enumerate(fra_num_by_dose): + key = round(dose_label[i], 3) + # print( good_end ) + if good_end > L: + warnings.warn( + "Warning: the dose value is too large, and please check the maxium dose in this data set and give a smaller dose value. We will use the maxium dose of the data." + ) + good_end = L + if not log_taus: + g2[key] = get_one_time_from_two_time(g12b[good_start:good_end, good_start:good_end, :]) + else: + # print( good_end, num_bufs ) + lag_step = get_multi_tau_lag_steps(good_end, num_bufs) + lag_step = lag_step[lag_step < good_end - good_start] + # print( len(lag_steps ) ) + lag_steps[key] = lag_step * time_step + g2[key] = get_one_time_from_two_time(g12b[good_start:good_end, good_start:good_end, :])[lag_step] + + return lag_steps, g2 + + +def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): + """ + Calculate the frame number to be correlated by giving a X-ray exposure dose + + Paramters: + exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation) + exp_time: float, the exposure time for a xpcs time sereies + dead_time: dead time for the fast shutter reponse time, CHX = 2ms + Return: + noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) + e.g., + + no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ], + exp_time = 1.34, dead_time = 2) + + --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) + """ + return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att) + + +def get_series_one_time_mulit_uids( + uids, + qval_dict, + trans=None, + good_start=0, + path=None, + exposure_dose=None, + dead_time=0, + num_bufs=8, + save_g2=True, + md=None, + imgs=None, + direct_load_data=False, +): + """Calculate a dose depedent series of one time correlations from two time + Parameters: + uids: list, a list of uid + trans: list, same length as uids, the transmission list + exposure_dose: list, a list x-ray exposure dose; + by default is None, namely, = [ max_frame_number ], + can be [3.34 334, 3340] in unit of ms, in unit of exp_time(ms)*N(fram num)*att( attenuation) + path: string, where to 
load the two time, if None, ask for it + the real g12 path is two_time_path + uid + '/' + qval_dict: the dictionary for q values + Return: + taus_uids, with keys as uid, and + taus_uids[uid] is also a dict, with keys as dose_frame + g2_uids, with keys as uid, and + g2_uids[uid] is also a dict, with keys as dose_frame + will also save g2 results to the 'path' + """ + + if path is None: + print("Please calculate two time function first by using get_two_time_mulit_uids function.") + else: + taus_uids = {} + g2_uids = {} + for i, uid in enumerate(uids): + print("UID: %s is in processing..." % uid) + if not direct_load_data: + md = get_meta_data(uid) + imgs = load_data(uid, md["detector"], reverse=True) + # print(md) + detectors = md["detector"] + if isinstance(detectors, list): + if len(detectors) > 1: + if "_image" in md["detector"]: + pref = md["detector"][:-5] + else: + pref = md["detector"] + for k in [ + "beam_center_x", + "beam_center_y", + "cam_acquire_time", + "cam_acquire_period", + "cam_num_images", + "wavelength", + "det_distance", + "photon_energy", + ]: + md[k] = md[pref + "%s" % k] + + else: + pass + N = len(imgs) + if exposure_dose is None: + exposure_dose = [N] + try: + g2_path = path + uid + "/" + g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) + except: + g2_path = path + md["uid"] + "/" + g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) + try: + exp_time = float(md["cam_acquire_time"]) # *1000 #from second to ms + except: + exp_time = float(md["exposure time"]) # * 1000 #from second to ms + if trans is None: + try: + transi = md["transmission"] + except: + transi = [1] + else: + transi = trans[i] + fra_num_by_dose = get_fra_num_by_dose( + exp_dose=exposure_dose, exp_time=exp_time, dead_time=dead_time, att=transi + ) + + print("uid: %s--> fra_num_by_dose: %s" % (uid, fra_num_by_dose)) + + taus_uid, g2_uid = get_series_g2_from_g12( + g12b, + fra_num_by_dose=fra_num_by_dose, + dose_label=exposure_dose, + good_start=good_start, + num_bufs=num_bufs, + time_step=exp_time, + ) # md['cam_acquire_period'] ) + g2_uids["uid_%03d=%s" % (i, uid)] = g2_uid + taus_uids["uid_%03d=%s" % (i, uid)] = taus_uid + if save_g2: + for k in list(g2_uid.keys()): + # print(k) + uid_ = uid + "_fra_%s_%s" % (good_start, k) + save_g2_general( + g2_uid[k], + taus=taus_uid[k], + qr=np.array(list(qval_dict.values()))[:, 0], + uid=uid_ + "_g2.csv", + path=g2_path, + return_res=False, + ) + return taus_uids, g2_uids + + +def plot_dose_g2( + taus_uids, + g2_uids, + qval_dict, + qth_interest=None, + ylim=[0.95, 1.05], + vshift=0.1, + fit_res=None, + geometry="saxs", + filename="dose" + "_g2", + legend_size=None, + path=None, + function=None, + g2_labels=None, + ylabel="g2_dose", + append_name="_dose", + return_fig=False, +): + """Plot a does-dependent g2 + taus_uids, dict, with format as {uid1: { dose1: tau_1, dose2: tau_2...}, uid2: ...} + g2_uids, dict, with format as {uid1: { dose1: g2_1, dose2: g2_2...}, uid2: ...} + qval_dict: a dict of qvals + vshift: float, vertical shift value of different dose of g2 + + """ + + uids = sorted(list(taus_uids.keys())) + # print( uids ) + dose = sorted(list(taus_uids[uids[0]].keys())) + if qth_interest is None: + g2_dict = {} + taus_dict = {} + if g2_labels is None: + g2_labels = [] + for i in range(len(dose)): + g2_dict[i + 1] = [] + taus_dict[i + 1] = [] + # print ( i ) + for j in range(len(uids)): + # print( uids[i] , dose[j]) + g2_dict[i + 1].append(g2_uids[uids[j]][dose[i]] + vshift * i) + taus_dict[i + 1].append(taus_uids[uids[j]][dose[i]]) + if j == 0: + 
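+ # --- Illustrative sketch (added comment, not original code) ------------------
+ # The dose-resolved g2 curves plotted here are produced upstream by
+ # get_series_g2_from_g12: slices of a two-time function g12b are reduced to
+ # one-time g2 at frame counts set by the X-ray dose. A minimal usage sketch
+ # (g12b and the exposure/dose values are assumed inputs; units follow the
+ # docstrings above):
+ #
+ #   fra = get_fra_num_by_dose(exp_dose=[100, 500, 2000],
+ #                             exp_time=1.34, dead_time=2)
+ #   taus_d, g2_d = get_series_g2_from_g12(g12b, fra_num_by_dose=fra,
+ #                                         dose_label=[100, 500, 2000],
+ #                                         num_bufs=8)
+ #   # taus_d / g2_d are dicts keyed by dose; get_series_one_time_mulit_uids
+ #   # wraps such dicts per uid into the structure plot_dose_g2 consumes.
+ # ------------------------------------------------------------------------------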
g2_labels.append("Dose_%s" % dose[i]) + + plot_g2_general( + g2_dict, + taus_dict, + ylim=[ylim[0], ylim[1] + vshift * len(dose)], + qval_dict=qval_dict, + fit_res=None, + geometry=geometry, + filename=filename, + path=path, + function=function, + ylabel=ylabel, + g2_labels=g2_labels, + append_name=append_name, + ) + + else: + fig, ax = plt.subplots() + q = qval_dict[qth_interest - 1][0] + j = 0 + for uid in uids: + # uid = uids[0] + # print( uid ) + dose_list = sorted(list(taus_uids["%s" % uid].keys())) + # print( dose_list ) + for i, dose in enumerate(dose_list): + dose = float(dose) + if j == 0: + legend = "dose_%s" % round(dose, 2) + else: + legend = "" + + # print( markers[i], colors[i] ) + + plot1D( + x=taus_uids["%s" % uid][dose_list[i]], + y=g2_uids["%s" % uid][dose_list[i]][:, qth_interest] + i * vshift, + logx=True, + ax=ax, + legend=legend, + m=markers[i], + c=colors[i], + lw=3, + title="%s_Q=%s" % (uid, q) + r"$\AA^{-1}$", + legend_size=legend_size, + ) + ylabel = "g2--Dose (trans*exptime_sec)" + j += 1 + + ax.set_ylabel(r"$%s$" % ylabel + "(" + r"$\tau$" + ")") + ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16) + ax.set_ylim(ylim) + if return_fig: + return fig, ax + # return taus_dict, g2_dict + + +def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse=True, clear_plot=False): + """Y.G. Dec 22, 2016 + Run XPCS XSVS analysis for a single uid + Parameters: + uid: unique id + run_pargs: dict, control run type and setup parameters, such as q range et.al. + reverse:,True, revserse the image upside down + Return: + save analysis result to csv/png/h5 files + return_res: if true, return a dict, containing g2,g4,g12,contrast et.al. depending on the run type + An example for the run_pargs: + + run_pargs= dict( + scat_geometry = 'gi_saxs' #suport 'saxs', 'gi_saxs', 'ang_saxs' (for anisotropics saxs or flow-xpcs) + force_compress = True,#False, + para_compress = True, + run_fit_form = False, + run_waterfall = True,#False, + run_t_ROI_Inten = True, + #run_fit_g2 = True, + fit_g2_func = 'stretched', + run_one_time = True,#False, + run_two_time = True,#False, + run_four_time = False, + run_xsvs=True, + att_pdf_report = True, + show_plot = False, + + CYCLE = '2016_3', + mask_path = '/XF11ID/analysis/2016_3/masks/', + mask_name = 'Nov28_4M_SAXS_mask.npy', + good_start = 5, + + uniformq = True, + inner_radius= 0.005, #0.005 for 50 nm, 0.006, #for 10nm/coralpor + outer_radius = 0.04, #0.04 for 50 nm, 0.05, #for 10nm/coralpor + num_rings = 12, + gap_ring_number = 6, + number_rings= 1, + #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + #width = 0.0002 + qth_interest = 1, #the intested single qth + use_sqnorm = False, + use_imgsum_norm = True, + + pdf_version = '_1' #for pdf report name + ) + + md_cor: if not None, will update the metadata with md_cor + + """ + + scat_geometry = run_pargs["scat_geometry"] + force_compress = run_pargs["force_compress"] + para_compress = run_pargs["para_compress"] + run_fit_form = run_pargs["run_fit_form"] + run_waterfall = run_pargs["run_waterfall"] + run_t_ROI_Inten = run_pargs["run_t_ROI_Inten"] + + # run_fit_g2 = run_pargs['run_fit_g2'], + fit_g2_func = run_pargs["fit_g2_func"] + run_one_time = run_pargs["run_one_time"] + run_two_time = run_pargs["run_two_time"] + run_four_time = run_pargs["run_four_time"] + run_xsvs = run_pargs["run_xsvs"] + try: + run_dose = run_pargs["run_dose"] + except: + run_dose = False + ############################################################### + if scat_geometry == "gi_saxs": # to be 
done for other types + run_xsvs = False + ############################################################### + + ############################################################### + if scat_geometry == "ang_saxs": + run_xsvs = False + run_waterfall = False + run_two_time = False + run_four_time = False + run_t_ROI_Inten = False + ############################################################### + if "bin_frame" in list(run_pargs.keys()): + bin_frame = run_pargs["bin_frame"] + bin_frame_number = run_pargs["bin_frame_number"] + else: + bin_frame = False + if not bin_frame: + bin_frame_number = 1 + + att_pdf_report = run_pargs["att_pdf_report"] + show_plot = run_pargs["show_plot"] + CYCLE = run_pargs["CYCLE"] + mask_path = run_pargs["mask_path"] + mask_name = run_pargs["mask_name"] + good_start = run_pargs["good_start"] + use_imgsum_norm = run_pargs["use_imgsum_norm"] + try: + use_sqnorm = run_pargs["use_sqnorm"] + except: + use_sqnorm = False + try: + inc_x0 = run_pargs["inc_x0"] + inc_y0 = run_pargs["inc_y0"] + except: + inc_x0 = None + inc_y0 = None + + # for different scattering geogmetry, we only need to change roi_mask + # and qval_dict + qval_dict = run_pargs["qval_dict"] + if scat_geometry != "ang_saxs": + roi_mask = run_pargs["roi_mask"] + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + + else: + roi_mask_p = run_pargs["roi_mask_p"] + qval_dict_p = run_pargs["qval_dict_p"] + roi_mask_v = run_pargs["roi_mask_v"] + qval_dict_v = run_pargs["qval_dict_v"] + + if scat_geometry == "gi_saxs": + refl_x0 = run_pargs["refl_x0"] + refl_y0 = run_pargs["refl_y0"] + Qr, Qz, qr_map, qz_map = run_pargs["Qr"], run_pargs["Qz"], run_pargs["qr_map"], run_pargs["qz_map"] + + taus = None + g2 = None + tausb = None + g2b = None + g12b = None + taus4 = None + g4 = None + times_xsv = None + contrast_factorL = None + qth_interest = run_pargs["qth_interest"] + pdf_version = run_pargs["pdf_version"] + + try: + username = run_pargs["username"] + except: + username = getpass.getuser() + + data_dir0 = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") + os.makedirs(data_dir0, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir0) + # uid = (sys.argv)[1] + print("*" * 40) + print("*" * 5 + "The processing uid is: %s" % uid + "*" * 5) + print("*" * 40) + suid = uid # [:6] + data_dir = os.path.join(data_dir0, "%s/" % suid) + os.makedirs(data_dir, exist_ok=True) + print("Results from this analysis will be stashed in the directory %s" % data_dir) + md = get_meta_data(uid) + uidstr = "uid=%s" % uid[:6] + imgs = load_data(uid, md["detector"], reverse=reverse) + md.update(imgs.md) + Nimg = len(imgs) + if md_cor is not None: + md.update(md_cor) + + if inc_x0 is not None: + md["beam_center_x"] = inc_x0 + if inc_y0 is not None: + md["beam_center_y"] = inc_y0 + + # print( run_pargs ) + # print( run_pargs['inc_x0'],run_pargs['inc_y0'] ) + # print( inc_x0, inc_y0 ) + + if md["detector"] == "eiger1m_single_image": + Chip_Mask = np.load("/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy") + elif md["detector"] == "eiger4m_single_image" or md["detector"] == "image": + Chip_Mask = np.array(np.load("/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy"), dtype=bool) + BadPix = np.load("/XF11ID/analysis/2018_1/BadPix_4M.npy") + Chip_Mask.ravel()[BadPix] = 0 + elif md["detector"] == "eiger500K_single_image": + Chip_Mask = 1 # to be defined the chip mask + else: + Chip_Mask = 1 + # 
show_img(Chip_Mask) + + center = [int(md["beam_center_y"]), int(md["beam_center_x"])] # beam center [y,x] for python image + + pixel_mask = 1 - np.int_(np.array(imgs.md["pixel_mask"], dtype=bool)) + print("The data are: %s" % imgs) + + if False: + print_dict( + md, + [ + "suid", + "number of images", + "uid", + "scan_id", + "start_time", + "stop_time", + "sample", + "Measurement", + "acquire period", + "exposure time", + "det_distanc", + "beam_center_x", + "beam_center_y", + ], + ) + ## Overwrite Some Metadata if Wrong Input + dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( + md, Nimg, inc_x0=inc_x0, inc_y0=inc_y0, pixelsize=7.5 * 10 * (-5) + ) + + print("The beam center is: %s" % center) + + timeperframe *= bin_frame_number + + setup_pargs = dict( + uid=uidstr, + dpix=dpix, + Ldet=Ldet, + lambda_=lambda_, + exposuretime=exposuretime, + timeperframe=timeperframe, + center=center, + path=data_dir, + ) + # print_dict( setup_pargs ) + + mask = load_mask(mask_path, mask_name, plot_=False, image_name=uidstr + "_mask", reverse=reverse) + mask *= pixel_mask + if md["detector"] == "eiger4m_single_image": + mask[:, 2069] = 0 # False #Concluded from the previous results + show_img(mask, image_name=uidstr + "_mask", save=True, path=data_dir) + mask_load = mask.copy() + imgsa = apply_mask(imgs, mask) + + img_choice_N = 2 + img_samp_index = random.sample(range(len(imgs)), img_choice_N) + avg_img = get_avg_img(imgsa, img_samp_index, plot_=False, uid=uidstr) + + if avg_img.max() == 0: + print("There are no photons recorded for this uid: %s" % uid) + print("The data analysis should be terminated! Please try another uid.") + + else: + if scat_geometry != "saxs": + show_img( + avg_img, + vmin=0.1, + vmax=np.max(avg_img * 0.1), + logs=True, + image_name=uidstr + "_%s_frames_avg" % img_choice_N, + save=True, + path=data_dir, + ) + else: + show_saxs_qmap( + avg_img, + setup_pargs, + width=400, + show_pixel=False, + vmin=0.1, + vmax=np.max(avg_img), + logs=True, + image_name=uidstr + "_%s_frames_avg" % img_choice_N, + ) + + compress = True + photon_occ = len(np.where(avg_img)[0]) / (imgsa[0].size) + # compress = photon_occ < .4 #if the photon ocupation < 0.5, do compress + print("The non-zeros photon occupation is %s." 
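+ # Note on the pixelsize fallback passed to check_lost_metadata() above:
+ # 7.5 * 10 * (-5) evaluates to -375, whereas the Eiger pixel pitch is 75 um,
+ # so the intended expression was presumably
+ #     pixelsize = 7.5 * 10 ** (-5)   # 7.5e-5 m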
% (photon_occ)) + print("Will " + "Always " + ["NOT", "DO"][compress] + " apply compress process.") + # good_start = 5 #make the good_start at least 0 + t0 = time.time() + filename = "/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % md["uid"] + mask, avg_img, imgsum, bad_frame_list = compress_eigerdata( + imgs, + mask, + md, + filename, + force_compress=force_compress, + para_compress=para_compress, + bad_pixel_threshold=1e14, + bins=bin_frame_number, + num_sub=100, + num_max_para_process=500, + with_pickle=True, + ) + min_inten = 10 + good_start = max(good_start, np.where(np.array(imgsum) > min_inten)[0][0]) + print("The good_start frame number is: %s " % good_start) + FD = Multifile(filename, good_start, len(imgs)) + # FD = Multifile(filename, good_start, 100) + uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) + print(uid_) + plot1D( + y=imgsum[np.array([i for i in np.arange(good_start, len(imgsum)) if i not in bad_frame_list])], + title=uidstr + "_imgsum", + xlabel="Frame", + ylabel="Total_Intensity", + legend="imgsum", + ) + run_time(t0) + + mask = mask * Chip_Mask + + # %system free && sync && echo 3 > /proc/sys/vm/drop_caches && free + ## Get bad frame list by a polynominal fit + bad_frame_list = get_bad_frame_list( + imgsum, + fit=True, + plot=True, + polyfit_order=30, + scale=5.5, + good_start=good_start, + uid=uidstr, + path=data_dir, + ) + print("The bad frame list length is: %s" % len(bad_frame_list)) + + ### Creat new mask by masking the bad pixels and get new avg_img + if False: + mask = mask_exclude_badpixel(bp, mask, md["uid"]) + avg_img = get_avg_imgc(FD, sampling=1, bad_frame_list=bad_frame_list) + + show_img( + avg_img, + vmin=0.001, + vmax=np.max(avg_img), + logs=True, + aspect=1, # save_format='tif', + image_name=uidstr + "_img_avg", + save=True, + path=data_dir, + cmap=cmap_albula, + ) + + imgsum_y = imgsum[np.array([i for i in np.arange(len(imgsum)) if i not in bad_frame_list])] + imgsum_x = np.arange(len(imgsum_y)) + save_lists( + [imgsum_x, imgsum_y], label=["Frame", "Total_Intensity"], filename=uidstr + "_img_sum_t", path=data_dir + ) + plot1D( + y=imgsum_y, + title=uidstr + "_img_sum_t", + xlabel="Frame", + ylabel="Total_Intensity", + legend="imgsum", + save=True, + path=data_dir, + ) + + ############for SAXS and ANG_SAXS (Flow_SAXS) + if scat_geometry == "saxs" or scat_geometry == "ang_saxs": + + # show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, + # image_name= uidstr + '_img_avg', save=True) + # np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) + + # try: + # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) + # except: + # hmask=1 + hmask = 1 + qp_saxs, iq_saxs, q_saxs = get_circular_average( + avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True + ) + + plot_circular_average( + qp_saxs, + iq_saxs, + q_saxs, + pargs=setup_pargs, + xlim=[q_saxs.min(), q_saxs.max()], + ylim=[iq_saxs.min(), iq_saxs.max()], + ) + + # pd = trans_data_to_pd( np.where( hmask !=1), + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + + # pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) + + # mask =np.array( mask * hmask, dtype=bool) + # show_img( mask ) + + if run_fit_form: + form_res = fit_form_factor( + q_saxs, + iq_saxs, + guess_values={"radius": 2500, "sigma": 0.05, "delta_rho": 1e-10}, + fit_range=[0.0001, 0.015], + fit_variables={"radius": T, "sigma": T, "delta_rho": T}, + res_pargs=setup_pargs, + xlim=[0.0001, 
0.015], + ) + + show_ROI_on_image( + avg_img, + roi_mask, + center, + label_on=False, + rwidth=700, + alpha=0.9, + save=True, + path=data_dir, + uid=uidstr, + vmin=np.min(avg_img), + vmax=np.max(avg_img), + ) + + qr = np.array([qval_dict[k][0] for k in list(qval_dict.keys())]) + plot_qIq_with_ROI( + q_saxs, + iq_saxs, + qr, + logs=True, + uid=uidstr, + xlim=[q_saxs.min(), q_saxs.max()], + ylim=[iq_saxs.min(), iq_saxs.max()], + save=True, + path=data_dir, + ) + + if scat_geometry != "ang_saxs": + Nimg = FD.end - FD.beg + time_edge = create_time_slice(N=Nimg, slice_num=3, slice_width=1, edges=None) + time_edge = np.array(time_edge) + good_start + # print( time_edge ) + qpt, iqst, qt = get_t_iqc(FD, time_edge, mask * Chip_Mask, pargs=setup_pargs, nx=1500) + plot_t_iqc( + qt, + iqst, + time_edge, + pargs=setup_pargs, + xlim=[qt.min(), qt.max()], + ylim=[iqst.min(), iqst.max()], + save=True, + ) + + elif scat_geometry == "gi_waxs": + # roi_mask[badpixel] = 0 + qr = np.array([qval_dict[k][0] for k in list(qval_dict.keys())]) + show_ROI_on_image( + avg_img, roi_mask, label_on=True, alpha=0.5, save=True, path=data_dir, uid=uidstr + ) # , vmin=1, vmax=15) + + elif scat_geometry == "gi_saxs": + show_img( + avg_img, + vmin=0.1, + vmax=np.max(avg_img * 0.1), + logs=True, + image_name=uidstr + "_img_avg", + save=True, + path=data_dir, + ) + ticks_ = get_qzr_map(qr_map, qz_map, inc_x0, Nzline=10, Nrline=10) + ticks = ticks_[:4] + plot_qzr_map(qr_map, qz_map, inc_x0, ticks=ticks_, data=avg_img, uid=uidstr, path=data_dir) + show_qzr_roi(avg_img, roi_mask, inc_x0, ticks, alpha=0.5, save=True, path=data_dir, uid=uidstr) + qr_1d_pds = cal_1d_qr(avg_img, Qr, Qz, qr_map, qz_map, inc_x0, setup_pargs=setup_pargs) + plot_qr_1d_with_ROI( + qr_1d_pds, + qr_center=np.unique(np.array(list(qval_dict.values()))[:, 0]), + loglog=False, + save=True, + uid=uidstr, + path=data_dir, + ) + + Nimg = FD.end - FD.beg + time_edge = create_time_slice(N=Nimg, slice_num=3, slice_width=1, edges=None) + time_edge = np.array(time_edge) + good_start + qrt_pds = get_t_qrc(FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid=uidstr) + plot_qrt_pds(qrt_pds, time_edge, qz_index=0, uid=uidstr, path=data_dir) + + ############################## + ##the below works for all the geometries + ######################################## + if scat_geometry != "ang_saxs": + roi_inten = check_ROI_intensity( + avg_img, roi_mask, ring_number=qth_interest, uid=uidstr, save=True, path=data_dir + ) + if scat_geometry == "saxs" or scat_geometry == "gi_saxs" or scat_geometry == "gi_waxs": + if run_waterfall: + wat = cal_waterfallc(FD, roi_mask, qindex=qth_interest, save=True, path=data_dir, uid=uidstr) + if run_waterfall: + plot_waterfallc( + wat, + qindex=qth_interest, + aspect=None, + vmax=np.max(wat), + uid=uidstr, + save=True, + path=data_dir, + beg=FD.beg, + ) + ring_avg = None + + if run_t_ROI_Inten: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc( + FD, roi_mask, timeperframe=None, multi_cor=True + ) + plot_each_ring_mean_intensityc(times_roi, mean_int_sets, uid=uidstr, save=True, path=data_dir) + roi_avg = np.average(mean_int_sets, axis=0) + + uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) + lag_steps = None + + if use_sqnorm: + norm = get_pixelist_interp_iq(qp_saxs, iq_saxs, roi_mask, center) + else: + norm = None + + define_good_series = False + if define_good_series: + FD = Multifile(filename, beg=good_start, end=Nimg) + uid_ = uidstr + "_fra_%s_%s" % (FD.beg, FD.end) + print(uid_) + + if "g2_fit_variables" in 
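+ # Normalization recap for the correlators that follow (descriptive sketch): with
+ # use_sqnorm=True, `norm` = get_pixelist_interp_iq(qp_saxs, iq_saxs, roi_mask, center),
+ # i.e. the circularly averaged I(q) interpolated onto every ROI pixel, and it is handed
+ # to cal_g2p / Get_Pixel_Arrayc as a per-pixel normalization; with use_imgsum_norm=True,
+ # the per-frame total intensity `imgsum` is passed to cal_g2p as well, which is meant to
+ # correct g2 for incident-beam fluctuations.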
list(run_pargs.keys()): + g2_fit_variables = run_pargs["g2_fit_variables"] + else: + g2_fit_variables = {"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True} + + if "g2_guess_values" in list(run_pargs.keys()): + g2_guess_values = run_pargs["g2_guess_values"] + else: + g2_guess_values = { + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + } + + if "g2_guess_limits" in list(run_pargs.keys()): + g2_guess_limits = run_pargs["g2_guess_limits"] + else: + g2_guess_limits = dict(baseline=[1, 2], alpha=[0, 2], beta=[0, 1], relaxation_rate=[0.001, 5000]) + + if run_one_time: + if use_imgsum_norm: + imgsum_ = imgsum + else: + imgsum_ = None + if scat_geometry != "ang_saxs": + t0 = time.time() + g2, lag_steps = cal_g2p( + FD, roi_mask, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm + ) + run_time(t0) + taus = lag_steps * timeperframe + g2_pds = save_g2_general( + g2, + taus=taus, + qr=np.array(list(qval_dict.values()))[:, 0], + uid=uid_ + "_g2.csv", + path=data_dir, + return_res=True, + ) + g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( + g2, + taus, + function=fit_g2_func, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables=g2_fit_variables, + guess_values=g2_guess_values, + guess_limits=g2_guess_limits, + ) + + g2_fit_paras = save_g2_fit_para_tocsv( + g2_fit_result, filename=uid_ + "_g2_fit_paras.csv", path=data_dir + ) + + # if run_one_time: + # plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + + plot_g2_general( + g2_dict={1: g2, 2: g2_fit}, + taus_dict={1: taus, 2: taus_fit}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=g2_fit_result, + geometry=scat_geometry, + filename=uid_ + "_g2", + path=data_dir, + function=fit_g2_func, + ylabel="g2", + append_name="_fit", + ) + + D0, qrate_fit_res = get_q_rate_fit_general( + qval_dict, g2_fit_paras["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict, + g2_fit_paras["relaxation_rate"], + qrate_fit_res, + geometry=scat_geometry, + uid=uid_, + path=data_dir, + ) + + else: + t0 = time.time() + g2_v, lag_steps_v = cal_g2p( + FD, roi_mask_v, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm + ) + g2_p, lag_steps_p = cal_g2p( + FD, roi_mask_p, bad_frame_list, good_start, num_buf=8, num_lev=None, imgsum=imgsum_, norm=norm + ) + run_time(t0) + + taus_v = lag_steps_v * timeperframe + g2_pds_v = save_g2_general( + g2_v, + taus=taus_v, + qr=np.array(list(qval_dict_v.values()))[:, 0], + uid=uid_ + "_g2v.csv", + path=data_dir, + return_res=True, + ) + + taus_p = lag_steps_p * timeperframe + g2_pds_p = save_g2_general( + g2_p, + taus=taus_p, + qr=np.array(list(qval_dict_p.values()))[:, 0], + uid=uid_ + "_g2p.csv", + path=data_dir, + return_res=True, + ) + + fit_g2_func_v = "stretched" # for vertical + g2_fit_result_v, taus_fit_v, g2_fit_v = get_g2_fit_general( + g2_v, + taus_v, + function=fit_g2_func_v, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables={"baseline": True, "beta": True, "alpha": False, "relaxation_rate": True}, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + }, + ) + g2_fit_paras_v = save_g2_fit_para_tocsv( + g2_fit_result_v, filename=uid_ + "_g2_fit_paras_v.csv", path=data_dir + ) + + fit_g2_func_p = "flow_para" # for parallel + g2_fit_result_p, taus_fit_p, g2_fit_p = get_g2_fit_general( + g2_p, + taus_p, + 
function=fit_g2_func_p, + vlim=[0.95, 1.05], + fit_range=None, + fit_variables={ + "baseline": True, + "beta": True, + "alpha": False, + "relaxation_rate": True, + "flow_velocity": True, + }, + guess_values={ + "baseline": 1.0, + "beta": 0.05, + "alpha": 1.0, + "relaxation_rate": 0.01, + "flow_velocity": 1, + }, + ) + g2_fit_paras_p = save_g2_fit_para_tocsv( + g2_fit_result_p, filename=uid_ + "_g2_fit_paras_p.csv", path=data_dir + ) + + plot_g2_general( + g2_dict={1: g2_v, 2: g2_fit_v}, + taus_dict={1: taus_v, 2: taus_fit_v}, + vlim=[0.95, 1.05], + qval_dict=qval_dict_v, + fit_res=g2_fit_result_v, + geometry=scat_geometry, + filename=uid_ + "_g2_v", + path=data_dir, + function=fit_g2_func_v, + ylabel="g2_v", + append_name="_fit", + ) + + plot_g2_general( + g2_dict={1: g2_p, 2: g2_fit_p}, + taus_dict={1: taus_p, 2: taus_fit_p}, + vlim=[0.95, 1.05], + qval_dict=qval_dict_p, + fit_res=g2_fit_result_p, + geometry=scat_geometry, + filename=uid_ + "_g2_p", + path=data_dir, + function=fit_g2_func_p, + ylabel="g2_p", + append_name="_fit", + ) + + combine_images( + [data_dir + uid_ + "_g2_v_fit.png", data_dir + uid_ + "_g2_p_fit.png"], + data_dir + uid_ + "_g2_fit.png", + outsize=(2000, 2400), + ) + + D0_v, qrate_fit_res_v = get_q_rate_fit_general( + qval_dict_v, g2_fit_paras_v["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict_v, + g2_fit_paras_v["relaxation_rate"], + qrate_fit_res_v, + geometry=scat_geometry, + uid=uid_ + "_vert", + path=data_dir, + ) + + D0_p, qrate_fit_res_p = get_q_rate_fit_general( + qval_dict_p, g2_fit_paras_p["relaxation_rate"], geometry=scat_geometry + ) + plot_q_rate_fit_general( + qval_dict_p, + g2_fit_paras_p["relaxation_rate"], + qrate_fit_res_p, + geometry=scat_geometry, + uid=uid_ + "_para", + path=data_dir, + ) + + combine_images( + [data_dir + uid_ + "_vert_Q_Rate_fit.png", data_dir + uid_ + "_para_Q_Rate_fit.png"], + data_dir + uid_ + "_Q_Rate_fit.png", + outsize=(2000, 2400), + ) + + # For two-time + data_pixel = None + if run_two_time: + + data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() + t0 = time.time() + g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None) + if run_dose: + np.save(data_dir + "uid=%s_g12b" % uid, g12b) + + if lag_steps is None: + num_bufs = 8 + noframes = FD.end - FD.beg + num_levels = int(np.log(noframes / (num_bufs - 1)) / np.log(2) + 1) + 1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + max_taus = lag_steps.max() + lag_steps = lag_steps[lag_steps < Nimg - good_start] + + run_time(t0) + + show_C12( + g12b, + q_ind=qth_interest, + N1=FD.beg, + N2=min(FD.end, 5000), + vmin=0.99, + vmax=1.3, + timeperframe=timeperframe, + save=True, + cmap=cmap_albula, + path=data_dir, + uid=uid_, + ) + + # print('here') + # show_C12(g12b, q_ind= 3, N1= 5, N2=min(5000,5000), vmin=.8, vmax=1.31, cmap=cmap_albula, + # timeperframe= timeperframe,save=False, path= data_dir, uid = uid_ +'_' + k) + max_taus = Nimg + t0 = time.time() + # g2b = get_one_time_from_two_time(g12b)[:max_taus] + g2b = get_one_time_from_two_time(g12b)[lag_steps] + + tausb = lag_steps * timeperframe + run_time(t0) + + # tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe + g2b_pds = save_g2_general( + g2b, + taus=tausb, + qr=np.array(list(qval_dict.values()))[:, 0], + qz=None, + uid=uid_ + "_g2b.csv", + path=data_dir, + return_res=True, + ) + + g2_fit_resultb, taus_fitb, g2_fitb = get_g2_fit_general( + g2b, + tausb, + function=fit_g2_func, + vlim=[0.95, 1.05], + fit_range=None, + 
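+ # Two-time bookkeeping (illustrative): if run_one_time was skipped, lag_steps is rebuilt
+ # above from the multi-tau scheme; e.g. for noframes = 1000 and num_bufs = 8,
+ # num_levels = int(np.log(1000 / 7) / np.log(2) + 1) + 1 = 9.  The one-time curve g2b is
+ # then read off the two-time matrix at those lags via
+ # get_one_time_from_two_time(g12b)[lag_steps], with tausb = lag_steps * timeperframe.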
fit_variables=g2_fit_variables, + guess_values=g2_guess_values, + guess_limits=g2_guess_limits, + ) + + g2b_fit_paras = save_g2_fit_para_tocsv( + g2_fit_resultb, filename=uid_ + "_g2b_fit_paras.csv", path=data_dir + ) + + D0b, qrate_fit_resb = get_q_rate_fit_general( + qval_dict, g2b_fit_paras["relaxation_rate"], fit_range=None, geometry=scat_geometry + ) + + # print( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb ) + plot_q_rate_fit_general( + qval_dict, + g2b_fit_paras["relaxation_rate"], + qrate_fit_resb, + geometry=scat_geometry, + uid=uid_ + "_two_time", + path=data_dir, + ) + + plot_g2_general( + g2_dict={1: g2b, 2: g2_fitb}, + taus_dict={1: tausb, 2: taus_fitb}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=g2_fit_resultb, + geometry=scat_geometry, + filename=uid_ + "_g2", + path=data_dir, + function=fit_g2_func, + ylabel="g2", + append_name="_b_fit", + ) + + if run_two_time and run_one_time: + plot_g2_general( + g2_dict={1: g2, 2: g2b}, + taus_dict={1: taus, 2: tausb}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + g2_labels=["from_one_time", "from_two_time"], + geometry=scat_geometry, + filename=uid_ + "_g2_two_g2", + path=data_dir, + ylabel="g2", + ) + + # Four Time Correlation + + if run_four_time: # have to run one and two first + t0 = time.time() + g4 = get_four_time_from_two_time(g12b, g2=g2b)[:max_taus] + run_time(t0) + + taus4 = np.arange(g4.shape[0]) * timeperframe + g4_pds = save_g2_general( + g4, + taus=taus4, + qr=np.array(list(qval_dict.values()))[:, 0], + qz=None, + uid=uid_ + "_g4.csv", + path=data_dir, + return_res=True, + ) + plot_g2_general( + g2_dict={1: g4}, + taus_dict={1: taus4}, + vlim=[0.95, 1.05], + qval_dict=qval_dict, + fit_res=None, + geometry=scat_geometry, + filename=uid_ + "_g4", + path=data_dir, + ylabel="g4", + ) + + if run_dose: + get_two_time_mulit_uids( + [uid], roi_mask, norm=norm, bin_frame_number=bin_frame_number, path=data_dir0, force_generate=False + ) + N = len(imgs) + try: + tr = md["transmission"] + except: + tr = 1 + if "dose_frame" in list(run_pargs.keys()): + dose_frame = run_pargs["dose_frame"] + else: + dose_frame = np.int_([N / 8, N / 4, N / 2, 3 * N / 4, N * 0.99]) + # N/32, N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 + exposure_dose = tr * exposuretime * dose_frame + taus_uids, g2_uids = get_series_one_time_mulit_uids( + [uid], + qval_dict, + good_start=good_start, + path=data_dir0, + exposure_dose=exposure_dose, + num_bufs=8, + save_g2=False, + dead_time=0, + trans=[tr], + ) + + plot_dose_g2( + taus_uids, + g2_uids, + ylim=[0.95, 1.2], + vshift=0.00, + qval_dict=qval_dict, + fit_res=None, + geometry=scat_geometry, + filename="%s_dose_analysis" % uid_, + path=data_dir, + function=None, + ylabel="g2_Dose", + g2_labels=None, + append_name="", + ) + + # Speckel Visiblity + if run_xsvs: + max_cts = get_max_countc(FD, roi_mask) + qind, pixelist = roi.extract_label_indices(roi_mask) + noqs = len(np.unique(qind)) + nopr = np.bincount(qind, minlength=(noqs + 1))[1:] + # time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) + time_steps = [0, 1] # only run the first two levels + num_times = len(time_steps) + times_xsvs = exposuretime + (2 ** (np.arange(len(time_steps))) - 1) * timeperframe + print("The max counts are: %s" % max_cts) + + ### Do historam + if roi_avg is None: + times_roi, mean_int_sets = cal_each_ring_mean_intensityc( + FD, + roi_mask, + timeperframe=None, + ) + roi_avg = np.average(mean_int_sets, axis=0) + + t0 = time.time() + spec_bins, spec_his, spec_std = xsvsp( + FD, + np.int_(roi_mask), + norm=None, 
+ max_cts=int(max_cts + 2), + bad_images=bad_frame_list, + only_two_levels=True, + ) + spec_kmean = np.array([roi_avg * 2**j for j in range(spec_his.shape[0])]) + run_time(t0) + + run_xsvs_all_lags = False + if run_xsvs_all_lags: + times_xsvs = exposuretime + lag_steps * acquisition_period + if data_pixel is None: + data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() + t0 = time.time() + spec_bins, spec_his, spec_std, spec_kmean = get_binned_his_std( + data_pixel, np.int_(ro_mask), lag_steps + ) + run_time(t0) + spec_pds = save_bin_his_std( + spec_bins, spec_his, spec_std, filename=uid_ + "_spec_res.csv", path=data_dir + ) + + ML_val, KL_val, K_ = get_xsvs_fit( + spec_his, + spec_kmean, + spec_std, + max_bins=2, + varyK=False, + ) + + # print( 'The observed average photon counts are: %s'%np.round(K_mean,4)) + # print( 'The fitted average photon counts are: %s'%np.round(K_,4)) + print( + "The difference sum of average photon counts between fit and data are: %s" + % np.round(abs(np.sum(spec_kmean[0, :] - K_)), 4) + ) + print("#" * 30) + qth = 10 + print("The fitted M for Qth= %s are: %s" % (qth, ML_val[qth])) + print(K_[qth]) + print("#" * 30) + + plot_xsvs_fit( + spec_his, + ML_val, + KL_val, + K_mean=spec_kmean, + spec_std=spec_std, + xlim=[0, 10], + vlim=[0.9, 1.1], + uid=uid_, + qth=qth_interest, + logy=True, + times=times_xsvs, + q_ring_center=qr, + path=data_dir, + ) + + plot_xsvs_fit( + spec_his, + ML_val, + KL_val, + K_mean=spec_kmean, + spec_std=spec_std, + xlim=[0, 15], + vlim=[0.9, 1.1], + uid=uid_, + qth=None, + logy=True, + times=times_xsvs, + q_ring_center=qr, + path=data_dir, + ) + + ### Get contrast + contrast_factorL = get_contrast(ML_val) + spec_km_pds = save_KM( + spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_, path=data_dir + ) + # print( spec_km_pds ) + + plot_g2_contrast( + contrast_factorL, + g2, + times_xsvs, + taus, + qr, + vlim=[0.8, 1.2], + qth=qth_interest, + uid=uid_, + path=data_dir, + legend_size=14, + ) + + plot_g2_contrast( + contrast_factorL, + g2, + times_xsvs, + taus, + qr, + vlim=[0.8, 1.2], + qth=None, + uid=uid_, + path=data_dir, + legend_size=4, + ) + + md["mask_file"] = mask_path + mask_name + md["mask"] = mask + md["NOTEBOOK_FULL_PATH"] = None + md["good_start"] = good_start + md["bad_frame_list"] = bad_frame_list + md["avg_img"] = avg_img + md["roi_mask"] = roi_mask + + if scat_geometry == "gi_saxs": + md["Qr"] = Qr + md["Qz"] = Qz + md["qval_dict"] = qval_dict + md["beam_center_x"] = inc_x0 + md["beam_center_y"] = inc_y0 + md["beam_refl_center_x"] = refl_x0 + md["beam_refl_center_y"] = refl_y0 + + elif scat_geometry == "saxs" or "gi_waxs": + md["qr"] = qr + # md['qr_edge'] = qr_edge + md["qval_dict"] = qval_dict + md["beam_center_x"] = center[1] + md["beam_center_y"] = center[0] + + elif scat_geometry == "ang_saxs": + md["qval_dict_v"] = qval_dict_v + md["qval_dict_p"] = qval_dict_p + md["beam_center_x"] = center[1] + md["beam_center_y"] = center[0] + + md["beg"] = FD.beg + md["end"] = FD.end + md["metadata_file"] = data_dir + "md.csv-&-md.pkl" + psave_obj(md, data_dir + "uid=%s_md" % uid[:6]) # save the setup parameters + # psave_obj( md, data_dir + 'uid=%s_md'%uid ) #save the setup parameters + save_dict_csv(md, data_dir + "uid=%s_md.csv" % uid, "w") + + Exdt = {} + if scat_geometry == "gi_saxs": + for k, v in zip( + [ + "md", + "roi_mask", + "qval_dict", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + "qr_1d_pds", + ], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, 
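+ # Note on the run_xsvs_all_lags branch above: it is hard-wired to False, and as written it
+ # would raise NameError if enabled -- `ro_mask` is presumably a typo for `roi_mask`, and
+ # `acquisition_period` does not appear to be defined anywhere in this function.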
bad_frame_list, qr_1d_pds], + ): + Exdt[k] = v + elif scat_geometry == "saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "iqst", + "qt", + "roi_mask", + "qval_dict", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + ], + [ + md, + q_saxs, + iq_saxs, + iqst, + qt, + roi_mask, + qval_dict, + avg_img, + mask, + pixel_mask, + imgsum, + bad_frame_list, + ], + ): + Exdt[k] = v + elif scat_geometry == "gi_waxs": + for k, v in zip( + ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "bad_frame_list"], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list], + ): + Exdt[k] = v + elif scat_geometry == "ang_saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "roi_mask_v", + "roi_mask_p", + "qval_dict_v", + "qval_dict_p", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + ], + [ + md, + q_saxs, + iq_saxs, + roi_mask_v, + roi_mask_p, + qval_dict_v, + qval_dict_p, + avg_img, + mask, + pixel_mask, + imgsum, + bad_frame_list, + ], + ): + Exdt[k] = v + + if run_waterfall: + Exdt["wat"] = wat + if run_t_ROI_Inten: + Exdt["times_roi"] = times_roi + Exdt["mean_int_sets"] = mean_int_sets + if run_one_time: + if scat_geometry != "ang_saxs": + for k, v in zip(["taus", "g2", "g2_fit_paras"], [taus, g2, g2_fit_paras]): + Exdt[k] = v + else: + for k, v in zip(["taus_v", "g2_v", "g2_fit_paras_v"], [taus_v, g2_v, g2_fit_paras_v]): + Exdt[k] = v + for k, v in zip(["taus_p", "g2_p", "g2_fit_paras_p"], [taus_p, g2_p, g2_fit_paras_p]): + Exdt[k] = v + if run_two_time: + for k, v in zip(["tausb", "g2b", "g2b_fit_paras", "g12b"], [tausb, g2b, g2b_fit_paras, g12b]): + Exdt[k] = v + if run_four_time: + for k, v in zip(["taus4", "g4"], [taus4, g4]): + Exdt[k] = v + if run_xsvs: + for k, v in zip( + ["spec_kmean", "spec_pds", "times_xsvs", "spec_km_pds", "contrast_factorL"], + [spec_kmean, spec_pds, times_xsvs, spec_km_pds, contrast_factorL], + ): + Exdt[k] = v + + export_xpcs_results_to_h5("uid=%s_Res.h5" % md["uid"], data_dir, export_dict=Exdt) + # extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir ) + # Creat PDF Report + pdf_out_dir = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") + pdf_filename = "XPCS_Analysis_Report_for_uid=%s%s.pdf" % (uid, pdf_version) + if run_xsvs: + pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s.pdf" % (uid, pdf_version) + # pdf_filename + + print(data_dir, uid[:6], pdf_out_dir, pdf_filename, username) + + make_pdf_report( + data_dir, + uid[:6], + pdf_out_dir, + pdf_filename, + username, + run_fit_form, + run_one_time, + run_two_time, + run_four_time, + run_xsvs, + run_dose=run_dose, + report_type=scat_geometry, + ) + ## Attach the PDF report to Olog + if att_pdf_report: + os.environ["HTTPS_PROXY"] = "https://proxy:8888" + os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" + pname = pdf_out_dir + pdf_filename + atch = [Attachment(open(pname, "rb"))] + try: + update_olog_uid(uid=md["uid"], text="Add XPCS Analysis PDF Report", attachments=atch) + except: + print( + "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." 
+ % pname + ) + + if show_plot: + plt.show() + # else: + # plt.close('all') + if clear_plot: + plt.close("all") + if return_res: + res = {} + if scat_geometry == "saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "iqst", + "qt", + "avg_img", + "mask", + "imgsum", + "bad_frame_list", + "roi_mask", + "qval_dict", + ], + [md, q_saxs, iq_saxs, iqst, qt, avg_img, mask, imgsum, bad_frame_list, roi_mask, qval_dict], + ): + res[k] = v + + elif scat_geometry == "ang_saxs": + for k, v in zip( + [ + "md", + "q_saxs", + "iq_saxs", + "roi_mask_v", + "roi_mask_p", + "qval_dict_v", + "qval_dict_p", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + ], + [ + md, + q_saxs, + iq_saxs, + roi_mask_v, + roi_mask_p, + qval_dict_v, + qval_dict_p, + avg_img, + mask, + pixel_mask, + imgsum, + bad_frame_list, + ], + ): + res[k] = v + + elif scat_geometry == "gi_saxs": + for k, v in zip( + [ + "md", + "roi_mask", + "qval_dict", + "avg_img", + "mask", + "pixel_mask", + "imgsum", + "bad_frame_list", + "qr_1d_pds", + ], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list, qr_1d_pds], + ): + res[k] = v + + elif scat_geometry == "gi_waxs": + for k, v in zip( + ["md", "roi_mask", "qval_dict", "avg_img", "mask", "pixel_mask", "imgsum", "bad_frame_list"], + [md, roi_mask, qval_dict, avg_img, mask, pixel_mask, imgsum, bad_frame_list], + ): + res[k] = v + + if run_waterfall: + res["wat"] = wat + if run_t_ROI_Inten: + res["times_roi"] = times_roi + res["mean_int_sets"] = mean_int_sets + if run_one_time: + if scat_geometry != "ang_saxs": + res["g2"] = g2 + res["taus"] = taus + else: + res["g2_p"] = g2_p + res["taus_p"] = taus_p + res["g2_v"] = g2_v + res["taus_v"] = taus_v + + if run_two_time: + res["tausb"] = tausb + res["g12b"] = g12b + res["g2b"] = g2b + if run_four_time: + res["g4"] = g4 + res["taus4"] = taus4 + if run_xsvs: + res["spec_kmean"] = spec_kmean + res["spec_pds"] = spec_pds + res["contrast_factorL"] = contrast_factorL + res["times_xsvs"] = times_xsvs + return res + + +# uid = '3ff4ee' +# run_xpcs_xsvs_single( uid, run_pargs ) diff --git a/pyCHX/backups/pyCHX-backup/movie_maker.py b/pyCHX/backups/pyCHX-backup/movie_maker.py new file mode 100644 index 0000000..0d42cf9 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/movie_maker.py @@ -0,0 +1,241 @@ +################################ +######Movie_maker############### +################################ + + +def read_imgs(inDir): + """Give image folder: inDir + Get a pims.sequences, + e.g. inDir= '/home/.../*.png' + """ + from pims import ImageSequence as Images + + return Images(inDir) + + +def select_regoin( + img, + vert, + keep_shape=True, + qmask=None, +): + """Get a pixellist by a rectangular region + defined by + verts e.g. 
xs,xe,ys,ye = vert #x_start, x_end, y_start,y_end + (dimy, dimx,) = img.shape + Giving cut postion, start, end, width""" + import numpy as np + + xs, xe, ys, ye = vert + if keep_shape: + img_ = np.zeros_like(img) + # img_= np.zeros( [dimy,dimx]) + + try: + img_[ys:ye, xs:xe] = True + except: + img_[ys:ye, xs:xe, :] = True + pixellist_ = np.where(img_.ravel())[0] + # pixellist_ = img_.ravel() + if qmask is not None: + b = np.where(qmask.flatten() == False)[0] + pixellist_ = np.intersect1d(pixellist_, b) + # imgx = img[pixellist_] + # imgx = imgx.reshape( xe-xs, ye-ys) + imgx = img_.ravel() + imgx[pixellist_] = img.ravel()[pixellist_] + imgx = imgx.reshape(img.shape) + + else: + try: + imgx = img[ys:ye, xs:xe] + except: + imgx = img[ys:ye, xs:xe, :] + + return imgx + + +def save_png_series( + imgs, ROI=None, logs=True, outDir=None, uid=None, vmin=None, vmax=None, cmap="viridis", dpi=100 +): + import matplotlib.pyplot as plt + import numpy as np + from matplotlib.colors import LogNorm + + """ + save a series of images in a format of png + + Parameters + ---------- + imgs : array + image data array for the movie + dimensions are: [num_img][num_rows][num_cols] + ROI: e.g. xs,xe,ys,ye = vert #x_start, x_end, y_start,y_end + outDir: the output path + vmin/vmax: for image contrast + cmap: the color for plot + dpi: resolution + + Returns + ------- + save png files + + """ + if uid == None: + uid = "uid" + num_frame = 0 + for img in imgs: + fig = plt.figure() + ax = fig.add_subplot(111) + ax.get_xaxis().set_visible(False) + ax.get_yaxis().set_visible(False) + if ROI is None: + i0 = img + asp = 1.0 + else: + i0 = select_regoin( + img, + ROI, + keep_shape=False, + ) + xs, xe, ys, ye = ROI + asp = (ye - ys) / float(xe - xs) + ax.set_aspect("equal") + + if not logs: + im = ax.imshow( + i0, origin="lower", cmap=cmap, interpolation="nearest", vmin=vmin, vmax=vmax + ) # vmin=0,vmax=1, + else: + im = ax.imshow(i0, origin="lower", cmap=cmap, interpolation="nearest", norm=LogNorm(vmin, vmax)) + # ttl = ax.text(.75, .2, '', transform = ax.transAxes, va='center', color='white', fontsize=18) + # fig.set_size_inches( [5., 5 * asp] ) + # plt.tight_layout() + fname = outDir + "uid_%s-frame-%s.png" % (uid, num_frame) + num_frame += 1 + plt.savefig(fname, dpi=None) + + +def movie_maker( + imgs, + num_frames=None, + ROI=None, + interval=20, + fps=15, + real_interval=1.0, + movie_name="movie.mp4", + outDir=None, + movie_writer="ffmpeg", + logs=True, + show_text_on_image=False, + vmin=None, + vmax=None, + cmap="viridis", + dpi=100, +): + import matplotlib.animation as animation + import matplotlib.pyplot as plt + import numpy as np + from matplotlib.colors import LogNorm + + """ + Make a movie by give a image series + + Parameters + ---------- + imgs : array + image data array for the movie + dimensions are: [num_img][num_rows][num_cols] + ROI: e.g. xs,xe,ys,ye = vert #x_start, x_end, y_start,y_end + + num_frames : int + number of frames in the array + + interval : int, optional + delay between frames + + movie_name : str, optional + name of the movie to save + + movie_writer : str, optional + movie writer + + fps : int, optional + Frame rate for movie. 
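+ Example (a sketch; the input path, timings and contrast limits are hypothetical):
+ imgs = read_imgs('/home/user/run1/*.png')
+ movie_maker(imgs, ROI=None, fps=15, interval=20, real_interval=1.34,
+ movie_name='run1.mp4', outDir='/tmp/', logs=True, vmin=0.1, vmax=100)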
+ + real_interval: + the real time interval between each frame in unit of ms + outDir: the output path + vmin/vmax: for image contrast + cmap: the color for plot + dpi: resolution + + Returns + ------- + #ani : + # movie + + """ + + fig = plt.figure() + ax = fig.add_subplot(111) + + ax.get_xaxis().set_visible(False) + ax.get_yaxis().set_visible(False) + + if ROI is None: + i0 = imgs[0] + asp = 1.0 + + else: + i0 = select_regoin( + imgs[0], + ROI, + keep_shape=False, + ) + xs, xe, ys, ye = ROI + asp = (ye - ys) / float(xe - xs) + + ax.set_aspect("equal") + # print( cmap, vmin, vmax ) + + if not logs: + im = ax.imshow(i0, origin="lower", cmap=cmap, interpolation="nearest", vmin=vmin, vmax=vmax) + else: + im = ax.imshow(i0, origin="lower", cmap=cmap, interpolation="nearest", norm=LogNorm(vmin, vmax)) + + # ttl = ax.text(.75, .2, '', transform = ax.transAxes, va='center', color='white', fontsize=18) + ttl = ax.text(0.75, 0.2, "", transform=ax.transAxes, va="center", color="black", fontsize=18) + # print asp + # fig.set_size_inches( [5., 5 * asp] ) + + plt.tight_layout() + + if num_frames is None: + num_frames = len(imgs) + + def update_img(n): + if ROI is None: + ign = imgs[n] + else: + ign = select_regoin( + imgs[n], + ROI, + keep_shape=False, + ) + im.set_data(ign) + if show_text_on_image: + if real_interval >= 10: + ttl.set_text("%s s" % (n * real_interval / 1000.0)) + elif real_interval < 10: + ttl.set_text("%s ms" % (n * real_interval)) + # im.set_text(n) + # print (n) + + ani = animation.FuncAnimation(fig, update_img, num_frames, interval=interval) + writer = animation.writers[movie_writer](fps=fps) + + if outDir is not None: + movie_name = outDir + movie_name + ani.save(movie_name, writer=writer, dpi=dpi) + # return ani diff --git a/pyCHX/backups/pyCHX-backup/xpcs_timepixel.py b/pyCHX/backups/pyCHX-backup/xpcs_timepixel.py new file mode 100644 index 0000000..85080c5 --- /dev/null +++ b/pyCHX/backups/pyCHX-backup/xpcs_timepixel.py @@ -0,0 +1,907 @@ +import os +import pickle as pkl +import struct +import sys + +# from Init_for_Timepix import * # the setup file +import time + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pds +from numpy import ( + apply_over_axes, + arange, + arctan, + around, + array, + digitize, + dot, + exp, + histogram, + histogramdd, + hstack, + hypot, + indices, + int_, + intersect1d, + linspace, + load, + log, + log10, + ma, + mean, + mgrid, + ones, + pi, + poly1d, + polyfit, + power, + ravel, + reshape, + round, + save, + shape, + sin, + sqrt, + std, + sum, + unique, + vstack, + where, + zeros, + zeros_like, +) +from numpy.linalg import lstsq +from tqdm import tqdm + +from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD +from pyCHX.chx_libs import multi_tau_lags + + +def get_timepixel_data(data_dir, filename, time_unit=1): + """give a csv file of a timepixel data, return x,y,t + x, pos_x in pixel + y, pos_y in pixel + t, arrival time + time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 + return x,y,t (in second, starting from zero) + + """ + data = pds.read_csv(data_dir + filename) + #'#Col', ' #Row', ' #ToA', + # return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps + if time_unit != 1: + try: + x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) * time_unit + except: + x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) * time_unit + else: + try: + x, y, t = np.array(data["#Col"]), 
np.array(data["#Row"]), np.array(data["#ToA"]) + except: + x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) + return x, y, t - t.min() # * 25/4096. #in ns + + +def get_pvlist_from_post(p, t, binstep=100, detx=256, dety=256): + """YG.DEV@CHX Nov, 2017 to get a pos, val list of phonton hitting detector by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: array, int64, coordinate-x * det_x + coordinate-y + t: list, int64, photon hit time + binstep: int, binstep (in t unit) period + detx,dety: int/int, the detector size in x and y + Output: + positions: int array, (x*detx +y) + vals: int array, counts of that positions + counts: int array, counts of that positions in each binstep + """ + v = (t - t[0]) // binstep + L = np.max(v) + 1 + arr = np.ravel_multi_index([p, v], [detx * dety, L]) + uval, ind, count = np.unique(arr, return_counts=True, return_index=True) + ind2 = np.lexsort((p[ind], v[ind])) + ps = (p[ind])[ind2] + vs = count[ind2] + cs = np.bincount(v[ind]) + return ps, vs, cs + + +def histogram_pt(p, t, binstep=100, detx=256, dety=256): + """YG.DEV@CHX Nov, 2017 to get a histogram of phonton counts by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: coordinate-x * det_x + coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + """ + L = np.max((t - t[0]) // binstep) + 1 + # print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index([p, (t - t[0]) // binstep], [detx * dety, L]) + M, N = arr.max(), arr.min() + da = np.zeros([detx * dety, L]) + da.flat[np.arange(N, M)] = np.bincount(arr - N) + return da + + +def histogram_xyt(x, y, t, binstep=100, detx=256, dety=256): + """YG.DEV@CHX Mar, 2017 to get a histogram of phonton counts by giving + x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + x: coordinate-x + y: coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + + + """ + L = np.max((t - t[0]) // binstep) + 1 + # print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index([x, y, (t - t[0]) // binstep], [detx, dety, L]) + M, N = arr.max(), arr.min() + da = np.zeros([detx, dety, L]) + da.flat[np.arange(N, M)] = np.bincount(arr - N) + return da + + +def get_FD_end_num(FD, maxend=1e10): + N = maxend + for i in range(0, int(maxend)): + try: + FD.seekimg(i) + except: + N = i + break + FD.seekimg(0) + return N + + +def compress_timepix_data( + pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2, with_pickle=True +): + """YG.Dev@CHX Nov 20, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * y + x + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + force_compress: if False, + if already compressed, just it + else: compress + if True, compress and, if exist, overwrite the already-coompress data + Return: + avg_img, imgsum, N (frame number) + + """ + if filename is None: + filename = 
"/XF11ID/analysis/Compressed_Data" + "/timpix_uid_%s.cmp" % md["uid"] + + if force_compress: + print("Create a new compress file with filename as :%s." % filename) + return init_compress_timepix_data( + pos, t, tbins, filename=filename, md=md, nobytes=nobytes, with_pickle=with_pickle + ) + else: + if not os.path.exists(filename): + print("Create a new compress file with filename as :%s." % filename) + return init_compress_timepix_data( + pos, t, tbins, filename=filename, md=md, nobytes=nobytes, with_pickle=with_pickle + ) + else: + print("Using already created compressed file with filename as :%s." % filename) + return pkl.load(open(filename + ".pkl", "rb")) + + # FD = Multifile(filename, 0, int(1e25) ) + # return get_FD_end_num(FD) + + +def create_timepix_compress_header(md, filename, nobytes=2, bins=1): + """ + Create the head for a compressed eiger data, this function is for parallel compress + """ + fp = open(filename, "wb") + # Make Header 1024 bytes + # md = images.md + if bins != 1: + nobytes = 8 + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMPtpx1", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["sy"], + md["sx"], + 0, + 256, + 0, + 256, + ) + fp.write(Header) + fp.close() + + +def init_compress_timepix_data(pos, t, binstep, filename, mask=None, md=None, nobytes=2, with_pickle=True): + """YG.Dev@CHX Nov 19, 2017 with optimal algorithm by using complex index techniques + + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + binstep: int, binstep (in t unit) period + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + """ + fp = open(filename, "wb") + if md is None: + md = {} + md["beam_center_x"] = 0 + md["beam_center_y"] = 0 + md["count_time"] = 0 + md["detector_distance"] = 0 + md["frame_time"] = 0 + md["incident_wavelength"] = 0 + md["x_pixel_size"] = 45 + md["y_pixel_size"] = 45 + # nobytes = 2 + md["sx"] = 256 + md["sy"] = 256 + + # TODList: for different detector using different md structure, March 2, 2017, + + # 8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMPtpx1", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["sy"], + md["sx"], + 0, + 256, + 0, + 256, + ) + fp.write(Header) + + N_ = np.int(np.ceil((t.max() - t.min()) / binstep)) + print("There are %s frames to be compressed..." 
% (N_ - 1)) + + ps, vs, cs = get_pvlist_from_post(pos, t, binstep, detx=md["sx"], dety=md["sy"]) + N = len(cs) - 1 # the last one might don't have full number for bings, so kick off + css = np.cumsum(cs) + imgsum = np.zeros(N) + good_count = 0 + avg_img = np.zeros( + [md["sy"], md["sx"]], dtype=np.float64 + ) # changed depreciated np.float to np.float64 LW @06/11/2023 + + for i in tqdm(range(0, N)): + if i == 0: + ind1 = 0 + ind2 = css[i] + else: + ind1 = css[i - 1] + ind2 = css[i] + # print( ind1, ind2 ) + good_count += 1 + psi = ps[ind1:ind2] + vsi = vs[ind1:ind2] + dlen = cs[i] + imgsum[i] = vsi.sum() + np.ravel(avg_img)[psi] += vsi + # print(vs.sum()) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *psi)) + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *vsi)) + fp.close() + avg_img /= good_count + # return N -1 + if with_pickle: + pkl.dump([avg_img, imgsum, N], open(filename + ".pkl", "wb")) + return avg_img, imgsum, N + + +def init_compress_timepix_data_light_duty( + pos, t, binstep, filename, mask=None, md=None, nobytes=2, with_pickle=True +): + """YG.Dev@CHX Nov 19, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + """ + fp = open(filename, "wb") + if md is None: + md = {} + md["beam_center_x"] = 0 + md["beam_center_y"] = 0 + md["count_time"] = 0 + md["detector_distance"] = 0 + md["frame_time"] = 0 + md["incident_wavelength"] = 0 + md["x_pixel_size"] = 45 + md["y_pixel_size"] = 45 + # nobytes = 2 + md["sx"] = 256 + md["sy"] = 256 + + # TODList: for different detector using different md structure, March 2, 2017, + + # 8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMPtpx1", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["sy"], + md["sx"], + 0, + 256, + 0, + 256, + ) + fp.write(Header) + + tx = np.arange(t.min(), t.max(), binstep) + N = len(tx) + imgsum = np.zeros(N - 1) + print("There are %s frames to be compressed..." 
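+ # Layout sketch of the .cmp stream written above: a 1024-byte header, then for each
+ # frame a uint32 pixel count dlen, dlen int32 pixel positions, and dlen int16 counts
+ # (when nobytes == 2).  A minimal reader for the first frame could look like
+ #   with open(filename, 'rb') as f:
+ #       f.seek(1024)
+ #       dlen, = struct.unpack('@I', f.read(4))
+ #       pos = np.frombuffer(f.read(4 * dlen), dtype=np.int32)
+ #       cts = np.frombuffer(f.read(2 * dlen), dtype=np.int16)
+ # though the Multifile class imported from pyCHX.chx_compress is presumably the intended reader.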
% (N - 1)) + good_count = 0 + avg_img = np.zeros( + [md["sy"], md["sx"]], dtype=np.float64 + ) # changed depreciated np.float to np.float64 LW @06/11/2023 + for i in tqdm(range(N - 1)): + ind1 = np.argmin(np.abs(tx[i] - t)) + ind2 = np.argmin(np.abs(tx[i + 1] - t)) + # print( 'N=%d:'%i, ind1, ind2 ) + p_i = pos[ind1:ind2] + ps, vs = np.unique(p_i, return_counts=True) + np.ravel(avg_img)[ps] += vs + good_count += 1 + dlen = len(ps) + imgsum[i] = vs.sum() + # print(vs.sum()) + fp.write(struct.pack("@I", dlen)) + fp.write(struct.pack("@{}i".format(dlen), *ps)) + fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *vs)) + fp.close() + avg_img /= good_count + # return N -1 + if with_pickle: + pkl.dump([avg_img, imgsum, N - 1], open(filename + ".pkl", "wb")) + return avg_img, imgsum, N - 1 + + +def compress_timepix_data_old(data_pixel, filename, rois=None, md=None, nobytes=2): + """ + Compress the timepixeldata + md: a dict to describle the data info + rois: [y1,y2, x1, x2] + + """ + fp = open(filename, "wb") + if md is None: + md = {} + md["beam_center_x"] = 0 + md["beam_center_y"] = 0 + md["count_time"] = 0 + md["detector_distance"] = 0 + md["frame_time"] = 0 + md["incident_wavelength"] = 0 + md["x_pixel_size"] = 25 + md["y_pixel_size"] = 25 + # nobytes = 2 + md["sx"] = 256 + md["sy"] = 256 + md["roi_rb"] = 0 + md["roi_re"] = md["sy"] + md["roi_cb"] = 0 + md["roi_ce"] = md["sx"] + if rois is not None: + md["roi_rb"] = rois[2] + md["roi_re"] = rois[3] + md["roi_cb"] = rois[1] + md["roi_ce"] = rois[0] + + md["sy"] = md["roi_cb"] - md["roi_ce"] + md["sx"] = md["roi_re"] - md["roi_rb"] + + # TODList: for different detector using different md structure, March 2, 2017, + + # 8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack( + "@16s8d7I916x", + b"Version-COMPtpx1", + md["beam_center_x"], + md["beam_center_y"], + md["count_time"], + md["detector_distance"], + md["frame_time"], + md["incident_wavelength"], + md["x_pixel_size"], + md["y_pixel_size"], + nobytes, + md["sy"], + md["sx"], + md["roi_rb"], + md["roi_re"], + md["roi_cb"], + md["roi_ce"], + ) + + fp.write(Header) + fp.write(data_pixel) + + +class Get_TimePixel_Arrayc(object): + """ + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + """ + + def __init__( + self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None, flat_correction=None, detx=256, dety=256 + ): + """ + indexable: a images sequences + pixelist: 1-D array, interest pixel list + #flat_correction, normalized by flatfield + #norm, normalized by total intensity, like a incident beam intensity + """ + self.hitime = hitime + self.tbins = tbins + self.tx = np.arange(self.hitime.min(), self.hitime.max(), self.tbins) + N = len(self.tx) + if beg is None: + beg = 0 + if end is None: + end = N + + self.beg = beg + self.end = end + self.length = self.end - self.beg + self.pos = pos + self.pixelist = pixelist + self.norm = norm + self.flat_correction = flat_correction + self.detx = detx + self.dety = dety + + def get_data(self): + """ + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + """ + norm = self.norm + data_array = np.zeros([self.length - 1, len(self.pixelist)]) + print(data_array.shape) + + # fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = 
np.zeros(self.detx * self.dety, dtype=np.int32) + timg[self.pixelist] = np.arange(1, len(self.pixelist) + 1) + n = 0 + tx = self.tx + N = len(self.tx) + print("The Produced Array Length is %d." % (N - 1)) + flat_correction = self.flat_correction + # imgsum = np.zeros( N ) + for i in tqdm(range(N - 1)): + ind1 = np.argmin(np.abs(tx[i] - self.hitime)) + ind2 = np.argmin(np.abs(tx[i + 1] - self.hitime)) + # print( 'N=%d:'%i, ind1, ind2 ) + p_i = self.pos[ind1:ind2] + pos, val = np.unique(p_i, return_counts=True) + # print( val.sum() ) + w = np.where(timg[pos])[0] + pxlist = timg[pos[w]] - 1 + # print( val[w].sum() ) + # fra_pix[ pxlist] = v[w] + if flat_correction is not None: + # normalized by flatfield + data_array[n][pxlist] = val[w] + else: + data_array[n][pxlist] = val[w] / flat_correction[pxlist] # -1.0 + if norm is not None: + # normalized by total intensity, like a incident beam intensity + data_array[n][pxlist] /= norm[i] + n += 1 + return data_array + + +def apply_timepix_mask(x, y, t, roi): + y1, y2, x1, x2 = roi + w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1) + return x[w], y[w], t[w] + + +def get_timepixel_data_from_series(data_dir, filename_prefix, total_filenum=72, colms=int(1e5)): + x = np.zeros(total_filenum * colms) + y = np.zeros(total_filenum * colms) + t = zeros(total_filenum * colms) + for n in range(total_filenum): + filename = filename_prefix + "_%s.csv" % n + data = get_timepixel_data(data_dir, filename) + if n != total_filenum - 1: + (x[n * colms : (n + 1) * colms], y[n * colms : (n + 1) * colms], t[n * colms : (n + 1) * colms]) = ( + data[0], + data[1], + data[2], + ) + else: + # print( filename_prefix + '_%s.csv'%n ) + ln = len(data[0]) + # print( ln ) + (x[n * colms : n * colms + ln], y[n * colms : n * colms + ln], t[n * colms : n * colms + ln]) = ( + data[0], + data[1], + data[2], + ) + + return x[: n * colms + ln], y[: n * colms + ln], t[: n * colms + ln] + + +def get_timepixel_avg_image(x, y, t, det_shape=[256, 256], delta_time=None): + """YG.Dev@CHX, 2016 + give x,y, t data to get image in a period of delta_time (in second) + x, pos_x in pixel + y, pos_y in pixel + t, arrival time + + + """ + t0 = t.min() + tm = t.max() + + if delta_time is not None: + delta_time *= 1e12 + if delta_time > tm: + delta_time = tm + else: + delta_time = t.max() + # print( delta_time) + t_ = t[t < delta_time] + x_ = x[: len(t_)] + y_ = y[: len(t_)] + + img = np.zeros(det_shape, dtype=np.int32) + pixlist = x_ * det_shape[0] + y_ + his = np.histogram(pixlist, bins=np.arange(det_shape[0] * det_shape[1] + 1))[0] + np.ravel(img)[:] = his + print("The max photon count is %d." 
% img.max()) + return img + + +def get_his_taus(t, bin_step): + """Get taus and histrogram of photons + Parameters: + t: the time stamp of photon hitting the detector + bin_step: bin time step, in unit of ms + Return: + taus, in ms + histogram of photons + + """ + + bins = np.arange(t.min(), t.max(), bin_step) # 1e6 for us + # print( bins ) + td = np.histogram(t, bins=bins)[0] # do histogram + taus = bins - bins[0] + return taus[1:], td + + +def get_multi_tau_lags(oned_count, num_bufs=8): + n = len(oned_count) + num_levels = int(np.log(n / (num_bufs - 1)) / np.log(2) + 1) + 1 + tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) + return lag_steps[lag_steps < n] + + +def get_timepixel_multi_tau_g2(oned_count, num_bufs=8): + n = len(oned_count) + lag_steps = get_multi_tau_lags(oned_count, num_bufs) + g2 = np.zeros(len(lag_steps)) + + for tau_ind, tau in enumerate(lag_steps): + IP = oned_count[: n - tau] + IF = oned_count[tau:n] + # print( IP.shape,IF.shape, tau, n ) + g2[tau_ind] = np.dot(IP, IF) / (IP.mean() * IF.mean() * float(n - tau)) + return g2 + + +def get_timepixel_c12(oned_count): + noframes = len(oned_count) + oned_count = oned_count.reshape(noframes, 1) + return np.dot(oned_count, oned_count.T) / oned_count / oned_count.T / noframes + + +def get_timepixel_g2(oned_count): + n = len(oned_count) + norm = ( + np.arange(n, 0, -1) + * np.array([np.average(oned_count[i:]) for i in range(n)]) + * np.array([np.average(oned_count[0 : n - i]) for i in range(n)]) + ) + return np.correlate(oned_count, oned_count, mode="full")[-n:] / norm + + +######################################### +T = True +F = False + + +def read_xyt_frame(n=1): + """Load the xyt txt files: + x,y is the detector (x,y) coordinates + t is the time-encoder (when hitting the detector at that (x,y)) + DATA_DIR is the data filefold path + DataPref is the data prefix + n is file number + the data name will be like: DATA_DIR/DataPref_0001.txt + return the histogram of the hitting event + """ + import numpy as np + + ni = "%04d" % n + fp = DATA_DIR + DataPref + "%s.txt" % ni + data = np.genfromtxt(fp, skiprows=0)[:, 2] # take the time encoder + td = np.histogram(data, bins=np.arange(11810))[0] # do histogram + return td + + +def readframe_series(n=1): + """Using this universe name for all the loading fucntions""" + return read_xyt_frame(n) + + +class xpcs(object): + def __init__(self): + """DOCUMENT __init__( ) + the initilization of the XPCS class + """ + self.version = "version_0" + self.create_time = "July_14_2015" + self.author = "Yugang_Zhang@chx11id_nsls2_BNL" + + def delays(self, time=1, nolevs=None, nobufs=None, tmaxs=None): + """DOCUMENT delays(time=) + Using the lev,buf concept, to generate array of time delays + return array of delays. 
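+ Example (derived from the code below): with nolevs=3, nobufs=8 and time=1 the
+ returned delays are 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
+ i.e. a linear run up to nobufs, then blocks of nobufs/2 delays whose spacing
+ doubles at each level (the last element is trimmed off).  Note: under Python 3
+ the float divisions in zeros((nolevs+1)*nobufs/2+1) and in the ptr indices
+ need an explicit int cast.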
+ KEYWORD: time: scale delays by time ( should be time between frames) + nolevs: lev (a integer number) + nobufs: buf (a integer number) + tmax: the max time in the calculation, usually, the noframes + + """ + + if nolevs is None: + nolevs = nolev # defined by the set-up file + if nobufs is None: + nobufs = nobuf # defined by the set-up file + if tmaxs is None: + tmaxs = tmax # defined by the set-up file + if nobufs % 2 != 0: + print("nobuf must be even!!!") + dly = zeros((nolevs + 1) * nobufs / 2 + 1) + dict_dly = {} + for i in range(1, nolevs + 1): + if i == 1: + imin = 1 + else: + imin = nobufs / 2 + 1 + ptr = (i - 1) * nobufs / 2 + arange(imin, nobufs + 1) + dly[ptr] = arange(imin, nobufs + 1) * 2 ** (i - 1) + dict_dly[i] = dly[ptr - 1] + dly *= time + dly = dly[:-1] + dly_ = dly[: where(dly < tmaxs)[0][-1] + 1] + self.dly = dly + self.dly_ = dly_ + self.dict_dly = dict_dly + return dly + + def make_qlist(self): + """DOCUMENT make_qlist( ) + Giving the noqs, qstart,qend,qwidth, defined by the set-up file + return qradi: a list of q values, [qstart, ...,qend] with length as noqs + qlist: a list of q centered at qradi with qwidth. + KEYWORD: noqs, qstart,qend,qwidth::defined by the set-up file + """ + qradi = linspace(qstart, qend, noqs) + qlist = zeros(2 * noqs) + qlist[::2] = round(qradi - qwidth / 2) # render even value + qlist[1::2] = round(qradi + (1 + qwidth) / 2) # render odd value + qlist[::2] = int_(qradi - qwidth / 2) # render even value + qlist[1::2] = int_(qradi + (1 + qwidth) / 2) # render odd value + if qlist_ != None: + qlist = qlist_ + return qlist, qradi + + def calqlist(self, qmask=None, shape="circle"): + """DOCUMENT calqlist( qmask=,shape=, ) + calculate the equvilent pixel with a shape, + return + qind: the index of q + pixellist: the list of pixle + nopr: pixel number in each q + nopixels: total pixel number + KEYWORD: + qmask, a mask file; + qlist,qradi is calculated by make_qlist() + shape='circle', give a circle shaped qlist + shape='column', give a column shaped qlist + shape='row', give a row shaped qlist + """ + + qlist, qradi = self.make_qlist() + y, x = indices([dimy, dimx]) + if shape == "circle": + y_ = y - ceny + 1 + x_ = x - cenx + 1 + r = int_(hypot(x_, y_) + 0.5) + elif shape == "column": + r = x + elif shape == "row": + r = y + else: + pass + r = r.flatten() + noqrs = len(qlist) + qind = digitize(r, qlist) + if qmask is None: + w_ = where((qind) % 2) # qind should be odd;print 'Yes' + w = w_[0] + else: + a = where((qind) % 2)[0] + b = where(mask.flatten() == False)[0] + w = intersect1d(a, b) + nopixels = len(w) + qind = qind[w] / 2 + pixellist = (y * dimx + x).flatten()[w] + nopr, bins = histogram(qind, bins=range(len(qradi) + 1)) + return qind, pixellist, nopr, nopixels + + ########################################################################### + ########for one_time correlation function for xyt frames + ################################################################## + + def autocor_xytframe(self, n): + """Do correlation for one xyt frame--with data name as n""" + dly_ = xp.dly_ + # cal=0 + gg2 = zeros(len(dly_)) + data = read_xyt_frame(n) # load data + datm = len(data) + for tau_ind, tau in enumerate(dly_): + IP = data[: datm - tau] + IF = data[tau:datm] + gg2[tau_ind] = dot(IP, IF) / (IP.mean() * IF.mean() * float(datm - tau)) + + return gg2 + + def autocor(self, noframes=10): + """Do correlation for xyt file, + noframes is the frame number to be correlated + """ + start_time = time.time() + for n in range(1, noframes + 1): # the main loop 
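+ # Estimator used by autocor_xytframe() above, for each delay tau with N = len(data):
+ #   g2(tau) = sum_t I(t) * I(t + tau) / [ (N - tau) * mean(I[:N-tau]) * mean(I[tau:]) ]
+ # i.e. the symmetrically normalized time-average correlation evaluated at the delays
+ # generated by delays().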
for correlator + gg2 = self.autocor_xytframe(n) + if n == 1: + g2 = zeros_like(gg2) + g2 += (gg2 - g2) / float(n) # average g2 + # print n + if noframes > 10: # print progress... + if n % (noframes / 10) == 0: + sys.stdout.write("#") + sys.stdout.flush() + elapsed_time = time.time() - start_time + print("Total time: %.2f min" % (elapsed_time / 60.0)) + return g2 + + def plot(self, y, x=None): + """a simple plot""" + if x is None: + x = arange(len(y)) + plt.plot(x, y, "ro", ls="-") + plt.show() + + def g2_to_pds(self, dly, g2, tscale=None): + """convert g2 to a pandas frame""" + if len(g2.shape) == 1: + g2 = g2.reshape([len(g2), 1]) + tn, qn = g2.shape + tindex = xrange(tn) + qcolumns = ["t"] + ["g2"] + if tscale is None: + tscale = 1.0 + g2t = hstack([dly[:tn].reshape(tn, 1) * tscale, g2]) + g2p = pd.DataFrame(data=g2t, index=tindex, columns=qcolumns) + return g2p + + def show(self, g2p, title): + t = g2p.t + N = len(g2p) + ylim = [g2p.g2.min(), g2p[1:N].g2.max()] + g2p.plot(x=t, y="g2", marker="o", ls="--", logx=T, ylim=ylim) + plt.xlabel("time delay, ns", fontsize=12) + plt.title(title) + plt.savefig(RES_DIR + title + ".png") + plt.show() + + +###################################################### + +if False: + xp = xpcs() + # use the xpcs class + dly = xp.delays() + if T: + fnum = 100 + g2 = xp.autocor(fnum) + filename = "g2_-%s-" % (fnum) + save(RES_DIR + FOUT + filename, g2) + ##g2= load(RES_DIR + FOUT + filename +'.npy') + g2p = xp.g2_to_pds(dly, g2, tscale=20) + xp.show(g2p, "g2_run_%s" % fnum) diff --git a/pyCHX/backups/pychx-repo-obsolete b/pyCHX/backups/pychx-repo-obsolete new file mode 160000 index 0000000..1a4d3f5 --- /dev/null +++ b/pyCHX/backups/pychx-repo-obsolete @@ -0,0 +1 @@ +Subproject commit 1a4d3f5940e141d037904e511730b4fd7f39343f diff --git a/pyCHX/backups/xpcs_timepixel_05012024.py b/pyCHX/backups/xpcs_timepixel_05012024.py new file mode 100644 index 0000000..286141e --- /dev/null +++ b/pyCHX/backups/xpcs_timepixel_05012024.py @@ -0,0 +1,830 @@ +from numpy import pi,sin,arctan,sqrt,mgrid,where,shape,exp,linspace,std,arange +from numpy import power,log,log10,array,zeros,ones,reshape,mean,histogram,round,int_ +from numpy import indices,hypot,digitize,ma,histogramdd,apply_over_axes,sum +from numpy import around,intersect1d, ravel, unique,hstack,vstack,zeros_like +from numpy import save, load, dot +from numpy.linalg import lstsq +from numpy import polyfit,poly1d; +import sys,os +import pickle as pkl + +import matplotlib.pyplot as plt +#from Init_for_Timepix import * # the setup file +import time + +import struct +import numpy as np +from tqdm import tqdm +import pandas as pds +from pyCHX.chx_libs import multi_tau_lags +from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD + + + + +def get_timepixel_data( data_dir, filename, time_unit= 1 ): + '''give a csv file of a timepixel data, return x,y,t + x, pos_x in pixel + y, pos_y in pixel + t, arrival time + time_unit, t*time_unit will convert to second, in reality, this value is 6.1e-12 + return x,y,t (in second, starting from zero) + + ''' + data = pds.read_csv( data_dir + filename ) + #'#Col', ' #Row', ' #ToA', + #return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps + if time_unit !=1: + try: + x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), np.array(data['#ToA'] ) * time_unit + except: + x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) * time_unit + else: + try: + x,y,t=np.array( data['#Col'] ), np.array(data['#Row']), 
np.array(data['#ToA'] ) + except: + x,y,t=np.array( data['#Col'] ), np.array(data[' #Row']), np.array(data[' #ToA'] ) + return x,y, t-t.min() #* 25/4096. #in ns + + +def get_pvlist_from_post( p, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Nov, 2017 to get a pos, val list of phonton hitting detector by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: array, int64, coordinate-x * det_x + coordinate-y + t: list, int64, photon hit time + binstep: int, binstep (in t unit) period + detx,dety: int/int, the detector size in x and y + Output: + positions: int array, (x*detx +y) + vals: int array, counts of that positions + counts: int array, counts of that positions in each binstep + ''' + v = ( t - t[0])//binstep + L= np.max( v ) + 1 + arr = np.ravel_multi_index( [ p, v ], [detx * dety,L ] ) + uval, ind, count = np.unique( arr, return_counts=True, return_index=True) + ind2 = np.lexsort( ( p[ind], v[ind] ) ) + ps = (p[ind])[ind2] + vs = count[ind2] + cs = np.bincount(v[ind]) + return ps,vs,cs + + + +def histogram_pt( p, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Nov, 2017 to get a histogram of phonton counts by giving + p (photon hit pos_x * detx + y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + p: coordinate-x * det_x + coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + ''' + L= np.max( (t-t[0])//binstep ) + 1 + #print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index( [ p, (t-t[0])//binstep ], [detx * dety,L ] ) + M,N = arr.max(),arr.min() + da = np.zeros( [detx * dety, L ] ) + da.flat[np.arange(N, M ) ] = np.bincount( arr- N ) + return da + +def histogram_xyt( x, y, t, binstep=100, detx=256, dety=256 ): + '''YG.DEV@CHX Mar, 2017 to get a histogram of phonton counts by giving + x (photon hit pos_x), y (photon hit pos_y), t (photon hit time), and the time bin + The most important function for timepix + Input: + x: coordinate-x + y: coordinate-y + t: photon hit time + bin t in binstep (in t unit) period + detx,dety: the detector size in x and y + Output: + the hitorgram of photons with bins as binstep (in time unit) + + + ''' + L= np.max( (t-t[0])//binstep ) + 1 + #print(L,x,y, (t-t[0])//binstep) + arr = np.ravel_multi_index( [x, y, (t-t[0])//binstep ], [detx, dety,L ] ) + M,N = arr.max(),arr.min() + da = np.zeros( [detx, dety, L ] ) + da.flat[np.arange(N, M ) ] = np.bincount( arr- N ) + return da + + + +def get_FD_end_num(FD, maxend=1e10): + N = maxend + for i in range(0,int(maxend)): + try: + FD.seekimg(i) + except: + N = i + break + FD.seekimg(0) + return N + +def compress_timepix_data( pos, t, tbins, filename=None, md=None, force_compress=False, nobytes=2, + with_pickle=True ): + + ''' YG.Dev@CHX Nov 20, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * y + x + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + force_compress: if False, + if already compressed, just it + else: compress + if True, compress and, if exist, overwrite the already-coompress data + Return: + avg_img, imgsum, N (frame number) + + ''' + if filename is None: + filename= '/XF11ID/analysis/Compressed_Data' 
+'/timpix_uid_%s.cmp'%md['uid'] + + if force_compress: + print ("Create a new compress file with filename as :%s."%filename) + return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes, + with_pickle=with_pickle ) + else: + if not os.path.exists( filename ): + print ("Create a new compress file with filename as :%s."%filename) + return init_compress_timepix_data( pos, t, tbins, filename=filename, md=md, nobytes= nobytes, + with_pickle=with_pickle ) + else: + print ("Using already created compressed file with filename as :%s."%filename) + return pkl.load( open(filename + '.pkl', 'rb' ) ) + + #FD = Multifile(filename, 0, int(1e25) ) + #return get_FD_end_num(FD) + + + + + +def create_timepix_compress_header( md, filename, nobytes=2, bins=1 ): + ''' + Create the head for a compressed eiger data, this function is for parallel compress + ''' + fp = open( filename,'wb' ) + #Make Header 1024 bytes + #md = images.md + if bins!=1: + nobytes=8 + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + fp.close() + + +def init_compress_timepix_data( pos, t, binstep, filename, mask=None, + md = None, nobytes=2,with_pickle=True ): + ''' YG.Dev@CHX Nov 19, 2017 with optimal algorithm by using complex index techniques + + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + binstep: int, binstep (in t unit) period + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + ''' + fp = open( filename,'wb' ) + if md is None: + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] = 45 + md['y_pixel_size'] = 45 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + + N_ = np.int( np.ceil( (t.max() -t.min()) / binstep ) ) + print('There are %s frames to be compressed...'%(N_-1)) + + ps,vs,cs = get_pvlist_from_post( pos, t, binstep, detx= md['sx'], dety= md['sy'] ) + N = len(cs) - 1 #the last one might don't have full number for bings, so kick off + css = np.cumsum(cs) + imgsum = np.zeros( N ) + good_count = 0 + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 + + for i in tqdm( range(0,N) ): + if i ==0: + ind1 = 0 + ind2 = css[i] + else: + ind1 = css[i-1] + ind2 = css[i] + #print( ind1, ind2 ) + good_count +=1 + psi = ps[ ind1:ind2 ] + vsi = vs[ ind1:ind2 ] + dlen = cs[i] + imgsum[i] = vsi.sum() + np.ravel(avg_img )[psi] += vsi + #print(vs.sum()) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( 
'@{}i'.format( dlen), *psi)) + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vsi)) + fp.close() + avg_img /= good_count + #return N -1 + if with_pickle: + pkl.dump( [ avg_img, imgsum, N ], open(filename + '.pkl', 'wb' ) ) + return avg_img, imgsum, N + + + + + +def init_compress_timepix_data_light_duty( pos, t, binstep, filename, mask=None, + md = None, nobytes=2,with_pickle=True ): + ''' YG.Dev@CHX Nov 19, 2017 + Compress the timepixeldata, in a format of x, y, t + x: pos_x in pixel + y: pos_y in pixel + timepix3 det size 256, 256 + TODOLIST: mask is not working now + Input: + pos: 256 * x + y #can't be 256*x + y + t: arrival time in sec + filename: the output filename + md: a dict to describle the data info + Return: + N (frame number) + + ''' + fp = open( filename,'wb' ) + if md is None: + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] = 45 + md['y_pixel_size'] = 45 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + 0,256, + 0,256 + ) + fp.write( Header) + + tx = np.arange( t.min(), t.max(), binstep ) + N = len(tx) + imgsum = np.zeros( N-1 ) + print('There are %s frames to be compressed...'%(N-1)) + good_count = 0 + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed depreciated np.float to np.float64 LW @06/11/2023 + for i in tqdm( range(N-1) ): + ind1 = np.argmin( np.abs( tx[i] - t) ) + ind2 = np.argmin( np.abs( tx[i+1] - t ) ) + #print( 'N=%d:'%i, ind1, ind2 ) + p_i = pos[ind1: ind2] + ps,vs = np.unique( p_i, return_counts= True ) + np.ravel(avg_img )[ps] += vs + good_count +=1 + dlen = len(ps) + imgsum[i] = vs.sum() + #print(vs.sum()) + fp.write( struct.pack( '@I', dlen )) + fp.write( struct.pack( '@{}i'.format( dlen), *ps)) + fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *vs)) + fp.close() + avg_img /= good_count + #return N -1 + if with_pickle: + pkl.dump( [ avg_img, imgsum, N-1 ], open(filename + '.pkl', 'wb' ) ) + return avg_img, imgsum, N-1 + + + + + + +def compress_timepix_data_old( data_pixel, filename, rois=None, + md = None, nobytes=2 ): + ''' + Compress the timepixeldata + md: a dict to describle the data info + rois: [y1,y2, x1, x2] + + ''' + fp = open( filename,'wb' ) + if md is None: + md={} + md['beam_center_x'] = 0 + md['beam_center_y'] = 0 + md['count_time'] = 0 + md['detector_distance'] = 0 + md['frame_time'] = 0 + md['incident_wavelength'] =0 + md['x_pixel_size'] =25 + md['y_pixel_size'] =25 + #nobytes = 2 + md['sx'] = 256 + md['sy'] = 256 + md['roi_rb']= 0 + md['roi_re']= md['sy'] + md['roi_cb']= 0 + md['roi_ce']= md['sx'] + if rois is not None: + md['roi_rb']= rois[2] + md['roi_re']= rois[3] + md['roi_cb']= rois[1] + md['roi_ce']= rois[0] + + md['sy'] = md['roi_cb'] - md['roi_ce'] + md['sx'] = md['roi_re'] - md['roi_rb'] + + #TODList: for different detector using different md structure, March 2, 2017, + + #8d include, + #'bytes', 'nrows', 'ncols', (detsize) + #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) 
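    # Editor's note (descriptive comment, not part of the original commit): as far as can be
    # read from the struct.pack calls in this module, the compressed timepix file is a fixed
    # 1024-byte header ('@16s8d7I916x': the 16-byte magic b'Version-COMPtpx1', 8 doubles of
    # geometry/timing metadata, 7 uint32 fields for nobytes, detector size and ROI, then
    # padding) followed by one record per frame: a uint32 pixel count dlen, dlen int32
    # flattened pixel positions, and dlen counts written as int16 when nobytes == 2 and as
    # int32 otherwise.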
+ Header = struct.pack('@16s8d7I916x',b'Version-COMPtpx1', + md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'], + md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'], + + nobytes, md['sy'], md['sx'], + md['roi_rb'], md['roi_re'],md['roi_cb'],md['roi_ce'] + ) + + fp.write( Header) + fp.write( data_pixel ) + + + +class Get_TimePixel_Arrayc(object): + ''' + a class to get intested pixels from a images sequence, + load ROI of all images into memory + get_data: to get a 2-D array, shape as (len(images), len(pixellist)) + + One example: + data_pixel = Get_Pixel_Array( imgsr, pixelist).get_data() + ''' + + def __init__(self, pos, hitime, tbins, pixelist, beg=None, end=None, norm=None,flat_correction=None, + detx = 256, dety = 256): + ''' + indexable: a images sequences + pixelist: 1-D array, interest pixel list + #flat_correction, normalized by flatfield + #norm, normalized by total intensity, like a incident beam intensity + ''' + self.hitime = hitime + self.tbins = tbins + self.tx = np.arange( self.hitime.min(), self.hitime.max(), self.tbins ) + N = len(self.tx) + if beg is None: + beg = 0 + if end is None: + end = N + + self.beg = beg + self.end = end + self.length = self.end - self.beg + self.pos = pos + self.pixelist = pixelist + self.norm = norm + self.flat_correction = flat_correction + self.detx = detx + self.dety = dety + + def get_data(self ): + ''' + To get intested pixels array + Return: 2-D array, shape as (len(images), len(pixellist)) + ''' + norm = self.norm + data_array = np.zeros([ self.length-1,len(self.pixelist)]) + print( data_array.shape) + + #fra_pix = np.zeros_like( pixelist, dtype=np.float64) + timg = np.zeros( self.detx * self.dety, dtype=np.int32 ) + timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) + n=0 + tx = self.tx + N = len(self.tx) + print( 'The Produced Array Length is %d.'%(N-1) ) + flat_correction = self.flat_correction + #imgsum = np.zeros( N ) + for i in tqdm( range(N-1) ): + ind1 = np.argmin( np.abs( tx[i] - self.hitime ) ) + ind2 = np.argmin( np.abs( tx[i+1] - self.hitime ) ) + #print( 'N=%d:'%i, ind1, ind2 ) + p_i = self.pos[ind1: ind2] + pos,val = np.unique( p_i, return_counts= True ) + #print( val.sum() ) + w = np.where( timg[pos] )[0] + pxlist = timg[ pos[w] ] -1 + #print( val[w].sum() ) + #fra_pix[ pxlist] = v[w] + if flat_correction is not None: + #normalized by flatfield + data_array[n][ pxlist] = val[w] + else: + data_array[n][ pxlist] = val[w] / flat_correction[pxlist] #-1.0 + if norm is not None: + #normalized by total intensity, like a incident beam intensity + data_array[n][ pxlist] /= norm[i] + n += 1 + return data_array + + + +def apply_timepix_mask( x,y,t, roi ): + y1,y2, x1,x2 = roi + w = (x < x2) & (x >= x1) & (y < y2) & (y >= y1) + return x[w],y[w], t[w] + + + + + + +def get_timepixel_data_from_series( data_dir, filename_prefix, + total_filenum = 72, colms = int(1e5) ): + x = np.zeros( total_filenum * colms ) + y = np.zeros( total_filenum * colms ) + t = zeros( total_filenum * colms ) + for n in range( total_filenum): + filename = filename_prefix + '_%s.csv'%n + data = get_timepixel_data( data_dir, filename ) + if n!=total_filenum-1: + ( x[n*colms: (n+1)*colms ], y[n*colms: (n+1)*colms ], t[n*colms: (n+1)*colms ] )= ( + data[0], data[1], data[2]) + else: + #print( filename_prefix + '_%s.csv'%n ) + ln = len(data[0]) + #print( ln ) + ( x[n*colms: n*colms + ln ], y[n*colms: n*colms + ln ], t[n*colms: n*colms + ln ] )= ( + data[0], data[1], data[2]) + + return x[:n*colms 
+ ln] ,y[:n*colms + ln],t[:n*colms + ln] + + + +def get_timepixel_avg_image( x,y,t, det_shape = [256, 256], delta_time = None ): + '''YG.Dev@CHX, 2016 + give x,y, t data to get image in a period of delta_time (in second) + x, pos_x in pixel + y, pos_y in pixel + t, arrival time + + + ''' + t0 = t.min() + tm = t.max() + + if delta_time is not None: + delta_time *=1e12 + if delta_time > tm: + delta_time = tm + else: + delta_time = t.max() + #print( delta_time) + t_ = t[t10: #print progress... + if n %( noframes / 10) ==0: + sys.stdout.write("#") + sys.stdout.flush() + elapsed_time = time.time() - start_time + print ( 'Total time: %.2f min' %(elapsed_time/60.) ) + return g2 + + + def plot(self, y,x=None): + '''a simple plot''' + if x is None:x=arange( len(y)) + plt.plot(x,y,'ro', ls='-') + plt.show() + + + def g2_to_pds(self, dly, g2, tscale = None): + '''convert g2 to a pandas frame''' + if len(g2.shape)==1:g2=g2.reshape( [len(g2),1] ) + tn, qn = g2.shape + tindex=xrange( tn ) + qcolumns = ['t'] + [ 'g2' ] + if tscale is None:tscale = 1.0 + g2t = hstack( [dly[:tn].reshape(tn,1) * tscale, g2 ]) + g2p = pd.DataFrame(data=g2t, index=tindex,columns=qcolumns) + return g2p + + def show(self,g2p,title): + t = g2p.t + N = len( g2p ) + ylim = [g2p.g2.min(),g2p[1:N].g2.max()] + g2p.plot(x=t,y='g2',marker='o',ls='--',logx=T,ylim=ylim); + plt.xlabel('time delay, ns',fontsize=12) + plt.title(title) + plt.savefig( RES_DIR + title +'.png' ) + plt.show() + + + +###################################################### + +if False: + xp=xpcs(); #use the xpcs class + dly = xp.delays() + if T: + fnum = 100 + g2=xp.autocor( fnum ) + filename='g2_-%s-'%(fnum) + save( RES_DIR + FOUT + filename, g2) + ##g2= load(RES_DIR + FOUT + filename +'.npy') + g2p = xp.g2_to_pds(dly,g2, tscale = 20) + xp.show(g2p,'g2_run_%s'%fnum) diff --git a/pyCHX/chx_compress.py b/pyCHX/chx_compress.py index 16e9881..ab8cdf4 100644 --- a/pyCHX/chx_compress.py +++ b/pyCHX/chx_compress.py @@ -6,7 +6,7 @@ import sys from contextlib import closing from glob import iglob -from multiprocessing import Pool +from multiprocessing import Pool, cpu_count import dill import matplotlib.pyplot as plt @@ -74,7 +74,7 @@ def compress_eigerdata( bins=1, bad_frame_list=None, para_compress=False, - num_sub=100, + num_sub=128, dtypes="uid", reverse=True, rot90=False, @@ -84,7 +84,7 @@ def compress_eigerdata( data_path=None, images_per_file=100, copy_rawdata=True, - new_path="/tmp_data/data/", + new_path="/tmp/", ): """ Init 2016, YG@CHX @@ -111,9 +111,12 @@ def compress_eigerdata( if force_compress: print("Create a new compress file with filename as :%s." % filename) if para_compress: - # stop connection to be before forking... (let it reset again) - db.reg.disconnect() - db.mds.reset_connection() + # stop connection to be before forking... (let it reset again); 11/09/2024 this seems to fail with 'registry doesn't have attribute disconnect... -> try making this optional; this might have been a leftover: if compression happens "natuarally" (not as force_compress=True) this disconnect/reconnect is already missing...we definitely had this error before... 
+ try: + db.reg.disconnect() + db.mds.reset_connection() + except: + pass print("Using a multiprocess to compress the data.") return para_compress_eigerdata( images, @@ -178,6 +181,7 @@ def compress_eigerdata( data_path=data_path, images_per_file=images_per_file, copy_rawdata=copy_rawdata, + new_path=new_path ) else: return init_compress_eigerdata( @@ -270,7 +274,7 @@ def para_compress_eigerdata( mask, md, filename, - num_sub=100, + num_sub=128, bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, bad_pixel_low_threshold=0, @@ -280,13 +284,13 @@ def para_compress_eigerdata( reverse=True, rot90=False, num_max_para_process=500, - cpu_core_number=72, + cpu_core_number=0, with_pickle=True, direct_load_data=False, data_path=None, images_per_file=100, copy_rawdata=True, - new_path="/tmp_data/data/", + new_path="/tmp/", ): data_path_ = data_path @@ -302,7 +306,7 @@ def para_compress_eigerdata( if not copy_rawdata: images_ = EigerImages(data_path, images_per_file, md) else: - print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp_data/Data.") + print("Due to a IO problem running on GPFS. The raw data will be copied to /tmp/") print("Copying...") copy_data(data_path, new_path) # print(data_path, new_path) @@ -319,10 +323,14 @@ def para_compress_eigerdata( else: N = len(images) + + if cpu_core_number == 0: + cpu_core_number = cpu_count() + N = int(np.ceil(N / bins)) Nf = int(np.ceil(N / num_sub)) if Nf > cpu_core_number: - print("The process number is larger than %s (XF11ID server core number)" % cpu_core_number) + print("The process number is larger than %s (current server's core threads)" % cpu_core_number) num_sub_old = num_sub num_sub = int(np.ceil(N / cpu_core_number)) Nf = int(np.ceil(N / num_sub)) diff --git a/pyCHX/chx_generic_functions.py b/pyCHX/chx_generic_functions.py index ea7f6cd..97f4b05 100644 --- a/pyCHX/chx_generic_functions.py +++ b/pyCHX/chx_generic_functions.py @@ -865,6 +865,71 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 plt.axis("off") plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) +def save_oavs_tifs_v2(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1, threshold=0,cross=[685, 440, 50]): + """ + save OAV images collected for a uid as an 'aggregate' image that can be attached to Olog (attaching is not part of this function) + Adds time stamps for series of OAV images + uid: uid + data_dir: directory for saving aggregate image + brightness_scale: scale brightness of images (default: 1, i.e. 
no scaling) + scalebar_size: [pixel] if sufficient information available in md, add scalebar to images; (default: 100) + cross: [xpos,ypos,width] (xpos,ypos): pixel coordinate of X-ray beam, width: width of cross on image [pixel] ;default: [685, 440, 50] -> optical axis for 12x zoom, 50 pixel wide cross + scale/threshold: manipulation of image intensity, enhancement of areas (currently not implemented) + 01/28/2025 by LW + """ + h=db[uid].v2.start + detectors=h['detectors'] + for d in detectors: + if 'oav' in d or 'OAV' in d: + oav_det=d + oav_cam = '%s_image'%d + + oavs = list(db[uid].data(oav_cam))[0] + res_key=None + for k in h.keys(): + if 'OAV' in k and 'resolution' in k: + res_key = k + try: + pixel_scalebar = np.ceil(scalebar_size / h[res_key]) + except: + pixel_scalebar = None + print("No OAV resolution is available.") + text_string = "%s $\mu$m" % scalebar_size + oav_period=np.array(db[uid].v2['primary']['config'][oav_det]['%s_cam_acquire_period'%oav_det])[0] + oav_expt=np.array(db[uid].v2['primary']['config'][oav_det]['%s_cam_acquire_time'%oav_det])[0] + + oav_times = [] + for i in range(len(oavs)): + oav_times.append(oav_expt + i * oav_period) + fig = plt.subplots(int(np.ceil(len(oavs) / 3)), 3, figsize=(3 * 5.08, int(np.ceil(len(oavs) / 3)) * 4)) + pc=1 + for m in range(len(oavs)): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, m + 1) + # plt.subplots(figsize=(5.2,4)) + img = oavs[m] + try: + ind = np.flipud(img * scale)[:, :, 2] < threshold + except: + ind = np.flipud(img * scale) < threshold + rgb_cont_img = np.copy(np.flipud(img)) + # rgb_cont_img[ind,0]=1000 + if brightness_scale != 1: + rgb_cont_img = scale_rgb(rgb_cont_img, scale=brightness_scale) + + plt.imshow(rgb_cont_img, interpolation="none", resample=True, cmap="gray") + plt.axis("equal") + plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") + plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") + if pixel_scalebar != None: + plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. + plt.text(1000, 50, text_string, fontsize=14, color="r") + plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") + plt.axis("off");pc+=1 + for i in range(int(np.ceil(len(oavs) / 3))* 3-pc+1): + plt.subplot(int(np.ceil(len(oavs) / 3)), 3, pc) + plt.axis("off") + plt.savefig(data_dir + "uid=%s_OVA_images.png" % uid) + def shift_mask_old(mask, shiftx, shifty): """YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel @@ -3437,49 +3502,56 @@ def get_sid_filenames(hdr, verbose=False): return ret -# def get_sid_filenames(header): -# """YG. Dev Jan, 2016 -# Get a bluesky scan_id, unique_id, filename by giveing uid - -# Parameters -# ---------- -# header: a header of a bluesky scan, e.g. 
db[-1] - -# Returns -# ------- -# scan_id: integer -# unique_id: string, a full string of a uid -# filename: sring - -# Usuage: -# sid,uid, filenames = get_sid_filenames(db[uid]) - -# """ -# from collections import defaultdict -# from glob import glob -# from pathlib import Path - -# filepaths = [] -# resources = {} # uid: document -# datums = defaultdict(list) # uid: List(document) -# for name, doc in header.documents(): -# if name == "resource": -# resources[doc["uid"]] = doc -# elif name == "datum": -# datums[doc["resource"]].append(doc) -# elif name == "datum_page": -# for datum in event_model.unpack_datum_page(doc): -# datums[datum["resource"]].append(datum) -# for resource_uid, resource in resources.items(): -# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) -# if 'eiger' not in resource['spec'].lower(): -# continue -# for datum in datums[resource_uid]: -# dm_kw = datum["datum_kwargs"] -# seq_id = dm_kw['seq_id'] -# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') -# filepaths.extend(new_filepaths) -# return header.start['scan_id'], header.start['uid'], filepaths +def get_sid_filenames_v2(run): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(run,verbose=False) + run = db[uid] + returns (scan_id, uid, filepath) + 01/26/2025 function by Dan Allan, modified by LW to handle Eiger + oav + """ + from pathlib import Path + import event_model + from area_detector_handlers.eiger import EigerHandler + + run = run.v2 + sid = run.start['scan_id'] + uid = run.start['uid'] + resources = [doc for name, doc in run.documents() if name == "resource"] + for r in resources: + if r['spec'] in list(['AD_EIGER2']): + resource = r + datum_pages = [doc for name, doc in run.documents() if name == "datum_page"] + handler = EigerHandler(str(Path(resource['root'], resource['resource_path'])), **resource['resource_kwargs']) + datums = [] + for datum_page in datum_pages: + for datum in event_model.unpack_datum_page(datum_page): + if 'seq_id' in datum['datum_kwargs'].keys(): + datums.append(datum) + datum_set = sorted(set(handler.get_file_list([datum["datum_kwargs"] for datum in datums]))) + for datum in datum_set: + if "_master.h5" in datum: + return sid, uid, datum + +def get_sid_filenames_v3(run): + """ + get scan_id, uid and detector filename from databroker + get_sid_filenames(run,verbose=False) + run = db[uid] + returns (scan_id, uid, filepath) + 01/26/2025 based on get_sid_filenames_v2 by Dan Allan, modified by LW to handle Eiger +oav as detectors and using md['sequence_id'] from 'series' + """ + run = run.v2 + sid = run.start['scan_id'] + uid = run.start['uid'] + resources = [doc for name, doc in run.documents() if name == "resource"] + for r in resources: + if r['spec'] in list(['AD_EIGER2']): + resource = r + if 'eiger' in resource['root']: + datum = '%s/%s_%s_master.h5'%(resource['root'],resource['resource_path'],run.start['sequence id']) + return sid, uid, datum + def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): @@ -3510,10 +3582,20 @@ def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): "beam_center_x": "beam_center_x", "beam_center_y": "beam_center_y", } + + det_mapping = { + "eiger4m": "eiger4m", + "eiger1m": "eiger1m", + "eiger500k": "eiger500K", + "eiger500K": "eiger500K" + } + + det_short = next((short for key, short in det_mapping.items() if key in det), None) + img_md = {} for k in list(img_md_dict.keys()): img_md[k] = hdr.config_data(det)["primary"][0]["%s_%s" % (det, 
img_md_dict[k])] - if detector in ["eiger4m_single_image", "eiger1m_single_image", "eiger500K_single_image"]: + if det_short is not None: img_md.update({"y_pixel_size": 7.5e-05, "x_pixel_size": 7.5e-05}) got_pixel_mask = True else: @@ -3522,7 +3604,7 @@ def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): # load pixel mask from static location if got_pixel_mask: # json_open = open(_mask_path_ + "pixel_masks/pixel_mask_compression_%s.json" % detector.split("_")[0]) - json_open = open(mask_path_full + "pixel_mask_compression_%s.json" % detector.split("_")[0]) + json_open = open(mask_path_full + "pixel_mask_compression_%s.json" % det_short) mask_dict = json.load(json_open) img_md["pixel_mask"] = np.array(mask_dict["pixel_mask"]) img_md["binary_mask"] = np.array(mask_dict["binary_mask"]) diff --git a/pyCHX/chx_olog.py b/pyCHX/chx_olog.py index 880c9f4..96154cd 100644 --- a/pyCHX/chx_olog.py +++ b/pyCHX/chx_olog.py @@ -1,6 +1,6 @@ from pyOlog import Attachment, LogEntry, OlogClient, SimpleOlogClient from pyOlog.OlogDataTypes import Logbook - +olog_client = SimpleOlogClient(url='https://epics-services-chx.nsls2.bnl.local:38981/Olog') def create_olog_entry(text, logbooks="Data Acquisition"): """ @@ -42,7 +42,7 @@ def update_olog_uid_with_file(uid, text, filename, append_name=""): atch = [Attachment(open(filename, "rb"))] try: - update_olog_uid(uid=uid, text=text, attachments=atch) + update_olog_uid(olog_client, uid=uid, text=text, attachments=atch) except Exception: from shutil import copyfile @@ -50,8 +50,7 @@ def update_olog_uid_with_file(uid, text, filename, append_name=""): copyfile(filename, npname) atch = [Attachment(open(npname, "rb"))] print(f"Append {append_name} to the filename.") - update_olog_uid(uid=uid, text=text, attachments=atch) - + update_olog_uid(olog_client,uid=uid, text=text, attachments=atch) def update_olog_logid_with_file(logid, text, filename=None, verbose=False): """ @@ -77,7 +76,7 @@ def update_olog_logid_with_file(logid, text, filename=None, verbose=False): pass -def update_olog_id(logid, text, attachments, verbose=True): +def update_olog_id(olog_client, logid, text, attachments, verbose=True): """ Update olog book logid entry with text and attachments files. @@ -98,8 +97,7 @@ def update_olog_id(logid, text, attachments, verbose=True): update_olog_id(logid=29327, text='add_test_atch', attachmenents=atch) """ - olog_client = SimpleOlogClient() - client = OlogClient() + client = olog_client.session # This is an instance of OlogClient url = client._url old_text = olog_client.find(id=logid)[0]["text"] @@ -111,9 +109,9 @@ def update_olog_id(logid, text, attachments, verbose=True): client.updateLog(logid, upd) if verbose: print(f"The url={url} was successfully updated with {text} and with " f"the attachments") + return old_text - -def update_olog_uid(uid, text, attachments): +def update_olog_uid(olog_client, uid, text, attachments): """ Update olog book logid entry cotaining uid string with text and attachments files. 
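(Editor's note) With this refactor the helpers no longer construct their own SimpleOlogClient; callers pass the module-level olog_client explicitly. A minimal usage sketch, assuming the module-level client defined at the top of chx_olog.py above (the uid and filename are placeholders):

    from pyOlog import Attachment
    from pyCHX.chx_olog import olog_client, update_olog_uid

    # attach an existing report file to the Olog entry containing this uid
    atch = [Attachment(open('uid=af8f66_xpcs_report.pdf', 'rb'))]
    update_olog_uid(olog_client, uid='af8f66', text='Add xpcs pdf report', attachments=atch)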
@@ -134,7 +132,6 @@ def update_olog_uid(uid, text, attachments): atch = [Attachment(open(filename1, 'rb'))] update_olog_uid(uid='af8f66', text='Add xpcs pdf report', attachments=atch) """ - olog_client = SimpleOlogClient() - - logid = olog_client.find(search=f"*{uid}*")[0]["id"] - update_olog_id(logid, text, attachments) + logid = olog_client.find(search=f"*{uid}*")[-1]["id"] # test: attach to FIRST occurance of this uid, which is when the data was actually created + #logid = olog_client.find(search=f"*{uid}*")[0]["id"] + update_olog_id(olog_client, logid, text, attachments) diff --git a/pyCHX/chx_outlier_detection.py b/pyCHX/chx_outlier_detection.py index 596393e..0e62cd8 100644 --- a/pyCHX/chx_outlier_detection.py +++ b/pyCHX/chx_outlier_detection.py @@ -1,3 +1,9 @@ +import numpy as np +try: # some genius moved roi within skbeam.... + from skbeam.core.utils import roi +except: + from skbeam.core import roi + def is_outlier(points, thresh=3.5, verbose=False): """MAD test""" points.tolist() diff --git a/pyCHX/chx_packages.py b/pyCHX/chx_packages.py index f7817b7..318e158 100644 --- a/pyCHX/chx_packages.py +++ b/pyCHX/chx_packages.py @@ -122,6 +122,8 @@ get_series_g2_taus, get_SG_norm, get_sid_filenames, + get_sid_filenames_v2, + get_sid_filenames_v3, get_today_date, get_touched_qwidth, get_waxs_beam_center, @@ -160,6 +162,7 @@ save_g2_general, save_lists, save_oavs_tifs, + save_oavs_tifs_v2, sgolay2d, shift_mask, show_img, diff --git a/pyCHX/chx_packages_local.py b/pyCHX/chx_packages_local.py new file mode 100644 index 0000000..979f9dc --- /dev/null +++ b/pyCHX/chx_packages_local.py @@ -0,0 +1,323 @@ +### This enables local import of pyCHX for testing + +import pickle as cpk + +import historydict + +# from pyCHX.chx_handlers import use_dask, use_pims +from chx_handlers import use_dask, use_pims + +# from pyCHX.chx_libs import ( +from chx_libs import ( + EigerHandler, + Javascript, + LogNorm, + Model, + cmap_albula, + cmap_vge, + datetime, + db, + getpass, + h5py, + multi_tau_lags, + np, + os, + pims, + plt, + random, + roi, + time, + tqdm, + utils, + warnings, +) +from eiger_io.fs_handler import EigerImages +from skimage.draw import line, line_aa, polygon + +# changes to current version of chx_packages.py +# added load_dask_data in generic_functions + + +use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' + +# from pyCHX.chx_compress import ( +from chx_compress import ( + MultifileBNLCustom, + combine_binary_files, + compress_eigerdata, + create_compress_header, + get_eigerImage_per_file, + init_compress_eigerdata, + para_compress_eigerdata, + para_segment_compress_eigerdata, + read_compressed_eigerdata, + segment_compress_eigerdata, +) + +# from pyCHX.chx_compress_analysis import ( +from chx_compress_analysis import ( + Multifile, + cal_each_ring_mean_intensityc, + cal_waterfallc, + compress_eigerdata, + get_avg_imgc, + get_each_frame_intensityc, + get_each_ring_mean_intensityc, + get_time_edge_avg_img, + mean_intensityc, + plot_each_ring_mean_intensityc, + plot_waterfallc, + read_compressed_eigerdata, +) + +# from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq +from chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq + +# from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF +from chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, cal_g2p, cal_GPF, get_g2_from_ROI_GPF + +# from 
pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym +from chx_crosscor import CrossCorrelator2, run_para_ccorr_sym + +# from pyCHX.chx_generic_functions import ( +from chx_generic_functions import ( + R_2, + RemoveHot, + apply_mask, + average_array_withNan, + check_bad_uids, + check_lost_metadata, + check_ROI_intensity, + check_shutter_open, + combine_images, + copy_data, + create_cross_mask, + create_fullImg_with_box, + create_hot_pixel_mask, + create_multi_rotated_rectangle_mask, + create_polygon_mask, + create_rectangle_mask, + create_ring_mask, + create_seg_ring, + create_time_slice, + create_user_folder, + delete_data, + extract_data_from_file, + filter_roi_mask, + find_bad_pixels, + find_bad_pixels_FD, + find_good_xpcs_uids, + find_index, + find_uids, + fit_one_peak_curve, + get_averaged_data_from_multi_res, + get_avg_img, + get_bad_frame_list, + get_base_all_filenames, + get_cross_point, + get_current_pipeline_filename, + get_current_pipeline_fullpath, + get_curve_turning_points, + get_detector, + get_detectors, + get_each_frame_intensity, + get_echos, + get_eigerImage_per_file, + get_fit_by_two_linear, + get_fra_num_by_dose, + get_g2_fit_general, + get_image_edge, + get_image_with_roi, + get_img_from_iq, + get_last_uids, + get_mass_center_one_roi, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_non_uniform_edges, + get_print_uids, + get_q_rate_fit_general, + get_qval_dict, + get_qval_qwid_dict, + get_roi_mask_qval_qwid_by_shift, + get_roi_nr, + get_series_g2_taus, + get_SG_norm, + get_sid_filenames, + get_sid_filenames_v2, + get_sid_filenames_v3, + get_today_date, + get_touched_qwidth, + get_waxs_beam_center, + lin2log_g2, + linear_fit, + load_dask_data, + load_data, + load_mask, + load_pilatus, + ls_dir, + mask_badpixels, + mask_exclude_badpixel, + move_beamstop, + pad_length, + pload_obj, + plot1D, + plot_fit_two_linear_fit, + plot_g2_general, + plot_q_g2fitpara_general, + plot_q_rate_fit_general, + plot_q_rate_general, + plot_xy_with_fit, + plot_xy_x2, + print_dict, + psave_obj, + read_dict_csv, + refine_roi_mask, + reverse_updown, + ring_edges, + run_time, + save_array_to_tiff, + save_arrays, + save_current_pipeline, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_lists, + save_oavs_tifs, + save_oavs_tifs_v2, + sgolay2d, + shift_mask, + show_img, + show_ROI_on_image, + shrink_image, + trans_data_to_pd, + update_qval_dict, + update_roi_mask, + validate_uid, +) + +# from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file +# from chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file + +# from pyCHX.chx_outlier_detection import ( +from chx_outlier_detection import is_outlier, outlier_mask + +# from pyCHX.chx_specklecp import ( +from chx_specklecp import ( + get_binned_his_std, + get_contrast, + get_his_std_from_pds, + get_xsvs_fit, + plot_g2_contrast, + plot_xsvs_fit, + save_bin_his_std, + save_KM, + xsvsc, + xsvsp, +) + +# from pyCH.chx_xpcs_xsvs_jupyter_V1 import( +from chx_xpcs_xsvs_jupyter_V1 import ( + compress_multi_uids, + do_compress_on_line, + get_fra_num_by_dose, + get_iq_from_uids, + get_series_g2_from_g12, + get_series_one_time_mulit_uids, + get_t_iqc_uids, + get_two_time_mulit_uids, + get_uids_by_range, + get_uids_in_time_period, + plot_dose_g2, + plot_entries_from_csvlist, + plot_entries_from_uids, + plot_t_iqc_uids, + plot_t_iqtMq2, + realtime_xpcs_analysis, + run_xpcs_xsvs_single, + wait_data_acquistion_finish, + 
wait_func, +) + +# from pyCHX.Create_Report import ( +from Create_Report import ( + create_multi_pdf_reports_for_uids, + create_one_pdf_reports_for_uids, + create_pdf_report, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + make_pdf_report, +) + +# from pyCHX.DataGonio import qphiavg +from DataGonio import qphiavg + +# from pyCHX.SAXS import ( +from SAXS import ( + fit_form_factor, + fit_form_factor2, + form_factor_residuals_bg_lmfit, + form_factor_residuals_lmfit, + get_form_factor_fit_lmfit, + poly_sphere_form_factor_intensity, + show_saxs_qmap, +) + +# from pyCHX.Two_Time_Correlation_Function import ( +from Two_Time_Correlation_Function import ( + get_aged_g2_from_g12, + get_aged_g2_from_g12q, + get_four_time_from_two_time, + get_one_time_from_two_time, + rotate_g12q_to_rectangle, + show_C12, +) + +# from pyCHX.XPCS_GiSAXS import ( +from XPCS_GiSAXS import ( + cal_1d_qr, + convert_gisaxs_pixel_to_q, + fit_qr_qz_rate, + get_1d_qr, + get_each_box_mean_intensity, + get_gisaxs_roi, + get_qedge, + get_qmap_label, + get_qr_tick_label, + get_qzr_map, + get_qzrmap, + get_reflected_angles, + get_t_qrc, + multi_uids_gisaxs_xpcs_analysis, + plot_gisaxs_g4, + plot_gisaxs_two_g2, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_qrc, + show_qzr_map, + show_qzr_roi, +) + +# from pyCHX.XPCS_SAXS import ( +from XPCS_SAXS import ( + cal_g2, + combine_two_roi_mask, + create_hot_pixel_mask, + get_angular_mask, + get_circular_average, + get_cirucular_average_std, + get_each_ring_mean_intensity, + get_QrQw_From_RoiMask, + get_ring_mask, + get_seg_from_ring_mask, + get_t_iq, + get_t_iqc, + get_t_iqc_imstack, + multi_uids_saxs_xpcs_analysis, + plot_circular_average, + plot_qIq_with_ROI, + plot_t_iqc, + recover_img_from_iq, + save_lists, +) diff --git a/standard_functions/standard_functions.py b/standard_functions/standard_functions.py new file mode 100644 index 0000000..3aefcbc --- /dev/null +++ b/standard_functions/standard_functions.py @@ -0,0 +1,469 @@ +from pyCHX.chx_packages import * +import sys + +def dispersion(dispersion_type,x,*args): + """ + general dispersion equation + dispersion(dispersion_type,x,*args) + dispersion_type: one of 'Qsquared', 'Qlinearoffset', 'Qlinear' or 'Qn' + Qsquared:y=a*x**2 + Qlinearoffset: y=a*x+b + Qlinear: y=a*x + QN: y=a*x**b + x: Q-values + *args: a or a,b according to equations avbove + returns: y + """ + if dispersion_type == 'Qsquared': + y=args[0]*x**2 + elif dispersion_type == 'Qlinearoffset': + y=args[0]*x+args[1] + elif dispersion_type == 'Qlinear': + y=args[0]*x + elif dispersion_type == 'QN': + y=args[0]*x**args[1] + else: raise Exception('ERROR: dispersion_type %s is unknown!'%dispersion_type) + return y + +def g2_double(x, a, b, c, d,e,f,g): + return a * (b*np.exp(-(c*x)**d)+(1-b)*np.exp(-(e*x)**f))**2+g + +def g2_single(x, a, b, c, d): + return a * np.exp(-2*(b*x)**c)+d + +def scale_fit(x,a): + return a*x + +def power_law(x,a,b): + return a*x**b + +def log10_power_law(x,a,b): + return np.log10(power_law(x,a,b)) + +def ps(x,y,shift=.5): + from scipy.special import erf + from scipy.optimize import curve_fit + + PEAK=x[np.argmax(y)] + PEAK_y=np.max(y) + COM=np.sum(x * y) / np.sum(y) + + ### from Maksim: assume this is a peak profile: + def is_positive(num): + return True if num > 0 else False + + # Normalize values first: + ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0 + + positive = is_positive(ym[0]) + list_of_roots = [] + for i in range(len(y)): + current_positive = is_positive(ym[i]) + if 
current_positive != positive: + list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1])) + positive = not positive + if len(list_of_roots) >= 2: + FWHM=abs(list_of_roots[-1] - list_of_roots[0]) + CEN=list_of_roots[0]+0.5*(list_of_roots[1]-list_of_roots[0]) + profile='peak' + + else: # ok, maybe it's a step function.. + print('no peak...trying step function...') + ym = ym + shift + def err_func(x, x0, k=2, A=1, base=0 ): #### erf fit from Yugang + return base - A * erf(k*(x-x0)) + popt, pcov = curve_fit(err_func, x,ym,p0=[np.mean(x),2,1.,0.]) + CEN=popt[0];FWHM=popt[1] + profile='step' + ps_dict={'PEAK':PEAK,'COM':COM,'FWHM':FWHM,'CEN':CEN,'profile':profile} + return ps_dict + +def skew_Lorentzian(x,a,b,c,d,e,f): + """ + a: assymetry parameter (0 ~ symmetric) + b: width + c:center + d: A + e: slope of linear offset + f: constant offset + source: https://www.webpages.uidaho.edu/brauns/vibspect1.pdf + """ + h=2*b/(1+np.exp(a*(x-c))) + lin=e*x+f + y=(2*d/(np.pi*h))/(1+4*((x-c)/h)**2)+lin + return y + +def pseudo_Voigt(x,a,b,c,d,e,f): + """ + a: center + b: amplitude + c: FWHM + d: mixing parameter (Gaussian/Lorentzian) + e: constant offset + f: slope of linear offset + [https://www.webpages.uidaho.edu/brauns/vibspect1.pdf] + """ + gam=0.5*c + w=c/(2*np.sqrt(2*np.log(2))) + eta=d + y = b*(d/(np.pi*gam)*(gam**2/((x-a)**2+gam**2))+(1-eta)*(1/(w*np.sqrt(2*np.pi))*np.exp(-(x-a)**2/(2*w**2))))+e+f*x + return y + +def norm_pseudo_Voigt(x, a, b, c, d, e, f):# move to standard functions and import + y = pseudo_Voigt(x, a, b, c, d, e, f) + return y/max(y) + +def double_Voigt(x,a,b,c,d,e,f,a1,b1,c1,d1): + """ + a, a1: centers + b, b1: amplitudes + c, c1: FWHMs + d, d1: mixing parameters (Gaussian/Lorentzian) + e: constant offset + f: slope of linear offset + """ + gam=0.5*c;gam1=0.5*c1 + w=c/(2*np.sqrt(2*np.log(2)));w1=c1/(2*np.sqrt(2*np.log(2))) + eta=d;eta1=d1 + y1 = b*(d/(np.pi*gam)*(gam**2/((x-a)**2+gam**2))+(1-eta)*(1/(w*np.sqrt(2*np.pi))*np.exp(-(x-a)**2/(2*w**2))))+e+f*x + y2 = b1*(d1/(np.pi*gam1)*(gam1**2/((x-a1)**2+gam1**2))+(1-eta1)*(1/(w1*np.sqrt(2*np.pi))*np.exp(-(x-a1)**2/(2*w**2)))) + y=y1+y2 + return y + +def phi_test_Voigt(x,a,b,c,d,e,f,b1,c1,d1): + """ + function for testing azimuthal dependence + variant of double_Voigt, with a fixed dependence (a1=a+180) between the two centers + x in degrees + a centers -> a1=a+180 (deg) + b, b1: amplitudes + c, c1: FWHMs + d, d1: mixing parameters (Gaussian/Lorentzian) + e: constant offset + f: slope of linear offset + """ + a1=a+180 + a2=a-180 + a3=a+360 + a4=a-360 + gam=0.5*c;gam1=0.5*c1 + w=c/(2*np.sqrt(2*np.log(2)));w1=c1/(2*np.sqrt(2*np.log(2))) + eta=d;eta1=d1 + y1 = b*(d/(np.pi*gam)*(gam**2/((x-a)**2+gam**2))+(1-eta)*(1/(w*np.sqrt(2*np.pi))*np.exp(-(x-a)**2/(2*w**2))))+e+f*x + y2 = b1*(d1/(np.pi*gam1)*(gam1**2/((x-a1)**2+gam1**2))+(1-eta1)*(1/(w1*np.sqrt(2*np.pi))*np.exp(-(x-a1)**2/(2*w**2)))) + y3 = b1*(d1/(np.pi*gam1)*(gam1**2/((x-a2)**2+gam1**2))+(1-eta1)*(1/(w1*np.sqrt(2*np.pi))*np.exp(-(x-a2)**2/(2*w**2)))) + y4 = b1*(d1/(np.pi*gam1)*(gam1**2/((x-a3)**2+gam1**2))+(1-eta1)*(1/(w1*np.sqrt(2*np.pi))*np.exp(-(x-a3)**2/(2*w**2)))) + y5 = b1*(d1/(np.pi*gam1)*(gam1**2/((x-a4)**2+gam1**2))+(1-eta1)*(1/(w1*np.sqrt(2*np.pi))*np.exp(-(x-a4)**2/(2*w**2)))) + y=y1+y2+y3+y4+y5 + return y + + +################ functions related to Orientation Parameter Analysis ################################# +def I_GND(x,xc,hw,beta,rel): + """ + generalized normal distribution: can be used as order parameter distribution to 
extract order parameters from SAXS intensities + I_GND(x,xc,hw,beta) + x: angle [deg] + xc: list of center position(s) of peak(s) [deg], NOTE: will automatically generate twin-peak for pi-symmetry + hw: list of half width(s) of peak(s) [deg] + beta: sharpness parameter: 2: wide, 1: sharp + rel: list of relative peak intensities (first peak is always '1', twin-peak for pi-symmetry is x'rel' ) + by LW 12/16/2022 according to https://onlinelibrary.wiley.com/doi/full/10.1002/app.50939 + """ + from scipy.special import gamma as scp_gamma + I=[] + for hh,h in enumerate(hw): + a=h/(np.log(2)**(1/beta[hh])) + mu=[xc[hh],xc[hh]-180,xc[hh]+180] + for mm,m in enumerate(mu): + s=1 + if mm != 0: + s=rel[hh] + I.append(s*beta[hh]/(2*a*scp_gamma(1/beta[hh]))*np.exp(-np.abs(x-m)**beta[hh]/(a**beta[hh]))) + return np.sum(I,axis=0) + +def I_GND_(x,xc,hw,beta): + """ + generalized normal distribution: can be used as order parameter distribution to extract order parameters from SAXS intensities + I_GND(x,xc,hw,beta) + x: angle [deg] + xc: list of center position(s) of peak(s) [deg], NOTE: will automatically generate twin-peak for pi-symmetry + hw: list of half width(s) of peak(s) [deg] + beta: sharpness parameter: 2: wide, 1: sharp + by LW 12/16/2022 according to https://onlinelibrary.wiley.com/doi/full/10.1002/app.50939 + """ + from scipy.special import gamma as scp_gamma + I=[] + for hh,h in enumerate(hw): + a=h/(np.log(2)**(1/beta[hh])) + mu=[xc[hh],xc[hh]-180,xc[hh]+180] + for m in mu: + I.append(beta[hh]/(2*a*scp_gamma(1/beta[hh]))*np.exp(-np.abs(x-m)**beta[hh]/(a**beta[hh]))) + return np.sum(I,axis=0) + +def I_LD(x,xc,hw,rel): + """ + Lorentz distribution: can be used as order parameter distribution to extract order parameters from SAXS intensities + I_LD(x,xc,hw) + x: angle [deg] + xc: list of center position(s) of peak(s) [deg], NOTE: will automatically generate twin-peak for pi-symmetry + hw: list of half width(s) of peak(s) [deg] + rel: list of relative peak intensities (first peak is always '1', twin-peak for pi-symmetry is x'rel' ) + by LW 12/16/2022 according to https://onlinelibrary.wiley.com/doi/full/10.1002/app.50939 + """ + I=[] + for hh,h in enumerate(hw): + mu=[xc[hh],xc[hh]-180,xc[hh]+180] + for mm,m in enumerate(mu): + s=1 + if mm != 0: + s=rel[hh] + I.append(s/(np.pi*h)*h**2/((x-m)**2+h**2)) + return np.sum(I,axis=0) + +def I_GD(x,xc,hw): + """ + Gaussian distribution: can be used as order parameter distribution to extract order parameters from SAXS intensities + I_GD(x,xc,hw) + x: angle [deg] + xc: list of center position(s) of peak(s) [deg], NOTE: will automatically generate twin-peak for pi-symmetry + hw: list of half width(s) of peak(s) [deg] + rel: list of relative peak intensities (first peak is always '1', twin-peak for pi-symmetry is x'rel' ) + by LW 12/16/2022 according to https://onlinelibrary.wiley.com/doi/full/10.1002/app.50939 + """ + I=[] + for hh,h in enumerate(hw): + s=h/(2*np.log(2))**(1/2) + mu=[xc[hh],xc[hh]-180,xc[hh]+180] + for mm,m in enumerate(mu): + s=1 + if mm != 0: + s=rel[hh] + I.append(s/(s*np.sqrt(2*np.pi))*np.exp(-np.abs(x-m)**2/(2*s**2))) + return np.sum(I,axis=0) + +def periodic_background(x,xoff,slope,constant): + """ + function creates linear, but periodic (180deg) background + periodic_background(x,xoff,slope,constant) + xoff: shift along x (0<=xoff<=90) + slope: slope of the linear background (switches sign to be periodic) + constant: constant offset for all x + by LW 12/16/2022 + """ + back=[];b=slope + if xoff <0 or xoff>90: + raise Exception('offset 
must be in the range [0,90] deg.!') + + for i in x: + if i>xoff and i<=90+xoff: + back.append(b*i-b*xoff) + elif i<=xoff: + back.append(-b*i+b*xoff) + elif i>180+xoff and i<=270+xoff: + back.append(b*i-(180+xoff)*b) + elif i>90+xoff and i<=180+xoff: + back.append(-b*i+b*(180+xoff)) + elif i>270+xoff and i<=360+xoff: + back.append(-b*i+b*(360+xoff)) + return np.array(back)+constant + +def I_GND_fit_func(x,xc,hw,beta,rel,scale,xoff,slope,const): + """ + helper function for fitting: combining Generalized Normal Distribution (GND) with periodic linear background + Note: while I_GND can handle multiple peaks in [0,pi], this fit function can only take one peak position + typically used as Intensity Distribution Function (IDF) in order parameter analysis + by LW 12/16/2022 + """ + xc=[xc];hw=[hw];beta=[beta];rel=[rel] + return scale*I_GND(x,xc,hw,beta,rel)+periodic_background(x,xoff,slope,const) + +def I_GD_fit_func(x,xc,hw,rel,scale,xoff,slope,const): + """ + helper function for fitting: combining Gaussian Distribution (GD) with periodic linear background + Note: while I_GD can handle multiple peaks in [0,pi], this fit function can only take one peak position + typically used as Intensity Distribution Function (IDF) in order parameter analysis + by LW 12/17/2022 + """ + xc=[xc];hw=[hw];rel=[rel] + return scale*I_GD(x,xc,hw,rel)+periodic_background(x,xoff,slope,const) + +def I_LD_fit_func(x,xc,hw,rel,scale,xoff,slope,const): + """ + helper function for fitting: combining Lorentzian Distribution (LD) with periodic linear background + Note: while I_LD can handle multiple peaks in [0,pi], this fit function can only take one peak position + typically used as Intensity Distribution Function (IDF) in order parameter analysis + by LW 12/17/2022 + """ + xc=[xc];hw=[hw];rel=[rel] + return scale*I_GD(x,xc,hw,rel)+periodic_background(x,xoff,slope,const) + +def linear_intercept(x,a,b): + return a*(x-b) +####################################################################################################### +# Non-mathematical functions: +####################################################################################################### + +def get_uid_list(scan_uid_list,user=None,cycle=None,uid_length=99,fail_nonexisting=True,verbose=False,warning=True): + """ + convert list of scan_ids to uids (with additional optional information 'user' and 'cycle') or vice versa + get_uid_list(scan_uid_list,user=None,cycle=None) + scan_uid_list: can be a mixed list of scan_ids [integers] and uids or short uids [strings] + user: user name associated with the data [string] + fail_nonexisting: if True -> raises Exception if a combination of scan_id/uid and cycle/user cannot be found; if False -> skips scan_id/uid, in this case the returned lists are shorter than the input list + returns: scan_list, uid_list (uid[:uid_length] -> can return shortened uid strings) + verbose: if True -> print pairs of scan_id, uid + warning: if True -> print warning when combination of user, cycle and scan_id cannot be found (could be solved more elegantly with different verbose levels...) 
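    example (illustrative, added in editing; the scan_id/uid, user and cycle values are placeholders):
        s_list, u_list = get_uid_list([12345, 'af8f66'], user='username', cycle='2024_3', verbose=True)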
+ LW 01/03/2024 + """ + u_list = []; s_list=[];message_str = '' + #for s in scan_uid_list: + for s in tqdm (scan_uid_list,desc="getting scan_ids/uids…", ascii=False, ncols=200,file=sys.stdout, colour='GREEN'): + try: + if type(s) == int: + #s_list.append(s) + if not user and not cycle: + u=[h.start['uid'] for h in db(scan_id=s)][0] + elif user and not cycle: + u=[h.start['uid'] for h in db(scan_id=s, user=user)][0] + elif not user and cycle: + u=[h.start['uid'] for h in db(scan_id=s, cycle=cycle)][0] + elif user and cycle: + u=[h.start['uid'] for h in db(scan_id=s, user=user,cycle=cycle)][0] + u_list.append(u[:uid_length]) + s_list.append(s) + else: + h=db[s];u_list.append(h.start['uid'][:uid_length]);s_list.append(h.start['scan_id']) + except: + if fail_nonexisting: raise Exception('ERROR: combination of scan_id/uid: %s and user=%s and cycle=%s could not be found...'%(s,user,cycle)) + else: + if warning: + message_str+='WARNING: combination of scan_id/uid: %s and user=%s and cycle=%s could not be found...-> skip (output list of uids will be shorter than input list!)\n'%(s,user,cycle) + #print('WARNING: combination of scan_id/uid: %s and user=%s and cycle=%s could not be found...-> skip (output list of uids will be shorter than input list!)'%(s,user,cycle)) + print(message_str) + if verbose: + print('detailed information for all scan_ids:') + for ss,s in enumerate(s_list): + print('uid list: %s -> scan list: %s'%(u_list[ss],s)) + return s_list,u_list + +import pickle +def save_pickle(filepath,data_dict,verbose=False): + """ + wrapper function to save python dictionaries as pkl files + save_pickle(filepath,data_dict,verbose=False) + filepath: [string] path to save the data, including filename (IF ending is NOT .pkl, it will be added) + data_dict: dictionary with data to save + LW 01/03/2024 + """ + if filepath[-4:] != '.pkl': + filepath+='.pkl' + with open(filepath, 'wb') as dict_items_save: + pickle.dump(data_dict, dict_items_save) + if verbose: + print('Data has been saved to file: '+filepath) + +def load_pickle(filepath,verbose=False): + """ + wrapper function to load content of .pkl files as python dictionaries + load_pickle(filepath,verbose=False) + filepath: [string] path to load the data, including filename (IF ending is NOT .pkl, it will be added) + returns dictionary with data in dictionary form from pkl file + LW 01/03/2024 + """ + if filepath[-4:] != '.pkl': + filepath+='.pkl' + if verbose: + print('Loading data from file: '+filepath) + with open(filepath, 'rb') as dict_items_open: + return pickle.load(dict_items_open) + +def get_data_dir(uid,_base_path_,_base_path_pass_,create=False,verbose=False): + """ + returns for a given uid + '/nsls2/data/chx/legacy/analysis/cycle/user/uid/' + or + '/nsls2/data/chx/proposals/cycle/pass-123456/uid/' + """ + md=db[uid].start + if 'proposal' in md.keys(): + data_dir = _base_path_pass_+'%s/pass-%s/Results/%s/'%(md['cycle'],md['proposal']['proposal_id'],md['uid']) + else: + data_dir = _base_path_+'%s/%s/Results/%s/'%(md['cycle'],md['user'],md['uid']) + if verbose: + print('data directory for uid: %s: %s'%(md['uid'],data_dir)) + if create: + os.mkdir(data_dir) + if verbose: + print('created directory %s!'%data_dir) + return data_dir + +def get_result_dir(uid,_base_path_,_base_path_pass_,create=False,verbose=False): + """ + returns for a given uid + '/nsls2/data/chx/legacy/analysis/cycle/user/Results/' + or + '/nsls2/data/chx/proposals/cycle/pass-123456/Results/' + """ + md=db[uid].start + if 'proposal' in md.keys(): + result_dir = 
_base_path_pass_+'%s/pass-%s/Results/'%(md['cycle'],md['proposal']['proposal_id']) + else: + result_dir = _base_path_+'%s/%s/Results/'%(md['cycle'],md['user']) + if verbose: + print('result directory for uid: %s: %s'%(md['uid'],data_dir)) + if create: + os.mkdir(result_dir) + if verbose: + print('created directory %s!'%result_dir) + return result_dir + +def get_pass_dir(uid,_base_path_,_base_path_pass_,create=False,verbose=False): + """ + returns for a given uid + '/nsls2/data/chx/legacy/analysis/cycle/user/' + or + '/nsls2/data/chx/proposals/cycle/pass-123456/' + """ + md=db[uid].start + if 'proposal' in md.keys(): + result_dir = _base_path_pass_+'%s/pass-%s/'%(md['cycle'],md['proposal']['proposal_id']) + else: + result_dir = _base_path_+'%s/%s/'%(md['cycle'],md['user']) + if verbose: + print('result directory for uid: %s: %s'%(md['uid'],data_dir)) + if create: + os.mkdir(result_dir) + if verbose: + print('created directory %s!'%result_dir) + return result_dir + +def monitor_resources(total_time=1E4,update_time=5): + """ + monitor computing resources (cpu and memory usage, free memory) and displays a plot + total_time: total time [s] for this monitor to run + update time: specify how often to collect resource data [s] + plot will be updated after each 10th update + """ + import psutil + from datetime import datetime + for i in range(5000): + time_stamp.append(time.time()) + cpu_usage.append(psutil.cpu_percent());memory_usage.append(psutil.virtual_memory().percent);memory_free.append(psutil.virtual_memory().available * 100 / psutil.virtual_memory().total) + time.sleep(5) + + if np.mod(i,10) == 0: + display(clear=True,wait=True) + print('last updated: %s'%datetime.now().strftime("%m/%d/%Y, %H:%M:%S")) + fig,ax=plt.subplots(figsize=(5,4)) + now=time.time() + plt.plot((now-np.array(time_stamp))/60,cpu_usage,label='cpu [%]') + plt.plot((now-np.array(time_stamp))/60,memory_usage,label='memory [%]') + plt.xlabel('minutes ago');plt.ylabel('cpu / memory usage [%]') + plt.grid(True,which='both');plt.legend(loc='upper left',bbox_to_anchor=(1.1,1)) + ax=plt.gca() + ax2=plt.twinx(ax) + ax2.plot((now-np.array(time_stamp))/60,memory_free,'r-',label='memory [%]') + ax2.set_ylabel('free memory [%]',color='r');ax2.set_ylim(0,100) + display(plt.gcf()) + plt.close() \ No newline at end of file
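Editor's note: the monitor_resources() helper added at the end of standard_functions.py references time_stamp, cpu_usage, memory_usage and memory_free without initializing them, hard-codes a 5 s sleep and a 5000-iteration loop, and does not use its total_time/update_time arguments. Below is a minimal self-contained sketch of how it could be written; monitor_resources_sketch and plot_every are names introduced here for illustration, and the IPython display(clear=True, wait=True) refresh is replaced by plain plt.show()/plt.close().

    import time
    from datetime import datetime

    import matplotlib.pyplot as plt
    import numpy as np
    import psutil


    def monitor_resources_sketch(total_time=1e4, update_time=5, plot_every=10):
        """Collect cpu/memory usage with psutil and refresh a simple plot every plot_every samples."""
        time_stamp, cpu_usage, memory_usage, memory_free = [], [], [], []
        n_samples = int(total_time // update_time)
        for i in range(n_samples):
            # record one sample of cpu load, memory load and free memory fraction
            time_stamp.append(time.time())
            cpu_usage.append(psutil.cpu_percent())
            memory_usage.append(psutil.virtual_memory().percent)
            memory_free.append(psutil.virtual_memory().available * 100 / psutil.virtual_memory().total)
            time.sleep(update_time)
            if i % plot_every == 0:
                now = time.time()
                minutes_ago = (now - np.array(time_stamp)) / 60
                fig, ax = plt.subplots(figsize=(5, 4))
                ax.plot(minutes_ago, cpu_usage, label='cpu [%]')
                ax.plot(minutes_ago, memory_usage, label='memory [%]')
                ax.set_xlabel('minutes ago')
                ax.set_ylabel('cpu / memory usage [%]')
                ax.grid(True, which='both')
                ax.legend(loc='upper left', bbox_to_anchor=(1.1, 1))
                # free memory on a second y-axis, as in the committed version
                ax2 = ax.twinx()
                ax2.plot(minutes_ago, memory_free, 'r-')
                ax2.set_ylabel('free memory [%]', color='r')
                ax2.set_ylim(0, 100)
                ax.set_title('last updated: %s' % datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
                plt.show()
                plt.close(fig)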