Skip to content

Commit

Permalink
Merge branch 'main' into Multi-kernel
Browse files Browse the repository at this point in the history
* main:
  Patchup date to 0.2.1 (USNavalResearchLaboratory#52)
  Fixes for writing oh5 files and reading HDF5 version/manufacturer tags (USNavalResearchLaboratory#51)
  • Loading branch information
drowenhorst-nrl committed Feb 1, 2024
2 parents 71695fd + 0036e90 commit 5ef2ce1
Show file tree
Hide file tree
Showing 7 changed files with 90 additions and 42 deletions.
14 changes: 12 additions & 2 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -5,20 +5,30 @@ Changelog
All notable changes to PyEBSDIndex will be documented in this file. The format is based
on `Keep a Changelog <https://keepachangelog.com/en/1.1.0>`_.

0.2.dev1
0.2.1 (2024-01-29)
==================
Added
-----


Changed
-------
- ``nlpar.NLPAR.opt_lambda()`` method will now return the array of
  the three optimal lambdas [less, medium, more] smoothing. The
  default lambda is still set to [medium]. Previous return was ``None``
- ``nlpar.NLPAR.calcnlpar()`` will now return a string of the new file
that was made with the NLPARed patterns. Previous return was ``None``


Removed
-------

Fixed
-----

- ``ebsd_pattern``: Reading HDF5 manufacturing strings, and proper identification of
the vendors within get_pattern_file_obj
- ``ebsd_pattern``: Proper reading of parameters from Bruker HDF5 files.
- Corrected writing of oh5 files with ``ebsdfile``

0.2.0 (2023-08-08)
==================
Expand Down
13 changes: 8 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,14 @@ Python based tool for Radon based EBSD orientation indexing.
[![Documentation status](https://readthedocs.org/projects/pyebsdindex/badge/?version=latest)](https://pyebsdindex.readthedocs.io/en/latest/)
[![PyPI version](https://img.shields.io/pypi/v/pyebsdindex.svg)](https://pypi.python.org/pypi/pyebsdindex)

The pattern processing is based on a GPU pipeline, and is based on the work of S. I.
Wright and B. L. Adams. Metallurgical Transactions A-Physical Metallurgy and Materials
Science, 23(3):759–767, 1992, and N. Krieger Lassen. Automated Determination of Crystal
Orientations from Electron Backscattering Patterns. PhD thesis, The Technical University
of Denmark, 1994.
The pattern processing is based on a GPU pipeline. Details can be found
in D. J. Rowenhorst, P. G. Callahan, H. W. Ånes. Fast Radon transforms for
high-precision EBSD orientation determination using PyEBSDIndex. Journal of
Applied Crystallography, 57(1):3–19, 2024. and is based on the work of S. I.
Wright and B. L. Adams. Metallurgical Transactions A-Physical Metallurgy and
Materials Science, 23(3):759–767, 1992, and N. Krieger Lassen. Automated
Determination of Crystal Orientations from Electron Backscattering Patterns.
PhD thesis, The Technical University of Denmark, 1994.

The band indexing is achieved through triplet voting using the methods outlined by A.
Morawiec. Acta Crystallographica Section A Foundations and Advances, 76(6):719–734,
Expand Down
4 changes: 2 additions & 2 deletions pyebsdindex/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@
"Dave Rowenhorst",
"Håkon Wiik Ånes",
]
__description__ = "Python based tool for Hough/Radon based EBSD indexing"
__description__ = "Python based tool for Radon based EBSD indexing"
__name__ = "pyebsdindex"
__version__ = "0.2.dev1"
__version__ = "0.2.1"


# Try to import only once
Expand Down
28 changes: 17 additions & 11 deletions pyebsdindex/ebsd_pattern.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,14 +81,20 @@ def get_pattern_file_obj(path,file_type=str('')):
print("File Not Found:",str(Path(pathtemp[0])))
return -1

if 'Manufacture' in f.keys():
vendor = str(f['Manufacture'][()][0])
if 'Manufacturer' in f.keys():
vendor = f['Manufacturer'][()]
if type(vendor) is np.ndarray:
vendor = vendor[0]
vendor = str(vendor.decode(encoding='UTF-8'))
if vendor.upper() == 'EDAX':
ebsdfileobj = EDAXOH5(path)
if vendor.upper() >= 'BRUKER NANO':
if vendor.upper() == 'BRUKER NANO':
ebsdfileobj = BRUKERH5(path)
if 'manufacturer' in f.keys():
vendor = str((f['manufacturer'][()][0]).decode('UTF-8'))
vendor = f['manufacturer'][()]
if type(vendor) is np.ndarray:
vendor = vendor[0]
vendor = str(vendor.decode('UTF-8'))
if vendor >= 'kikuchipy':
ebsdfileobj = KIKUCHIPYH5(path)
if ebsdfileobj.h5patdatpth is None: #automatically chose the first data group
Expand Down Expand Up @@ -1262,7 +1268,7 @@ def read_header(self, path=None):
print("File Not Found:",str(Path(self.filepath)))
return -1

self.version = str(f['Version'][()][0])
self.version = str(f['Version'][()][0].decode('UTF-8'))

if self.version >= 'OIM Analysis 8.6.00':
ngrp = self.get_data_paths()
Expand Down Expand Up @@ -1372,7 +1378,7 @@ def read_header(self, path=None):
print("File Not Found:",str(Path(self.filepath)))
return -1

self.version = str(f['Version'][()][0])
self.version = str(f['Version'][()].decode('UTF-8'))

if self.version.upper() >= 'ESPIRT 2.X':
ngrp = self.get_data_paths()
Expand All @@ -1389,12 +1395,12 @@ def read_header(self, path=None):
self.nPatterns = shp[-3]
self.filedatatype = dset.dtype.type
headerpath = (f[self.h5patdatpth].parent.parent)["Header"]
self.nCols = np.uint32(headerpath['NCOLS'][()][0])
self.nRows = np.uint32(headerpath['NROWS'][()][0])
self.nCols = np.uint32(headerpath['NCOLS'][()])
self.nRows = np.uint32(headerpath['NROWS'][()])
#self.hexflag = np.int32(f[headerpath+'Grid Type'][()][0] == 'HexGrid')

self.xStep = np.float32(headerpath['XSTEP'][()][0])
self.yStep = np.float32(headerpath['YSTEP'][()][0])
self.xStep = np.float32(headerpath['XSTEP'][()])
self.yStep = np.float32(headerpath['YSTEP'][()])

return 0 #note this function uses multiple returns

Expand Down Expand Up @@ -1460,7 +1466,7 @@ def read_header(self, path=None):
print("File Not Found:",str(Path(self.filepath)))
return -1

self.version = str(f['Format Version'][()][0])
self.version = str(f['Format Version'][()][0].decode('UTF-8'))

if self.version >= '5.0':
ngrp = self.get_data_paths()
Expand Down
5 changes: 2 additions & 3 deletions pyebsdindex/misorientation.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,11 +91,10 @@ def misorientcubic_quicknb(q1In,q2In):
i1 = i % n1
i2 = i % n2

q1i = q1In[i1, :].copy().reshape(4)
q2i = q2In[i2,:].copy()
q2i = q2i.reshape(4)
q2i[1:4] *= -1.0

q1i = q1In[i1,:].copy().reshape(4)
q2i[1:4] *= -1.0 # take the conjugate/inverse of q2

qAB = np.abs(rotlib.quat_multiply1(q1i, q2i))

Expand Down
3 changes: 2 additions & 1 deletion pyebsdindex/nlpar.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,7 @@ def d2norm(d2, n2, dij, sigma):
self.lam = np.median(np.mean(lamopt_values, axis = 0))
if self.sigma is None:
self.sigma = sigma
return np.mean(lamopt_values, axis = 0).flatten()

def calcnlpar(self, chunksize=0, searchradius=None, lam = None, dthresh = None, saturation_protect=True, automask=True,
filename=None, fileout=None, reset_sigma=True, backsub = False, rescale = False):
Expand Down Expand Up @@ -430,7 +431,7 @@ def calcnlpar(self, chunksize=0, searchradius=None, lam = None, dthresh = None,
# sigchunk[rowstartcount[0]:rowstartcount[0]+rowstartcount[1],:]

numba.set_num_threads(nthreadpos)

return str(patternfileout.filepath)

def calcsigma(self,chunksize=0,nn=1,saturation_protect=True,automask=True):

Expand Down
65 changes: 47 additions & 18 deletions pyebsdindex/pcopt.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@

import numpy as np
import multiprocessing
import functools
import scipy.optimize as opt
from timeit import default_timer as timer

Expand Down Expand Up @@ -192,9 +193,11 @@ def optimize_pso(
PC0=None,
batch=False,
search_limit=0.2,
early_exit = 0.0001,
nswarmparticles=30,
pswarmpar=None,
niter=50,
return_cost=False,
verbose=1
):
"""Optimize pattern center (PC) (PCx, PCy, PCz) in the convention
Expand Down Expand Up @@ -222,13 +225,20 @@ def optimize_pso(
search_limit : float, optional
Default is 0.2 for all PC values, and sets the +/- limit for the
optimization search.
early_exit : float, optional
    Default is 0.0001 for all PC values, and sets a value for which
    the optimum is considered converged before the number of iterations
    is reached. The optimization will exit early if the velocity and distance
    of all the swarm particles is less than the early_exit value.
nswarmparticles : int, optional
Number of particles in a swarm. Default is 30.
pswarmpar : dict, optional
Particle swarm parameters "c1", "c2", and "w" with defaults 3.5,
3.5, and 0.8, respectively.
niter : int, optional
Number of iterations. Default is 50.
return_cost : bool, optional
    Set to True to return the cost value as well as the optimum fit PC.
verbose : int, optional
Whether to print the parameters and progress of the
optimization (>= 1) or not (< 1). Default is to print.
Expand Down Expand Up @@ -277,7 +287,8 @@ def optimize_pso(
# )
optimizer = PSOOpt(dimensions=3, n_particles=nswarmparticles,
c1=pswarmpar['c1'],
c2 = pswarmpar['c2'], w = pswarmpar['w'], hyperparammethod='auto')
c2 = pswarmpar['c2'], w = pswarmpar['w'], hyperparammethod='auto',
early_exit=early_exit)

if not batch:
# cost, PCoutRet = optimizer.optimize(
Expand All @@ -286,12 +297,13 @@ def optimize_pso(
cost, PCoutRet = optimizer.optimize(_optfunction, indexer=indexer, banddat=banddat,
start=PC0, bounds=(PC0 - np.array(search_limit), PC0 + np.array(search_limit)),
niter=niter, verbose=verbose)

costout = cost
#print(cost)
else:
PCoutRet = np.zeros((npoints, 3))
if verbose >= 1:
print('', end='\n')
costout = np.zeros(npoints, dtype=np.float32)
for i in range(npoints):
# cost, PCoutRet[i, :] = optimizer.optimize(
# _optfunction, niter, indexer=indexer, banddat=banddat[i, :, :]
Expand All @@ -304,6 +316,7 @@ def optimize_pso(
niter=niter, verbose=0)

PCoutRet[i, :] = newPC
costout[i] = cost
progress = int(round(10 * float(i) / npoints))
if verbose >= 1:
print('', end='\r')
Expand Down Expand Up @@ -338,9 +351,10 @@ def optimize_pso(
newout[:3] = PCoutRet
newout[3] = delta[3]
PCoutRet = newout

return PCoutRet

if return_cost is False:
return PCoutRet
else:
return PCoutRet, costout

def _file_opt(fobj, indexer, stride=200, groupsz = 3):
nCols = fobj.nCols
Expand Down Expand Up @@ -373,7 +387,8 @@ def __init__(self,
c2 = 2.05,
w = 0.8,
hyperparammethod = 'static',
boundmethod = 'bounce'):
boundmethod = 'bounce',
early_exit=None):
self.n_particles = int(n_particles)
self.dimensions = int(dimensions)
self.c1 = c1
Expand All @@ -391,6 +406,7 @@ def __init__(self,
self.niter = None
self.pos = None
self.vel = None
self.early_exit = early_exit


def initializeswarm(self, start=None, bounds=None):
Expand Down Expand Up @@ -437,7 +453,7 @@ def updateswarmbest(self, fun2opt, pool, **kwargs):
#print(timer()-tic)
#pos = self.pos.copy()
#tic = timer()
#results = pool.map(partial(fun2opt, **kwargs),list(pos) )
#results = pool.map(functools.partial(fun2opt, **kwargs),list(pos) )
#print(timer()-tic)
#print(len(results[0]), type(results[0]))
#print(len(results))
Expand Down Expand Up @@ -526,22 +542,35 @@ def printprogress(self, iter):
def optimize(self, function, start=None, bounds=None, niter=50, verbose = 1, **kwargs):

self.initializeswarm(start, bounds)
early_exit = self.early_exit
if early_exit is None:
early_exit = -1.0

with multiprocessing.Pool(min(multiprocessing.cpu_count(), self.n_particles)) as pool:
#with multiprocessing.get_context("spawn").Pool(min(multiprocessing.cpu_count(), self.n_particles)) as pool:
pool = None
if verbose >= 1:
print('n_particles:', self.n_particles, 'c1:', self.c1, 'c2:', self.c2, 'w:', self.w )

self.niter = niter
for iter in range(niter):
self.updatehyperparam(iter)
self.updateswarmbest(function, pool, **kwargs)
if verbose >= 1:
print('n_particles:', self.n_particles, 'c1:', self.c1, 'c2:', self.c2, 'w:', self.w )
self.printprogress(iter)
#print(np.abs(self.vel).max())
self.updateswarmvelpos()

if np.abs(self.vel).max() < early_exit:
d = abs(self.gbest_loc - self.pos)
#print(d.max())
if d.max() < early_exit:
break


self.niter = niter
for iter in range(niter):
self.updatehyperparam(iter)
self.updateswarmbest(function, pool, **kwargs)
if verbose >= 1:
self.printprogress(iter)
self.updateswarmvelpos()


pool.close()
pool.terminate()
#pool.close()
#pool.terminate()
final_best = self.gbest
final_loc = self.gbest_loc
if verbose >= 1:
Expand Down

0 comments on commit 5ef2ce1

Please sign in to comment.