diff --git a/OpenPNM/Algorithms/__LinearSolver__.py b/OpenPNM/Algorithms/__LinearSolver__.py index 743ebd76f1..76be28b003 100644 --- a/OpenPNM/Algorithms/__LinearSolver__.py +++ b/OpenPNM/Algorithms/__LinearSolver__.py @@ -278,7 +278,7 @@ def rate(self,pores='',throats=''): pores2[-sp.in1d(p1,pores)] = p1[-sp.in1d(p1,pores)] X1 = self._result[pores1] X2 = self._result[pores2] - g = self._conductance[throats] + g = self._fluid[self._conductance][throats] R = sp.sum(sp.multiply(g,(X1-X2))) return(R) @@ -296,7 +296,7 @@ def _do_one_inner_iteration(self): self._logger.info('Writing result to '+self.__class__.__name__+'[\''+self._conductance+'\']') self[self._quantity] = self._result - def _calc_eff_prop(self): + def _calc_eff_prop(self,check_health=False): try: self[self._quantity] except: @@ -310,41 +310,43 @@ def _calc_eff_prop(self): outlets = sp.where(self['pore.bcval_Dirichlet']==sp.amin(BCs))[0] #Analyze input and output pores - #Check for coplanarity -# if self._net.iscoplanar(inlets) == False: -# raise Exception('The inlet pores do not define a plane. Effective property will be approximation') -# if self._net.iscoplanar(outlets) == False: -# raise Exception('The outlet pores do not define a plane. Effective property will be approximation') - #Ensure pores are on a face of domain (only 1 non-self neighbor each) - PnI = self._net.find_neighbor_pores(pores=inlets,mode='not_intersection',excl_self=True) - if sp.shape(PnI) != sp.shape(inlets): - self._logger.warning('The inlet pores have too many neighbors. Internal pores appear to be selected.') - PnO = self._net.find_neighbor_pores(pores=outlets,mode='not_intersection',excl_self=True) - if sp.shape(PnO) != sp.shape(outlets): - self._logger.warning('The outlet pores have too many neighbors. Internal pores appear to be selected.') + if check_health: + #Check for coplanarity + if self._net.iscoplanar(inlets) == False: + raise Exception('The inlet pores do not define a plane. Effective property will be approximation') + if self._net.iscoplanar(outlets) == False: + raise Exception('The outlet pores do not define a plane. Effective property will be approximation') + #Ensure pores are on a face of domain (only 1 non-self neighbor each) + PnI = self._net.find_neighbor_pores(pores=inlets,mode='not_intersection',excl_self=True) + if sp.shape(PnI) != sp.shape(inlets): + self._logger.warning('The inlet pores have too many neighbors. Internal pores appear to be selected.') + PnO = self._net.find_neighbor_pores(pores=outlets,mode='not_intersection',excl_self=True) + if sp.shape(PnO) != sp.shape(outlets): + self._logger.warning('The outlet pores have too many neighbors. 
Internal pores appear to be selected.') #Fetch area and length of domain #A = self._net.domain_area(face=inlets) A = (40.5e-6*10)**2 L = self._net.domain_length(face_1=inlets,face_2=outlets) - x = self._result + #x = self._result #Find flow through inlet face - Pin = [] - Pn = [] - for pore in inlets: - pore_value = x[pore] - neighbors = self._net.find_neighbor_pores(pore, excl_self = True) - for neighbor in neighbors: - neighbor_value = x[neighbor] - if(sp.absolute(neighbor_value - pore_value) > .000001): - Pin.append(pore) - Pn.append(neighbor) - - Ts = self._net.find_connecting_throat(Pin,Pn) - g = self._fluid[self._conductance][Ts] - xin = x[Pin] - xout = x[Pn] - flow = g*(xin - xout) + #Pin = [] + #Pn = [] + #for pore in inlets: + # pore_value = x[pore] + # neighbors = self._net.find_neighbor_pores(pore, excl_self = True) + # for neighbor in neighbors: + # neighbor_value = x[neighbor] + # if(sp.absolute(neighbor_value - pore_value) > .000001): + # Pin.append(pore) + # Pn.append(neighbor) + # + #Ts = self._net.find_connecting_throat(Pin,Pn) + #g = self._fluid[self._conductance][Ts] + #xin = x[Pin] + #xout = x[Pn] + #flow = g*(xin - xout) + flow = self.rate(pores=inlets) D = sp.sum(flow)*L/A/sp.absolute(BCs[0]-BCs[1]) return D diff --git a/OpenPNM/Algorithms/__OrdinaryPercolation__.py b/OpenPNM/Algorithms/__OrdinaryPercolation__.py index e115daff90..33590b5a16 100644 --- a/OpenPNM/Algorithms/__OrdinaryPercolation__.py +++ b/OpenPNM/Algorithms/__OrdinaryPercolation__.py @@ -22,21 +22,21 @@ class OrdinaryPercolation(GenericAlgorithm): r""" - Simulates a capillary drainage experiment by looping through a list of capillary pressures - - This function produces a pore_properties array called 'Pc_invaded' which contains the pressure at which a given pore was invaded. This list can be useful for reproducing the simulation for plotting or determining late pore filling. + Simulates a capillary drainage experiment by looping through a list of + capillary pressures. Parameters ---------- - - loglevel : integer, optional - Level of the logger (10=Debug, 20=INFO, 30=Warning, 40=Error, 50=Critical) - loggername : string, optional - Set the name of the logger to be output on the console. Defaults to class name. - - Note - ---- - Some info about OP? + network : OpenPNM Network Object + The network upon which the simulation will be run + name : string, optional + The name to assign to the Algorithm Object + + Notes + ----- + To run this algorithm, use 'setup()' to provide the necessary simulation + parameters, and then use 'run()' to execute it. Use 'update()' to send + the results of the simulation out of the algorithm object. 
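    A bare-bones usage sketch is shown below; ``pn`` and ``water`` are
    placeholder names for an existing Network and invading Fluid object and
    are not defined here::

        OP = OpenPNM.Algorithms.OrdinaryPercolation(network=pn, name='OP_1')
        OP.setup(invading_fluid=water, inlets=pn.pores('bottom'), npts=50)
        OP.run()
        OP.update(Pc=5000)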
""" def __init__(self, **kwargs): @@ -46,14 +46,9 @@ def __init__(self, **kwargs): super(OrdinaryPercolation,self).__init__(**kwargs) self._logger.debug("Create Drainage Percolation Algorithm Object") - def setup(self, - invading_fluid = None, - defending_fluid = None, - inlets = [0], - npts = 25, - capillary_pressure = 'capillary_pressure', - AL=True, - **params): + def setup(self,invading_fluid = None,defending_fluid = None,inlets = [0],npts = 25,capillary_pressure = 'capillary_pressure',AL=True,**params): + r''' + ''' # Parse params self._fluid_inv = invading_fluid self._fluid_def = defending_fluid @@ -64,6 +59,8 @@ def setup(self, self._p_cap = capillary_pressure def run(self): + r''' + ''' #See if setup has been run try: capillary_pressure = self._p_cap except: @@ -94,8 +91,6 @@ def _do_outer_iteration_stage(self): self._t_seq = sp.searchsorted(sp.unique(self._t_inv),self._t_inv) self.set_pore_data(prop='inv_seq',data=self._p_seq) self.set_throat_data(prop='inv_seq',data=self._t_seq) - #Remove temporary arrays and adjacency matrices - del self._net._adjacency_matrix['csr']['invaded'] def _do_one_inner_iteration(self,inv_val): r""" @@ -107,11 +102,10 @@ def _do_one_inner_iteration(self,inv_val): #Generate a tlist containing boolean values for throat state Tinvaded = self._t_cap<=inv_val #Fill adjacency matrix with invasion state info - self._net.create_adjacency_matrix(data=Tinvaded,prop='invaded',sprsfmt='csr',dropzeros=True) - clusters = sprs.csgraph.connected_components(self._net._adjacency_matrix['csr']['invaded'])[1] + clusters = self._net.find_clusters(Tinvaded) #Find all pores with at least 1 invaded throat (invaded) Pinvaded = sp.zeros_like(clusters,dtype=bool) - nums = self._net.get_throat_indices('all') + nums = self._net.throats() temp = self._net.find_connected_pores(nums) temp = temp[Tinvaded] temp = sp.hstack((temp[:,0],temp[:,1])) @@ -207,6 +201,7 @@ def update(self, Pc=0, seq = None, occupancy='occupancy'): temp = sp.array(t_inv,dtype=sp.float64,ndmin=1) self._fluid_inv.set_throat_data(prop=occupancy,data=temp) #Apply occupancy to defending fluid + if self._fluid_def != None: temp = sp.array(~p_inv,dtype=sp.float64,ndmin=1) self._fluid_def.set_pore_data(prop=occupancy,data=temp) temp = sp.array(~t_inv,dtype=sp.float64,ndmin=1) diff --git a/OpenPNM/Geometry/__Voronoi__.py b/OpenPNM/Geometry/__Voronoi__.py new file mode 100644 index 0000000000..654d057cdb --- /dev/null +++ b/OpenPNM/Geometry/__Voronoi__.py @@ -0,0 +1,794 @@ +""" +module __Voronoi__: Subclass of GenericGeometry for a standard Geometry created from a Voronoi Diagram +Used with Delaunay Network but could work for others (not tested) +=============================================================================== + +.. warning:: The classes of this module should be loaded through the 'Geometry.__init__.py' file. + +""" + +import sys, os +parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +sys.path.insert(1, parent_dir) +import OpenPNM +import scipy as sp +import numpy as np +import _transformations as tr +from scipy.spatial import ConvexHull +from math import atan2 +from scipy.spatial import Delaunay + +from OpenPNM.Geometry.__GenericGeometry__ import GenericGeometry + +class Voronoi(GenericGeometry): + r""" + Voronoi subclass of GenericGeometry. 
+ + Parameters + ---------- + loglevel : int + Level of the logger (10=Debug, 20=INFO, 30=Warning, 40=Error, 50=Critical) + + """ + + def __init__(self, **kwargs): + r""" + Initialize + """ + if int(sp.__version__.split('.')[1]) < 13: + raise Exception('The installed version of Scipy is too old, Voronoi cannot run') + super(Voronoi,self).__init__(**kwargs) + self._logger.debug("Method: Constructor") + + def setup(self, fibre_rad=3e-06): + self._add_throat_props(radius=fibre_rad) # This sets the key throat data for calculating pore and throat properties later + self.add_property(prop='pore_seed',model='random') + self.add_property(prop='throat_seed',model='neighbor_min') + self.add_property(prop='pore_volume',model='voronoi') # Volume must come before diameter + self.add_property(prop='pore_diameter',model='voronoi') + self.add_property(prop='pore_centroid',model='voronoi') + self.add_property(prop='throat_diameter',model='voronoi') + self.add_property(prop='throat_centroid',model='voronoi') + self.add_property(prop='throat_length',model='constant',value=fibre_rad) + self.add_property(prop='throat_volume',model='voronoi') + self.add_property(prop='throat_vector',model='pore_to_pore') # Not sure how to do this for centre to centre as we might need to split into two vectors + self.add_property(prop='throat_surface_area',model='voronoi') + + def _add_throat_props(self,radius=1e-06): + r""" + Main Loop + This method does all the throat properties for the voronoi cages + including calling the offseting routine which offsets the vertices surrounding each pore by an amount that + replicates erroding the facet of each throat by the fibre radius + """ + connections = self._net['throat.conns'] + coords = self._net['pore.coords'] + verts = self._net['pore.vertices'] + normals = sp.ndarray(len(connections),dtype=object) + normals = coords[connections[:,0]]-coords[connections[:,1]] + area = sp.ndarray(len(connections),dtype=object) + perimeter = sp.ndarray(len(connections),dtype=object) + offset_verts = sp.ndarray(len(connections),dtype=object) + shared_verts = sp.ndarray(len(connections),dtype=object) + offset_error = sp.ndarray(len(connections),dtype=object) + for i,throat_pair in enumerate(connections): + my_shared_verts = [] + pore_a = throat_pair[0] + pore_b = throat_pair[1] + " Identify shared verts " + for vert_a in verts[pore_a]: + for vert_b in verts[pore_b]: + if (vert_a[0] == vert_b[0]) and (vert_a[1] == vert_b[1]) and (vert_a[2] == vert_b[2]): + my_shared_verts.append(vert_a) + if len(shared_verts) >=3: + my_shared_verts = np.asarray(my_shared_verts) + shared_verts[i]=my_shared_verts + area[i],perimeter[i],offset_verts[i],offset_error[i] = self._get_throat_geom(my_shared_verts,normals[i],radius) + else: + area[i]=0.0 + + self._net.set_data(prop='area',throats='all',data=area) + self._net['throat.perimeter']=perimeter + self._net['throat.verts']=shared_verts + self._net['throat.offset_verts']=offset_verts + self._net['throat.normals']=normals + #self._net['throat.offset_error']=offset_error + #offset_error = pn['throat.offset_error'] + #for i in range(len(offset_error)): + # if offset_error[i] == 5 or offset_error[i] == 6: + # self.print_throat([i]) + # print("Throat: "+str(i)+" Error: "+str(offset_error[i])) + " Temporary Code to remove throats with areas smaller than 1% of the mean value " + " This can be used in algorithms to ignore certain connections if required - like in the range of capillary pressures in OP " + + average_area = sp.mean(area) + cutoff = average_area/100 + 
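# The loop below collects the indices of sub-threshold throats one at a time;
# an equivalent vectorised sketch (treating `area` as a plain float array,
# which is an assumption since it is stored here as an object array) would be:
#     excluded_throats = np.where(np.asarray(area, dtype=float) <= cutoff)[0]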
excluded_throats = [] + for i,throat in enumerate(connections): + if area[i]<=cutoff: + excluded_throats.append(i) + excluded_throats = np.asarray(excluded_throats) + if len(excluded_throats) > 0: + self._net.trim(throats=excluded_throats) + + def _get_throat_geom(self,verts,normal,fibre_rad): + r""" + For one set of vertices defining a throat return the key properties + This is the main loop for calling other sub-routines. + General Method: + For each connection or throat defined by the shared vertices + Rotate the vertices to align with the xy-plane and get rid of z-coordinate + Compute the convex hull of the 2D points giving a set of simplices which define neighbouring vertices in a clockwise fashion + For each triplet calculate the offset position given the fibre radius + Check for overlapping vertices and ones that lie outside the original hull - recalculate position to offset from or ignore if all overlapping + Calculate Area and Perimeter if successfully generated offset vertices to replicate eroded throat + Translate back into 3D + Any Errors encountered result in the throat area being zero and no vertices being passed back + These Errors are not coding mistakes but failures to obtain an eroded facet with non-zero area: + Error 1: Less than 3 vertices in the convex hull - Should never happen (unless 2 points are incredibly close together) + Error 2: The largest span of points is less than twice the fibre radius (i.e. throat will definitley be occluded) + Error 3: All the offset vertices overlap with at least one other vertex - Throat fully occluded + Error 4: Not enough offset vertices to continue - Throat fully occluded + Error 5: An offset vertex is outside the original set of points - Throat fully occluded + """ + z_axis = [0,0,1] + throat_area = 0.0 + throat_perimeter = 0.0 + output_offset = [] + Error = 0 + " For boundaries some facets will already be aligned with the axis - if this is the case a rotation is unnecessary and could also cause problems " + angle = tr.angle_between_vectors(normal,z_axis) + if (angle==0.0)or(angle==np.pi): + "We are already aligned" + rotate_input = False + facet = verts + else: + rotate_input = True + M = tr.rotation_matrix(tr.angle_between_vectors(normal,z_axis),tr.vector_product(normal,z_axis)) + facet = np.dot(verts,M[:3,:3].T) + x = facet[:,0] + y = facet[:,1] + z = facet[:,2] + " Work out span of points and set axes scales to cover this and be equal in both dimensions " + x_range = x.max() - x.min() + y_range = y.max() - y.min() + if (x_range > y_range): + my_range = x_range + else: + my_range = y_range + if (np.around(z.std(),3)!=0.000): + print("Rotation failed") + facet_coords_2D = np.column_stack((x,y)) + hull = ConvexHull(facet_coords_2D) + verts_2D = facet_coords_2D[hull.vertices] + offset = self._outer_offset(verts_2D,fibre_rad) + " At this point we may have overlapping areas for which we need to offset from a new point " + overlap_array,sweep_radius,line_points = self._set_overlap(verts_2D,offset) + #first_array = overlap_array + temp_vert_list=[] + #new_vert_list=[] + if (len(verts_2D) <3): + "Error: Fused Too Many Verts" + Error = 1 + elif(my_range < fibre_rad*2): + "Error: Facet Too small to Erode" + Error = 2 + else: + if overlap_array.any()==False: + " If no overlaps don't worry" + "no overlaps" + elif self._all_overlap(overlap_array)==True: + " If all overlaps then throat is fully occluded" + "Error: Throat fully occluded" + Error = 3 + else: + " If one or two sets of overlaps exist and at least one vertex is not overlapped then 
we need to do a bit more work " + " Do some linalg to find a new point to offset from saving un-overlapped verts and newly created verts in a temporary list " + count = 0 + temp_verts = verts_2D + while True: + temp_vert_list=[] + for i in range(np.shape(line_points)[0]): + if np.sum(overlap_array[i])==0.0: + temp_vert_list.append(temp_verts[i]) + else: + my_lines=[] + for j in range(np.shape(line_points)[0]): + + if overlap_array[i][j] ==1 and overlap_array[j][i]==1: + list_a = line_points[i][j] + list_b = line_points[j][i] + my_lines = self._symmetric_difference(list_a,list_b) + + my_lines=np.asarray(my_lines) + + if len(my_lines)==2: + try: + quad_points=temp_verts[my_lines] + my_new_point = self._new_point(quad_points) + temp_vert_list.append(my_new_point) + except IndexError: + print("IndexError: "+str(my_lines)) + except TypeError: + print("TypeError: "+str(my_lines)) + + #new_vert_list.append(my_new_point) + + temp_verts=np.asarray(self._unique_list(temp_vert_list)) + #new_vert_list=np.asarray(self._unique_list(new_vert_list)) + #if len(verts_2D) >=3: + offset = self._outer_offset(temp_verts,fibre_rad) + overlap_array,sweep_radius,line_points = self._set_overlap(temp_verts,offset) + #else: + #Error = 4 + if overlap_array.any()==False: + break + elif self._all_overlap(overlap_array)==True: + Error = 3 + break + elif len(temp_verts) <3: + Error = 4 + break + else: + count+=1 + temp_verts = np.asarray(self._fuse_verts(verts=temp_verts,percentage=0.05*count)) + offset = self._outer_offset(temp_verts,fibre_rad) + overlap_array,sweep_radius,line_points = self._set_overlap(temp_verts,offset) + " Continue Looping until one of the above conditions is true or counter reaches 10" + if count >= 10: + break + + if len(offset) >= 3 and Error == 0: + " Now also check whether any of the offset points lie outside the original convex hull " + original_area = np.around(self._PolyArea2D(verts_2D),10) + all_points = np.concatenate((verts_2D,offset),axis=0) + try: + total_hull = ConvexHull(all_points,qhull_options='Pp') #ignores very small angles + total_area = np.around(self._PolyArea2D(all_points[total_hull.vertices]),10) + except sp.spatial.qhull.QhullError: + print(all_points) + total_area =999 + Error = 5 + #total_area=0 + offset_hull = ConvexHull(offset) + offset_verts_2D = offset[offset_hull.vertices] + if (total_area>original_area): # Throat is fully occluded + " Don't do anything " + if Error != 5: + Error = 6 + #print("First Array") + #print(first_array) + #print("Second Array") + #print(overlap_array) + else: + throat_area = self._PolyArea2D(offset_verts_2D) + throat_perimeter = self._PolyPerimeter2D(offset_verts_2D) + " Make 3D again in rotated plane " + offset_verts_3D = np.column_stack((offset_verts_2D,z[0:len(offset_verts_2D)])) + " Get matrix to un-rotate the co-ordinates back to the original orientation if we rotated in the first place" + if (rotate_input): + M1 = tr.inverse_matrix(M) + " Unrotate the offset coordinates " + output_offset = np.dot(offset_verts_3D,M1[:3,:3].T) + else: + output_offset = offset_verts_3D + + return throat_area, throat_perimeter, output_offset, Error + + def _outer_offset(self,verts,fibre_rad): + r""" + Routine to loop through all verts and calculate offset position based on neighbours either side. 
Verts must be in hull order + """ + offset = [] + for i,vert in enumerate(verts): + " Collect three adjacent points and compute the offset of the first " + triplet = (vert, np.roll(verts,-1,axis=0)[i],np.roll(verts,1,axis=0)[i]) + offset.append(self._offset_vertex(triplet,fibre_rad)) + offset = np.asarray(offset) + + return offset + + def _offset_vertex(self,points,rad = 0.01): + " We are passed in a set of 3 points forming vertices of two adjoining simplexes of the convex hull of a voronoi facet " + " We need to offset the vertices normal to the fibre direction (or adjoining vectors) by the fibre radius " + " This is achieved by finding the half angle between the two adjoining vectors and a direction " + " Mid-point must be the first in the array " + p0 = np.array(points[0]) + p1 = np.array(points[1]) + p2 = np.array(points[2]) + " Now make the midpoint the origin " + vector1 = p1-p0 + vector2 = p2-p0 + + " Find what quadrant the vector is pointing in - atan2 function takes account of signs " + " 0 means aligned with x-axis, pi is aligned with -xaxis, positive numbers are positive y and negative numbers are negative y " + " The angle between the vectors should always be within 180 degrees of one another in a convex hull " + + q1 = atan2(vector1[1],vector1[0]) + q2 = atan2(vector2[1],vector2[0]) + alpha = 0.5*tr.angle_between_vectors(vector1,vector2) + + " We always want to offset from the first vertex we get to - going anti-clockwise from the x-axis " + " Check if both vectors point up or both point down - if so the first one we get to will have smaller q value " + if q1*q2 >=0.0: + if q1np.pi): + "vectors are pointing negative x so take whichever has positive q-value - like a pacman facing left" + if q1>=0: + theta = q1 + else: + theta = q2 + else: + "vectors are pointing positive x so take whichever is negative" + if q1<=0: + theta = q1 + else: + theta = q2 + + if alpha == 0: # this may cause problems in terms of which way to offset!!! 
+ #x = rad*np.cos(theta) + #y = rad*np.sin(theta) + x=0 + y=0 + else: + x = rad*np.cos(alpha+theta)/np.sin(alpha) + y = rad*np.sin(alpha+theta)/np.sin(alpha) + + "Add the midpoint back in" + output = [x+p0[0],y+p0[1]] + + return output + + def _dist2(self,p1, p2): + r""" + Pythagoras to compute the square of the distance between two points (in 2D) + """ + return (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + + def _fuse(self,points, d): + r""" + Fuse points together wihin a certain range + """ + ret = [] + d2 = d * d + n = len(points) + taken = [False] * n + for i in range(n): + if not taken[i]: + count = 1 + point = [points[i][0], points[i][1]] + taken[i] = True + for j in range(i+1, n): + if self._dist2(points[i], points[j]) < d2: + point[0] += points[j][0] + point[1] += points[j][1] + count+=1 + taken[j] = True + point[0] /= count + point[1] /= count + ret.append((point[0], point[1])) + return ret + + def _fuse_verts(self,verts,percentage=0.05): + r""" + Work out the span of the points and therefore the range for fusing them together then call fuse + """ + #Work out largest span + x_span = max(verts[:,0])- min(verts[:,0]) + y_span = max(verts[:,1])- min(verts[:,1]) + if x_span > y_span: + tolerance = x_span*percentage + else: + tolerance = y_span*percentage + #fuse vertices lying within 5% of the largest span + return self._fuse(verts,tolerance) + + def _PolyArea2D(self,pts): + r""" + returns the area of a 2D polygon given the set of points defining the convex hull in correct order + """ + lines = np.hstack([pts,np.roll(pts,-1,axis=0)]) + area = 0.5*abs(sum(x1*y2-x2*y1 for x1,y1,x2,y2 in lines)) + return area + + def _PolyPerimeter2D(self,pts): + r""" + returns the perimeter of a 2D polygon given the set of points defining the convex hull in correct order + """ + lines = np.hstack([pts,np.roll(pts,-1,axis=0)]) + perimeter = sum(np.sqrt((x2-x1)**2+(y2-y1)**2) for x1,y1,x2,y2 in lines) + return perimeter + + def _get_hull_volume(self,points): + + r""" + Calculate the volume of a set of points by dividing the bounding surface into triangles and working out the volume of all the pyramid elements + connected to the volume centroid + """ + " remove any duplicate points - this messes up the triangulation " + points = np.asarray(self._unique_list(points)) + try: + tri = Delaunay(points) + except sp.spatial.qhull.QhullError: + print(points) + " We only want points included in the convex hull to calculate the centroid " + #hull_points = np.unique(tri.convex_hull)#could technically use network pore centroids here but this function may be called at other times + hull_centroid = sp.array([points[:,0].mean(),points[:,1].mean(),points[:,2].mean()]) + hull_volume = 0.0 + for ia, ib, ic in tri.convex_hull: + " Points making each triangular face " + " Collection of co-ordinates of each point in this face " + face_x = points[[ia,ib,ic]][:,0] + face_y = points[[ia,ib,ic]][:,1] + face_z = points[[ia,ib,ic]][:,2] + " Average of each co-ordinate is the centroid of the face " + face_centroid = [face_x.mean(),face_y.mean(),face_z.mean()] + face_centroid_vector = face_centroid - hull_centroid + " Vectors of the sides of the face used to find normal vector and area " + vab = points[ib] - points[ia] + vac = points[ic] - points[ia] + vbc = points[ic] - points[ib] # used later for area + " As vectors are co-planar the cross-product will produce the normal vector of the face " + face_normal = np.cross(vab,vac) + face_unit_normal = face_normal/np.linalg.norm(face_normal) + " As triangles are orientated randomly in 3D we could 
either transform co-ordinates to align with a plane and perform 2D operations " + " to work out the area or we could work out the lengths of each side and use Heron's formula which is easier" + " Using Delaunay traingulation will always produce triangular faces but if dealing with other polygons co-ordinate transfer may be necessary " + a = np.linalg.norm(vab) + b = np.linalg.norm(vbc) + c = np.linalg.norm(vac) + " Semiperimeter " + s = 0.5*(a+b+c) + face_area = np.sqrt(s*(s-a)*(s-b)*(s-c)) + " Now the volume of the pyramid section defined by the 3 face points and the hull centroid can be calculated " + pyramid_volume = np.abs(np.dot(face_centroid_vector,face_unit_normal)*face_area/3) + " Each pyramid is summed together to calculate the total volume " + hull_volume += pyramid_volume + + return hull_volume + + def _unique_list(self,input_list): + r""" + For a given list (of points) remove any duplicates + """ + output_list = [] + if len(input_list) > 0: + dim = np.shape(input_list)[1] + for i in input_list: + match=False + for j in output_list: + if dim == 3: + if (i[0]==j[0]) and (i[1]==j[1]) and (i[2]==j[2]): + match=True + elif dim == 2: + if (i[0]==j[0]) and (i[1]==j[1]): + match=True + elif dim ==1: + if (i[0]==j[0]): + match=True + if match==False: + output_list.append(i) + return output_list + + def _symmetric_difference(self,list_a,list_b): + r""" + Return the combination of two lists without common elements (necessary as sets cannot contain mutable objects) + """ + sym_diff=[] + sorted_list_a = np.sort(list_a) + sorted_list_b = np.sort(list_b) + " Add elements in list a if not in list b " + for element_a in sorted_list_a: + match = False + for element_b in sorted_list_b: + if all(element_a==element_b): + match = True + if match==False: + sym_diff.append(element_a) + " Add elements in list b if not in list a " + for element_b in sorted_list_b: + match = False + for element_a in sorted_list_a: + if all(element_a==element_b): + match = True + if match==False: + sym_diff.append(element_b) + return sym_diff + + def _new_point(self,pairs): + r""" + Passed 2 pairs of points defining lines either side of overlapped offset vertices + need to calculate the new point to offset from given the orientation of the outer fibres + """ + m1,c1 = self._line_equation(pairs[0]) + m2,c2 = self._line_equation(pairs[1]) + + if (m1 == np.inf): + "line1 is a straight line x=c1" + x=c1 + y=(m2*c1)+c2 + elif (m2 == np.inf): + "line2 is a straight line x=c2" + x=c2 + y=(m1*c2)+c1 + else: + try: + x=(c2-c1)/(m1-m2) + y=(m1*c2 - m2*c1)/(m1-m2) + except RuntimeWarning: + x=0 + y=0 + return x,y + + def _line_equation(self,points): + r""" + Return the gradient and y intercept of a straight line given 2 points + """ + x_coords, y_coords = zip(*points) + dy = y_coords[1]-y_coords[0] + dx = x_coords[1]-x_coords[0] + if dx==0: + m=np.inf + c=x_coords[1] + else: + m=dy/dx + c=y_coords[1] - m*x_coords[1] + + return m,c + + def _set_overlap(self,verts,offset): + r""" + Given a set of vertices and a set of offset vertices, evaluate whether any of the offset vertices overlap + This is then used to recalculate points from which to offset + """ + dim = len(verts) + overlap_array = np.zeros(shape=(dim,dim)) + sweep_radius = np.zeros(len(verts)) + for i,zone_centre in enumerate(verts): + sweep_radius[i] = np.sqrt(self._dist2(zone_centre,offset[i])) + for j,test_point in enumerate(offset): + test_radius = np.sqrt(self._dist2(zone_centre,test_point)) + if (test_radius < sweep_radius[i]): + overlap_array[i][j]=1 + 
overlap_array[j][i]=1 # Fill in both so that they are both recalculated later i overlapping j doesn't necessarily mean j overlaps i + " Join up overlapping regions of points " + for i in range(dim): + for j in range(dim): #loop through each element + if overlap_array[i][j]==1: #if an overlap exist look at what others exist for that vertex + for k in range(dim): + if overlap_array[j][k]==1 and k!=i: + overlap_array[i][k]=1 + + line_points=self._line_points(overlap_array) + + return overlap_array,sweep_radius,line_points + + def _line_points(self,array): + r""" + We are passed a square array containing a list of overlap results. rows represent vertices and columns represent offset vertices + If an overlap occurs in the span between offset j and offset i then a 1 will result in [i][j] + As the vertices are in hull order and our aim is to create a new point from which to offset using connected fibres we want to + identify the correct points to use to define our lines + + e__________ d + | | c + | / + | / + | / + |_/ + a b + if we have 5 points in a hull a,b,c,d,e where a overlaps with b and c overlaps with d (and visa versa) the array will look like + [0,1,0,0,0] + [1,0,0,0,0] + [0,0,0,1,0] + [0,0,1,0,0] + [0,0,0,0,0] + + This means that c and d are within a fibre's width of each other and the line between them does not represent the fibre + Instead we want to extend the outer lines (bc and de) to see where they would meet and offset from this point. + Roll up and down to find the first unoverlapped index from which to start each line from then go back one to get the two + points to form a line. + """ + dim=np.shape(array)[0] + index=range(dim) + line_points=np.ndarray(shape=[dim,dim],dtype=object) + for i in range(dim): + for j in range(dim): + if array[i][j]==1 and array[j][i]==1: + " Roll forwards to find the first unoverlapped index" + k=1 + while k < dim: + if np.roll(array[i],-k,axis=0)[j]==0: + break + else: + k+=1 + " Save the indices of the first unoverlapped index and the one before to create the line " + forward_line = [np.roll(index,-k,axis=0)[j],np.roll(index,-(k-1),axis=0)[j]] + forward_line.sort() + " Roll backwards to find the first unoverlapped index " + k=1 + while k < dim: + if np.roll(array[i],k,axis=0)[j]==0: + break + else: + k+=1 + " Save the indices of the first unoverlapped index and the one before to create the line " + backward_line = [np.roll(index,k,axis=0)[j],np.roll(index,(k-1),axis=0)[j]] + backward_line.sort() + line_points[i][j]=(forward_line,backward_line) + + return line_points + + def _all_overlap(self,array): + r""" + Find out whether all offset vertices (columns) are overlapped by at least one other + If so then throat is fully occluded + """ + dim=np.shape(array)[0] + overlap=[False]*dim + all_overlap=False + for i in range(dim): + for j in range(dim): + if array[j][i]==1: + overlap[i]=True + if sum(overlap)==dim: + all_overlap = True + + return all_overlap + + def print_throat(self,throats): + r""" + Print a given throat or list of throats accepted as [1,2,3,...,n] + e.g geom.print_throat([34,65,99]) + Original vertices plus offset vertices are rotated to align with + the z-axis and then printed in 2D + """ + import matplotlib.pyplot as plt + if len(throats) > 0: + verts = self._net['throat.verts'][throats] + offsets = self._net['throat.offset_verts'][throats] + normals = self._net['throat.normals'][throats] + for i in range(len(verts)): + fig = plt.figure() + vert_2D = self._rotate_and_chop(verts[i],normals[i],[0,0,1]) + hull = ConvexHull(vert_2D) + 
for simplex in hull.simplices: + plt.plot(vert_2D[simplex,0], vert_2D[simplex,1], 'k-',linewidth=2) + plt.scatter(vert_2D[:,0], vert_2D[:,1]) + + offset_2D = self._rotate_and_chop(offsets[i],normals[i],[0,0,1]) + offset_hull = ConvexHull(offset_2D) + for simplex in offset_hull.simplices: + plt.plot(offset_2D[simplex,0], offset_2D[simplex,1], 'g-',linewidth=2) + plt.scatter(offset_2D[:,0], offset_2D[:,1]) + " Make sure the plot looks nice by finding the greatest range of points and setting the plot to look square" + xmax = vert_2D[:,0].max() + xmin = vert_2D[:,0].min() + ymax = vert_2D[:,1].max() + ymin = vert_2D[:,1].min() + x_range = xmax - xmin + y_range = ymax - ymin + if (x_range > y_range): + my_range = x_range + else: + my_range = y_range + lower_bound_x = xmin - my_range*0.5 + upper_bound_x = xmin + my_range*1.5 + lower_bound_y = ymin - my_range*0.5 + upper_bound_y = ymin + my_range*1.5 + plt.axis((lower_bound_x,upper_bound_x,lower_bound_y,upper_bound_y)) + plt.grid(b=True, which='major', color='b', linestyle='-') + fig.show() + else: + print("Please provide throat indices") + + def print_pore(self,pores): + r""" + Print all throats around a given pore or list of pores accepted as [1,2,3,...,n] + e.g geom.print_pore([34,65,99]) + Original vertices plus offset vertices used to create faces and + then printed in 3D + To print all pores (n) + pore_range = np.arange(0,n-1,1) + geom.print_pore(pore_range) + """ + import matplotlib.pyplot as plt + from mpl_toolkits.mplot3d import Axes3D + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + if len(pores) > 0: + throats = self._net.find_neighbor_throats(pores=pores) + verts = self._net['throat.verts'][throats] + normals = self._net['throat.normals'][throats] + " Get verts in hull order " + ordered_verts=[] + for i in range(len(verts)): + vert_2D = self._rotate_and_chop(verts[i],normals[i],[0,0,1]) + hull = ConvexHull(vert_2D) + ordered_verts.append(verts[i][hull.vertices]) + offsets = self._net['throat.offset_verts'][throats] + + xmin = 999 + xmax = -999 + ymin = 999 + ymax = -999 + zmin = 999 + zmax = -999 + " Find the span of points " + for vert in verts: + if vert[:,0].min() < xmin: + xmin = vert[:,0].min() + if vert[:,0].max() > xmax: + xmax = vert[:,0].max() + if vert[:,1].min() < ymin: + ymin = vert[:,1].min() + if vert[:,1].max() > ymax: + ymax = vert[:,1].max() + if vert[:,2].min() < zmin: + zmin = vert[:,2].min() + if vert[:,2].max() > zmax: + zmax = vert[:,2].max() + + fig = plt.figure() + ax = fig.gca(projection='3d') + outer_items = Poly3DCollection(ordered_verts,linewidths=1, alpha=0.2) + outer_face_colours=[(0.5, 0, 0.5, 0.05)] + outer_items.set_facecolor(outer_face_colours) + ax.add_collection(outer_items) + inner_items = Poly3DCollection(offsets,linewidths=1, alpha=0.2) + inner_face_colours=[(0.7, 0, 0.3, 0.0)] + inner_items.set_facecolor(inner_face_colours) + ax.add_collection(inner_items) + ax.set_xlim(xmin,xmax) + ax.set_ylim(ymin,ymax) + ax.set_zlim(zmin,zmax) + plt.show() + else: + print("Please provide pore indices") + + def _rotate_and_chop(self,verts,normal,axis=[0,0,1]): + r""" + Method to rotate a set of vertices (or coords) to align with an axis + points must be coplanar and normal must be given + Chops axis coord to give vertices back in 2D + Used to prepare verts for printing or calculating convex hull in order to arrange + them in hull order for calculations and printing + """ + xaxis=[1,0,0] + yaxis=[0,1,0] + zaxis=[0,0,1] + angle = tr.angle_between_vectors(normal,axis) + if (angle==0.0)or(angle==np.pi): 
+ "We are already aligned" + facet = verts + else: + M = tr.rotation_matrix(tr.angle_between_vectors(normal,axis),tr.vector_product(normal,axis)) + facet = np.dot(verts,M[:3,:3].T) + x = facet[:,0] + y = facet[:,1] + z = facet[:,2] + " Work out span of points and set axes scales to cover this and be equal in both dimensions " + if axis == xaxis: + output = np.column_stack((y,z)) + elif axis == yaxis: + output = np.column_stack((x,z)) + elif axis == zaxis: + output = np.column_stack((x,y)) + else: + output = facet + + return output + + + +if __name__ == '__main__': + pn = OpenPNM.Network.Delaunay(name='test_net') + pn.generate(num_pores=100, domain_size=[0.0001,0.0001,0.0001],add_boundaries=True) + test = OpenPNM.Geometry.Voronoi(loglevel=10,name='test_geom',locations=[0],network=pn) + test.set_locations(pores=pn.pores('internal'),throats='all') # Don't really know what this does but is needed + pn.regenerate_geometries() \ No newline at end of file diff --git a/OpenPNM/Geometry/__init__.py b/OpenPNM/Geometry/__init__.py index 08dabae4c9..9634353598 100644 --- a/OpenPNM/Geometry/__init__.py +++ b/OpenPNM/Geometry/__init__.py @@ -91,9 +91,12 @@ from .__Toray090__ import Toray090 from .__Boundary__ import Boundary from .__PlotTools__ import PlotTools +from .__Voronoi__ import Voronoi from . import pore_diameter from . import pore_seed from . import pore_volume +from . import pore_centroid +from . import throat_centroid from . import throat_diameter from . import throat_length from . import throat_seed diff --git a/OpenPNM/Geometry/pore_centroid.py b/OpenPNM/Geometry/pore_centroid.py new file mode 100644 index 0000000000..db3d64dedb --- /dev/null +++ b/OpenPNM/Geometry/pore_centroid.py @@ -0,0 +1,20 @@ +r""" +=============================================================================== +Submodule -- pore_centroid +=============================================================================== + +""" +import scipy as sp + +def voronoi(geometry, + network, + propname, + **params): + r""" + Calculate the centroid of the pore from the voronoi vertices - C.O.M + """ + verts = network['pore.vertices'] + value = sp.ndarray(len(verts),dtype=object) + for i,vert in enumerate(verts): + value[i] = sp.array([vert[:,0].mean(),vert[:,1].mean(),vert[:,2].mean()]) + network['pore.'+propname]=value \ No newline at end of file diff --git a/OpenPNM/Geometry/pore_diameter.py b/OpenPNM/Geometry/pore_diameter.py index 39621ce2c5..791209ecdf 100644 --- a/OpenPNM/Geometry/pore_diameter.py +++ b/OpenPNM/Geometry/pore_diameter.py @@ -6,7 +6,7 @@ """ import scipy as sp import scipy.stats as spst - +from scipy.special import cbrt def constant(geometry, network, @@ -31,4 +31,13 @@ def sphere(geometry, value = P.ppf(network.get_data(prop=seed,pores=geometry.pores())) network.set_data(prop=propname,pores=geometry.pores(),data=value) - \ No newline at end of file +def voronoi(geometry, + network, + propname, + **params): + r""" + Calculate pore diameter from equivalent sphere - volumes must be calculated first + """ + pore_vols = network.get_pore_data(prop='volume') + value = cbrt(6*pore_vols/sp.pi) + network.set_data(prop=propname,pores=geometry.pores(),data=value) \ No newline at end of file diff --git a/OpenPNM/Geometry/pore_volume.py b/OpenPNM/Geometry/pore_volume.py index 383b06bf13..d9e14d8aa7 100644 --- a/OpenPNM/Geometry/pore_volume.py +++ b/OpenPNM/Geometry/pore_volume.py @@ -37,3 +37,30 @@ def cube(geometry, """ value=network.get_data(prop=diameter,pores=geometry.pores())**3 
network.set_data(prop=propname,pores=geometry.pores(),data=value) + +def voronoi(geometry, + network, + propname, + **params): + r""" + Calculate volume from the convex hull of the offset vertices making the throats + """ + conns = network.get_throat_data(prop='conns') + verts = network.get_throat_data(prop='offset_verts') + num_pores = network.num_pores() + value = sp.ndarray(num_pores,dtype=object) + for my_pore in range(num_pores): + throat_vert_list = [] + num_connections = 0 + for idx,check_pores in enumerate(conns): + if (check_pores[0] == my_pore) or (check_pores[1] == my_pore): + num_connections +=1 + for vertex in range(len(verts[idx])): + throat_vert_list.append(verts[idx][vertex]) + if num_connections > 1: + throat_array=sp.asarray(throat_vert_list) + value[my_pore]= geometry._get_hull_volume(throat_array) + else: + value[my_pore]=0.0 + + network.set_data(prop=propname,pores=geometry.pores(),data=value) \ No newline at end of file diff --git a/OpenPNM/Geometry/throat_area.py b/OpenPNM/Geometry/throat_area.py index 65e258496b..590e9faeac 100644 --- a/OpenPNM/Geometry/throat_area.py +++ b/OpenPNM/Geometry/throat_area.py @@ -5,7 +5,6 @@ """ import scipy as sp -import scipy.stats as spst def constant(geometry, network, @@ -39,5 +38,4 @@ def cuboid(geometry, """ D = network.get_data(prop=diameter,throats=geometry.throats()) value = (D)**2 - network.set_data(prop=propname,throats=geometry.throats(),data=value) - + network.set_data(prop=propname,throats=geometry.throats(),data=value) \ No newline at end of file diff --git a/OpenPNM/Geometry/throat_centroid.py b/OpenPNM/Geometry/throat_centroid.py new file mode 100644 index 0000000000..eb3f5c887b --- /dev/null +++ b/OpenPNM/Geometry/throat_centroid.py @@ -0,0 +1,20 @@ +r""" +=============================================================================== +Submodule -- throat_centroid +=============================================================================== + +""" +import scipy as sp + +def voronoi(geometry, + network, + propname, + **params): + r""" + Calculate the centroid of the pore from the voronoi vertices - C.O.M + """ + verts = network['throat.offset_verts'] + value = sp.ndarray(len(verts),dtype=object) + for i,vert in enumerate(verts): + value[i] = sp.array([vert[:,0].mean(),vert[:,1].mean(),vert[:,2].mean()]) + network['throat.'+propname]=value \ No newline at end of file diff --git a/OpenPNM/Geometry/throat_diameter.py b/OpenPNM/Geometry/throat_diameter.py index f635adf7a1..1f4ef5e3f4 100644 --- a/OpenPNM/Geometry/throat_diameter.py +++ b/OpenPNM/Geometry/throat_diameter.py @@ -6,6 +6,7 @@ """ import scipy as sp import scipy.stats as spst +import numpy as np def constant(geometry, network, @@ -45,5 +46,10 @@ def voronoi(geometry, **params): r""" Calculate throat diameter from analysis of Voronoi facets + Equivalent circular diameter from voronoi area + Could do better here and work out minimum diameter from verts """ - print('voronoi: nothing yet') \ No newline at end of file + areas = network.get_throat_data(prop='area') + value = 2*np.sqrt(areas/np.pi)#64 bit sqrt doesn't work! 
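# Equivalent circular diameter: a circle of area A has d = 2*sqrt(A/pi), so
# np.pi*value**2/4 recovers `areas`.  Worked example (numbers illustrative only):
#     A = 1e-12  ->  d = 2*np.sqrt(1e-12/np.pi) = 1.1284e-06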
+ network.set_data(prop=propname,throats=geometry.throats(),data=value) + #print('voronoi: nothing yet') diff --git a/OpenPNM/Geometry/throat_length.py b/OpenPNM/Geometry/throat_length.py index d25e00f43e..f751214dfd 100644 --- a/OpenPNM/Geometry/throat_length.py +++ b/OpenPNM/Geometry/throat_length.py @@ -35,4 +35,22 @@ def straight(geometry, value = E-(D1+D2)/2 network.set_data(prop=propname,throats=geometry.throats(),data=value) - \ No newline at end of file +def voronoi(geometry, + network, + propname, + **params): + r""" + Calculate the centre to centre distance from centroid of pore1 to centroid of throat to centroid of pore2 + """ + import numpy as np + connections = network.get_data(prop='connections',throats=geometry.throats()) + pore1 = connections[:,0] + pore2 = connections[:,1] + pore_centroids = network.get_pore_data(prop='centroid') + throat_centroids = network.get_throat_data(prop='centroid') + v1 =throat_centroids-pore_centroids[pore1] + v2 =throat_centroids-pore_centroids[pore2] + value = np.ndarray(len(connections), dtype=object) + for i in range(len(connections)): + value[i] = np.linalg.norm(v1[i])+np.linalg.norm(v2[i]) + network.set_data(prop=propname,throats=geometry.throats(),data=value) diff --git a/OpenPNM/Geometry/throat_surface_area.py b/OpenPNM/Geometry/throat_surface_area.py index 35d97251d4..c5ba307c70 100644 --- a/OpenPNM/Geometry/throat_surface_area.py +++ b/OpenPNM/Geometry/throat_surface_area.py @@ -45,3 +45,17 @@ def cuboid(geometry, value = 4*D*L network.set_data(prop=propname,throats=geometry.throats(),data=value) +def voronoi(geometry, + network, + propname, + perimeter='perimeter', + length='length', + **params): + r""" + Calculate surface area from perimeter and lenght - + perimeter calculated when throat area is calculated so must be run in correct order + """ + P = network.get_data(prop=perimeter,throats=geometry.throats()) + L = network.get_data(prop=length,throats=geometry.throats()) + value = P*L + network.set_data(prop=propname,throats=geometry.throats(),data=value) \ No newline at end of file diff --git a/OpenPNM/Geometry/throat_volume.py b/OpenPNM/Geometry/throat_volume.py index 87e315b384..0cde0401fa 100644 --- a/OpenPNM/Geometry/throat_volume.py +++ b/OpenPNM/Geometry/throat_volume.py @@ -45,3 +45,15 @@ def cuboid(geometry, Tdia = network.get_data(prop=throat_diameter,throats=geometry.throats()) value = Tlen*Tdia**2 network.set_data(prop=propname,throats=geometry.throats(),data=value) + +def voronoi(geometry, + network, + propname, + throat_length='length', + throat_area='area', + **params): + r""" + Calculate volume from the voronoi facet area and the throat length + """ + value=network.get_throat_data(prop=throat_length,locations=geometry)*network.get_throat_data(prop=throat_area,locations=geometry) + network.set_data(prop=propname,throats=geometry.throats(),data=value) \ No newline at end of file diff --git a/OpenPNM/Network/__Delaunay__.py b/OpenPNM/Network/__Delaunay__.py index c47d467489..87f1ef2828 100644 --- a/OpenPNM/Network/__Delaunay__.py +++ b/OpenPNM/Network/__Delaunay__.py @@ -18,6 +18,7 @@ import scipy.spatial as sptl import scipy.ndimage as spim from OpenPNM.Network.__GenericNetwork__ import GenericNetwork +from scipy.spatial import Voronoi class Delaunay(GenericNetwork): r""" @@ -47,6 +48,7 @@ class Delaunay(GenericNetwork): """ + add_boundaries = False def __init__(self,**kwargs): ''' @@ -69,10 +71,11 @@ def generate(self,**params): ''' self._logger.info(sys._getframe().f_code.co_name+": Start of network topology 
generation") self._generate_setup(**params) + if params['add_boundaries']: + self.add_boundaries = True self._generate_pores() self._generate_throats() -# self._add_boundaries() - self._add_labels() + #self._add_labels() self._logger.debug(sys._getframe().f_code.co_name+": Network generation complete") def _generate_setup(self, **params): @@ -101,8 +104,8 @@ def _generate_pores(self): coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz] self.set_pore_data(prop='coords',data=coords) self.set_pore_info(label='all',locations=np.ones_like(coords[:,0])) - self._logger.debug(sys._getframe().f_code.co_name+": End of method") - + self._logger.debug(sys._getframe().f_code.co_name+": End of method") + def _generate_throats(self): r""" Generate the throats connections @@ -115,16 +118,33 @@ def _generate_throats(self): Lx = self._Lx Ly = self._Ly Lz = self._Lz - f = 0.1; #Scale factor to reduce size of dummy domains - Np_f = sp.array(Np*f,dtype=int) - ptsX0 = sp.rand(Np_f,3)*sp.array([-Lx*f,Ly*f,Lz*f]) - ptsY0 = sp.rand(Np_f,3)*[Lx*f,-Ly*f,Lz*f] - ptsZ0 = sp.rand(Np_f,3)*[Lx*f,Ly*f,-Lz*f] - ptsXX = sp.rand(Np_f,3)*[Lx*f,Ly*f,Lz*f]+[Lx,0,0] - ptsYY = sp.rand(Np_f,3)*[Lx*f,Ly*f,Lz*f]+[0,Ly,0] - ptsZZ = sp.rand(Np_f,3)*[Lx*f,Ly*f,Lz*f]+[0,0,Lz] + #f = 0.1; #Scale factor to reduce size of dummy domains + #Np_f = sp.array(Np*f,dtype=int) + #ptsX0 = sp.rand(Np_f,3)*sp.array([-Lx*f,Ly*f,Lz*f]) + #ptsY0 = sp.rand(Np_f,3)*[Lx*f,-Ly*f,Lz*f] + #ptsZ0 = sp.rand(Np_f,3)*[Lx*f,Ly*f,-Lz*f] + #ptsXX = sp.rand(Np_f,3)*[Lx*f,Ly*f,Lz*f]+[Lx,0,0] + #ptsYY = sp.rand(Np_f,3)*[Lx*f,Ly*f,Lz*f]+[0,Ly,0] + #ptsZZ = sp.rand(Np_f,3)*[Lx*f,Ly*f,Lz*f]+[0,0,Lz] + + " Reflect in X = Lx and 0 " + Pxp = pts.copy() + Pxp[:,0]=(2*Lx-Pxp[:,0]) + Pxm= pts.copy() + Pxm[:,0] = Pxm[:,0]*(-1) + " Reflect in Y = Ly and 0 " + Pyp = pts.copy() + Pyp[:,1]=(2*Ly-Pxp[:,1]) + Pym = pts.copy() + Pym[:,1] = Pxm[:,1]*(-1) + " Reflect in Z = Lz and 0 " + Pzp = pts.copy() + Pzp[:,2]=(2*Lz-Pxp[:,2]) + Pzm = pts.copy() + Pzm[:,2] = Pxm[:,2]*(-1) + pts = np.vstack((pts,Pxp,Pxm,Pyp,Pym,Pzp,Pzm)) #Order important for boundary logic #Add dummy domains to real domain - pts = sp.concatenate([pts,ptsX0,ptsXX,ptsY0,ptsYY,ptsZ0,ptsZZ]) + #pts = sp.concatenate([pts,ptsX0,ptsXX,ptsY0,ptsYY,ptsZ0,ptsZZ]) #Perform tessellation self._logger.debug(sys._getframe().f_code.co_name+": Beginning tessellation") Tri = sptl.Delaunay(pts) @@ -143,6 +163,72 @@ def _generate_throats(self): self.set_throat_data(prop='conns',data=sp.vstack((adjmat.row, adjmat.col)).T) tpore1 = self.get_throat_data(prop='conns')[:,0] self.set_throat_info(label='all',locations=np.ones_like(tpore1)) + + if self.add_boundaries: + " New code to identify boundary pores - those that connect to pores inside and outside original set of pores " + boundary_pores=[False]*Np + boundary_pore_list = [] + xp = [] + xm = [] + yp = [] + ym = [] + zp = [] + zm = [] + for i in sp.arange(0,sp.shape(Tri.simplices)[0]): + pores_in = Tri.simplices[i] < Np # Pores in the original domain + if (sum(pores_in) >= 1) and (sum(pores_in) < len(pores_in)): + # We have connections between at least one pore in and out of the domain + # Identify which boundary we are connected to. 
At corners could be more than one + pores_xp = (Tri.simplices[i] >= Np) & (Tri.simplices[i] < 2*Np) # Pores in positive x dummy reflection + pores_xm = (Tri.simplices[i] >= 2*Np) & (Tri.simplices[i] < 3*Np) # Pores in negative x dummy reflection + pores_yp = (Tri.simplices[i] >= 3*Np) & (Tri.simplices[i] < 4*Np) # Pores in positive y dummy reflection + pores_ym = (Tri.simplices[i] >= 4*Np) & (Tri.simplices[i] < 5*Np) # Pores in negative y dummy reflection + pores_zp = (Tri.simplices[i] >= 5*Np) & (Tri.simplices[i] < 6*Np) # Pores in positive z dummy reflection + pores_zm = (Tri.simplices[i] >= 6*Np) & (Tri.simplices[i] < 7*Np) # Pores in negative z dummy reflection + for j in range(len(Tri.simplices[i])): + if pores_in[j] == True: + pore_id = Tri.simplices[i][j] + boundary_pores[pore_id]=True + if pore_id not in boundary_pore_list: + boundary_pore_list.append(pore_id) + if sum(pores_xp) >= 1 and pore_id not in xp: + xp.append(pore_id) + if sum(pores_xm) >= 1 and pore_id not in xm: + xm.append(pore_id) + if sum(pores_yp) >= 1 and pore_id not in yp: + yp.append(pore_id) + if sum(pores_ym) >= 1 and pore_id not in ym: + ym.append(pore_id) + if sum(pores_zp) >= 1 and pore_id not in zp: + zp.append(pore_id) + if sum(pores_zm) >= 1 and pore_id not in zm: + zm.append(pore_id) + + vor_bounds=sp.asarray(boundary_pore_list) + vor_bound_back = sp.asarray(xp) + vor_bound_front = sp.asarray(xm) + vor_bound_right = sp.asarray(yp) + vor_bound_left = sp.asarray(ym) + vor_bound_top = sp.asarray(zp) + vor_bound_bottom = sp.asarray(zm) + self.set_pore_info(label='boundary',locations=vor_bounds) + self.set_pore_info(label='right',locations=vor_bound_right) + self.set_pore_info(label='left',locations=vor_bound_left) + self.set_pore_info(label='front',locations=vor_bound_front) + self.set_pore_info(label='back',locations=vor_bound_back) + self.set_pore_info(label='top',locations=vor_bound_top) + self.set_pore_info(label='bottom',locations=vor_bound_bottom) + self.set_pore_info(label='internal',locations='all') + + # Do Voronoi diagram - creating voronoi polyhedra around each pore and save vertex information + vor = Voronoi(pts) + all_verts = sp.ndarray(Np,dtype=object) + for i,polygon in enumerate(vor.point_region[0:Np]): + if -1 not in vor.regions[polygon]: + all_verts[i]=vor.vertices[vor.regions[polygon]] + else: + all_verts[i]="unbounded" + self.set_pore_data(prop='vertices',data=all_verts) self._logger.debug(sys._getframe().f_code.co_name+": End of method") def _add_labels(self): @@ -347,7 +433,23 @@ def _add_opposing_boundaries(self,btype=[1,6]): self._net.throat_data['numbering'] = np.append(self._net.throat_data['numbering'],bt_numbering) self._net.throat_data['type'] = np.append(self._net.throat_data['type'],bt_type) self._net.throat_data['conns'] = np.concatenate((self._net.throat_data['conns'],bt_connections)) - + + def domain_size(self,dimension=''): + if dimension == 'front' or dimension == 'back': + return self._Ly*self._Lz + if dimension == 'left' or dimension == 'right': + return self._Lx*self._Lz + if dimension == 'top' or dimension == 'bottom': + return self._Lx*self._Ly + if dimension == 'volume': + return self._Lx*self._Ly*self._Lz + if dimension == 'height': + return self._Lz + if dimension == 'width': + return self._Lx + if dimension == 'depth': + return self._Ly + if __name__ == '__main__': #Run doc tests import doctest diff --git a/OpenPNM/Network/__GenericNetwork__.py b/OpenPNM/Network/__GenericNetwork__.py index a95a794e75..7def953c5a 100644 --- a/OpenPNM/Network/__GenericNetwork__.py 
+++ b/OpenPNM/Network/__GenericNetwork__.py @@ -18,6 +18,7 @@ import scipy.spatial as sptl import scipy.signal as spsg + class GenericNetwork(OpenPNM.Utilities.Tools): r""" GenericNetwork - Base class to construct pore networks @@ -93,7 +94,7 @@ def domain_length(self,face_1,face_2): Ds = misc.dist(x,y) L = sp.median(sp.amin(Ds,axis=0)) else: - self._logger.warn('The supplied pores are not coplanar. Length will be approximate.') + self._logger.warning('The supplied pores are not coplanar. Length will be approximate.') f1 = self['pore.coords'][face_1] f2 = self['pore.coords'][face_2] distavg = [0,0,0] @@ -888,6 +889,8 @@ def trim(self, pores=[], throats=[], check_health=False): else: self._logger.warning('No pores or throats recieved') return + + #Remap throat connections Pnew = sp.arange(0,sum(Pkeep),dtype=int) Pmap = sp.ones((self.num_pores(),),dtype=int)*-1 @@ -958,13 +961,13 @@ def subset(self,pores,name=None,incl_labels=True,incl_props=True): >>> pn.count() {'pore': 125, 'throat': 300} >>> Ps = pn.pores(['top','bottom','front']) - >>> pn2 = pn.subset(pores=Ps) - >>> pn2.count() + >>> psn = pn.subset(pores=Ps) + >>> sn.count() {'pore': 65, 'throat': 112} - >>> pn2.labels()[0:3] # It automatically transfers labels and props + >>> sn.labels()[0:3] # It automatically transfers labels and props ['pore.all', 'pore.back', 'pore.bottom'] - >>> pn2 = pn.subset(pores=Ps,incl_labels=False) - >>> pn2.labels() + >>> sn = pn.subset(pores=Ps,incl_labels=False) + >>> sn.labels() ['pore.all', 'throat.all'] ''' newpnm = OpenPNM.Network.GenericNetwork(name=name) @@ -974,31 +977,32 @@ def subset(self,pores,name=None,incl_labels=True,incl_props=True): #Remap throats on to new pore numbering Pmap = sp.zeros_like(self.pores())*sp.nan Pmap[pores] = sp.arange(0,sp.shape(pores)[0]) - tpore1 = Pmap[self['throat.conns'][throats,0]] - tpore2 = Pmap[self['throat.conns'][throats,1]] + tpore1 = sp.array(Pmap[self['throat.conns'][throats,0]],dtype=int) + tpore2 = sp.array(Pmap[self['throat.conns'][throats,1]],dtype=int) newpnm['throat.conns'] = sp.vstack((tpore1,tpore2)).T + newpnm['pore.coords'] = self['pore.coords'][pores] #Now scan through labels and props, and keep if needed + newpnm['pore.all'] = self['pore.all'][pores] + newpnm['throat.all'] = self['throat.all'][throats] if incl_labels == True: labels = self.labels() + labels.remove('pore.all') + labels.remove('throat.all') for item in labels: if item.split('.')[0] == 'pore': newpnm[item] = self[item][pores] if item.split('.')[0] == 'throat': newpnm[item] = self[item][throats] - else: - newpnm['pore.all'] = self['pore.all'][pores] - newpnm['throat.all'] = self['throat.all'][throats] if incl_props == True: props = self.props() props.remove('throat.conns') + props.remove('pore.coords') for item in props: if item.split('.')[0] == 'pore': newpnm[item] = self[item][pores] if item.split('.')[0] == 'throat': newpnm[item] = self[item][throats] - else: - newpnm['pore.coords'] = self['pore.coords'][pores] #Append pore and throat mapping to main network as attributes and data self['pore.'+newpnm.name] = sp.zeros_like(self['pore.all'],dtype=bool) @@ -1006,6 +1010,9 @@ def subset(self,pores,name=None,incl_labels=True,incl_props=True): self['throat.'+newpnm.name] = sp.zeros_like(self['throat.all'],dtype=bool) self['throat.'+newpnm.name][throats] = True + import types + newpnm.subset_fluid = types.MethodType(subset_fluid, newpnm) + return newpnm def find_clusters(self,mask=[]): @@ -1066,7 +1073,43 @@ def network_health(self): self._logger.warning('Isolated clusters exist in 
the network') for i in sp.unique(Cs): health.disconnected_clusters.append(sp.where(Cs==i)[0]) - return health + return health + +#------------------------------------------------------------------------------ +'''Additional Functions''' +#------------------------------------------------------------------------------ +# These functions are not automatically attached to the network, but can be +# using object.method_name = types.MethodType(method_name, object) + +def subset_fluid(self,fluid): + r''' + This method is appended to subset networks. It takes a fluid from the main + network, and converts it to the size and shape of the sub-network. + + Parameters + ---------- + fluid : OpenPNM Fluid Object + A fluid object that is associated with the main network from which the + subnetwork was extracted. + + Returns + ------- + newfluid : OpenPNM Fluid Object + A fluid object with the same shape as the sub-network. It contains all + the data of the main fluid, but not the property calculation methods. + ''' + newfluid = OpenPNM.Fluids.GenericFluid(network=self) + for item in fluid.props(mode='vectors'): + if item.split('.')[0] == 'pore': + newfluid[item] = fluid[item][fluid._net['pore.'+self.name]] + if item.split('.')[0] == 'throat': + newfluid[item] = fluid[item][fluid._net['throat.'+self.name]] + for item in fluid.props(mode='scalars'): + if item.split('.')[0] == 'pore': + newfluid[item] = fluid[item] + if item.split('.')[0] == 'throat': + newfluid[item] = fluid[item] + return newfluid if __name__ == '__main__': #Run doc tests diff --git a/OpenPNM/Physics/diffusive_conductance.py b/OpenPNM/Physics/diffusive_conductance.py index d62e7452d7..dd33f14112 100644 --- a/OpenPNM/Physics/diffusive_conductance.py +++ b/OpenPNM/Physics/diffusive_conductance.py @@ -66,10 +66,12 @@ def bulk_diffusion(physics, Ps = network.find_connected_pores(Ts,flatten=0) #Find g for half of pore 1 pdia = network.get_data(prop=pore_diameter,pores='all') - gp1 = ct*DABt*pdia[Ps[:,0]]**2/(0.5*pdia[Ps[:,0]]) + #gp1 = ct*DABt*pdia[Ps[:,0]]**2/(0.5*pdia[Ps[:,0]]) + gp1 = ct*DABt*pdia[Ps[:,0]]*2 gp1[~(gp1>0)] = sp.inf #Set 0 conductance pores (boundaries) to inf #Find g for half of pore 2 - gp2 = ct*DABt*pdia[Ps[:,1]]**2/(0.5*pdia[Ps[:,1]]) + #gp2 = ct*DABt*pdia[Ps[:,1]]**2/(0.5*pdia[Ps[:,1]]) + gp2 = ct*DABt*pdia[Ps[:,1]]*2 gp2[~(gp2>0)] = sp.inf #Set 0 conductance pores (boundaries) to inf #Find g for full throat tdia = network.get_data(prop=throat_diameter,throats='all') diff --git a/OpenPNM/Physics/hydraulic_conductance.py b/OpenPNM/Physics/hydraulic_conductance.py index 022d229c84..b1dbeb9ba7 100644 --- a/OpenPNM/Physics/hydraulic_conductance.py +++ b/OpenPNM/Physics/hydraulic_conductance.py @@ -53,10 +53,12 @@ def hagen_poiseuille(physics, pores = network.find_connected_pores(tind,flatten=0) #Find g for half of pore 1 pdia = network.get_pore_data(prop=pore_diameter) - gp1 = 2.28*(pdia[pores[:,0]]/2)**4/(pdia[pores[:,0]]*mut) + #gp1 = 2.28*(pdia[pores[:,0]]/2)**4/(pdia[pores[:,0]]*mut) + gp1 = 2.28*(pdia[pores[:,0]])**3/(16*mut) gp1[~(gp1>0)] = sp.inf #Set 0 conductance pores (boundaries) to inf #Find g for half of pore 2 - gp2 = 2.28*(pdia[pores[:,1]]/2)**4/(pdia[pores[:,1]]*mut) + #gp2 = 2.28*(pdia[pores[:,1]]/2)**4/(pdia[pores[:,1]]*mut) + gp2 = 2.28*(pdia[pores[:,1]])**3/(16*mut) gp2[~(gp2>0)] = sp.inf #Set 0 conductance pores (boundaries) to inf #Find g for full throat tdia = network.get_throat_data(prop=throat_diameter) diff --git a/OpenPNM/Utilities/__Tools__.py b/OpenPNM/Utilities/__Tools__.py index 
f102537a93..12a62d8a04 100644 --- a/OpenPNM/Utilities/__Tools__.py +++ b/OpenPNM/Utilities/__Tools__.py @@ -13,6 +13,7 @@ import scipy as sp import OpenPNM from OpenPNM.Utilities import Base +from OpenPNM.Utilities import misc class Tools(Base,dict): @@ -488,12 +489,14 @@ def amalgamate_data(self,objs=[]): if type(objs) != list: objs = list(objs) data_amalgamated = {} + exclusion_list = ['pore.centroid','pore.vertices','throat.centroid','throat.offset_verts','throat.verts','throat.normals','throat.perimeter'] for item in objs: try: for key in item.keys(): - if sp.amax(item[key]) < sp.inf: - dict_name = item.name+'.'+key - data_amalgamated.update({dict_name : item[key]}) + if key not in exclusion_list: + if sp.amax(item[key]) < sp.inf: + dict_name = item.name+'.'+key + data_amalgamated.update({dict_name : item[key]}) except: self._logger.error('Only network and fluid items contain data') @@ -572,7 +575,7 @@ def props(self,element='',pores=[],throats=[],mode='all'): props = self._get_props(mode=mode) if (pores == []) and (throats == []): if element == '': - return props + temp = props elif element == 'pore': temp = [item for item in props if item.split('.')[0]=='pore'] elif element == 'throat': @@ -580,7 +583,6 @@ def props(self,element='',pores=[],throats=[],mode='all'): else: self._logger.error('Unrecognized element') return - return temp elif pores != []: temp = {} for item in props: @@ -590,7 +592,6 @@ def props(self,element='',pores=[],throats=[],mode='all'): else: vals = self[item][pores] temp.update({item:vals}) - return temp elif throats != []: temp = {} for item in props: @@ -600,7 +601,7 @@ def props(self,element='',pores=[],throats=[],mode='all'): else: vals = self[item][throats] temp.update({item:vals}) - return temp + return misc.PrintableList(temp) def _get_labels(self,element='',locations=[],mode='union'): r''' @@ -687,19 +688,17 @@ def labels(self,element='',pores=[],throats=[],mode='union'): else: self._logger.error('Unrecognized element') return - return temp elif pores != []: if pores == 'all': pores = self.pores() pores = sp.array(pores,ndmin=1) temp = self._get_labels(element='pore',locations=pores, mode=mode) - return temp elif throats != []: if throats == 'all': throats = self.throats() throats = sp.array(throats,ndmin=1) temp = self._get_labels(element='throat',locations=throats,mode=mode) - return temp + return misc.PrintableList(temp) def filter_by_label(self,pores=[],throats=[],label=''): r''' @@ -884,7 +883,39 @@ def tomask(self,pores=None,throats=None): mask = sp.zeros((Nt,),dtype=bool) mask[throats] = True return mask - + + def toindices(self,mask): + r''' + Convert a boolean mask a list of pore or throat indices + + Parameters + ---------- + mask : array_like booleans + A boolean array with True at locations where indices are desired. + The appropriate indices are returned based an the length of mask, + which must be either Np or Nt long. + + Returns + ------- + indices : array_like + A list of pore or throat indices corresponding the locations where + the received mask was True. + + Notes + ----- + This behavior could just as easily be accomplished by using the mask + in pn.pores()[mask] or pn.throats()[mask]. This method is just a thin + convenience function and is a compliment to tomask(). 
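A minimal round-trip sketch of the equivalence described in the toindices() Notes above, assuming pn is an existing network object that carries a 'top' label:

import scipy as sp
Ps = pn.pores('top')                 # label-based pore indices
mask = pn.tomask(pores=Ps)           # boolean array of length Np
assert sp.all(pn.toindices(mask) == Ps)
# pn.pores()[mask] returns the same indices; toindices() is just the convenience form.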
+ + ''' + if sp.shape(mask)[0] == self.num_pores(): + indices = self.pores()[mask] + elif sp.shape(mask)[0] == self.num_throats(): + indices = self.throats()[mask] + else: + raise Exception('Mask received was neither Np nor Nt long') + return indices + def interpolate_data(self,data=[],prop='',throats=[],pores=[]): r""" Determines a pore (or throat) property as the average of it's neighboring @@ -1067,7 +1098,8 @@ def count(self,element=None): Parameters ---------- element : string, optional - Can be either 'pore' or 'throat', which specifies which count to return. + Can be either 'pore' , 'pores', 'throat' or 'throats', which + specifies which count to return. Returns ------- @@ -1077,6 +1109,13 @@ def count(self,element=None): See Also -------- num_pores, num_throats + + Notes + ----- + The ability to send plurals is useful for some types of 'programmatic' + access. For instance, the standard argument for locations is pores + or throats. If these are bundled up in a **kwargs dict then you can + just use the dict key in count() without remove the 's'. Examples -------- @@ -1086,6 +1125,11 @@ def count(self,element=None): >>> pn.count('pore') 125 ''' + #Remove pluralizaton + if element == 'pores': + element = 'pore' + if element == 'throats': + element = 'throat' temp = {} temp['pore'] = self.num_pores() temp['throat'] = self.num_throats() diff --git a/OpenPNM/Utilities/misc.py b/OpenPNM/Utilities/misc.py index cde86fbda4..c897fee80b 100644 --- a/OpenPNM/Utilities/misc.py +++ b/OpenPNM/Utilities/misc.py @@ -1,6 +1,7 @@ import scipy as _sp import time as _time from scipy.spatial.distance import cdist as dist +import _transformations as tr def iscoplanar(coords): r''' @@ -38,7 +39,11 @@ def iscoplanar(coords): n_cross = _sp.cross(n0,n) n_dot = _sp.multiply(n0,n_cross) - + #angles=[] + #for vec in n_cross[2:len(n_cross)]: + # angles.append(180*(tr.angle_between_vectors(n_cross[1],vec,directed=False)/_sp.pi)) + #angles=_sp.asarray(angles) + #if angles.mean() < 20: if _sp.sum(_sp.absolute(n_dot)) == 0: return True else: @@ -70,4 +75,38 @@ def toc(quiet=False): else: return t else: - print("Toc: start time not set") \ No newline at end of file + print("Toc: start time not set") + +class PrintableList(list): + def __str__(self): + count = 0 + header = '-'*50 + print(header) + for item in self: + count = count + 1 + print(count,'\t: ',item) + return header + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/_transformations.py b/_transformations.py new file mode 100644 index 0000000000..0190a46d3f --- /dev/null +++ b/_transformations.py @@ -0,0 +1,1920 @@ +# -*- coding: utf-8 -*- +# transformations.py + +# Copyright (c) 2006-2014, Christoph Gohlke +# Copyright (c) 2006-2014, The Regents of the University of California +# Produced at the Laboratory for Fluorescence Dynamics +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
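A sketch of the 'programmatic access' pattern described in the count() Notes above (before the vendored _transformations.py file begins); the report() helper and the pn object are hypothetical:

def report(net, **kwargs):
    # kwargs keys arrive as 'pores' or 'throats'; count() now accepts the plural
    # form directly, so the key can be passed through unchanged.
    for key, locs in kwargs.items():
        print(key, len(locs), 'of', net.count(key))

# report(pn, pores=pn.pores('top'))   # assumes an existing network 'pn'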
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Homogeneous Transformation Matrices and Quaternions. + +A library for calculating 4x4 matrices for translating, rotating, reflecting, +scaling, shearing, projecting, orthogonalizing, and superimposing arrays of +3D homogeneous coordinates as well as for converting between rotation matrices, +Euler angles, and quaternions. Also includes an Arcball control object and +functions to decompose transformation matrices. + +:Author: + `Christoph Gohlke `_ + +:Organization: + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 2013.06.29 + +Requirements +------------ +* `CPython 2.7 or 3.3 `_ +* `Numpy 1.7 `_ +* `Transformations.c 2013.01.18 `_ + (recommended for speedup of some functions) + +Notes +----- +The API is not stable yet and is expected to change between revisions. + +This Python code is not optimized for speed. Refer to the transformations.c +module for a faster implementation of some functions. + +Documentation in HTML format can be generated with epydoc. + +Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using +numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using +numpy.dot(M, v) for shape (4, \*) column vectors, respectively +numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points"). + +This module follows the "column vectors on the right" and "row major storage" +(C contiguous) conventions. The translation components are in the right column +of the transformation matrix, i.e. M[:3, 3]. +The transpose of the transformation matrices may have to be used to interface +with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16]. + +Calculations are carried out with numpy.float64 precision. + +Vector, point, quaternion, and matrix function arguments are expected to be +"array like", i.e. tuple, list, or numpy arrays. + +Return types are numpy arrays unless specified otherwise. + +Angles are in radians unless specified otherwise. + +Quaternions w+ix+jy+kz are represented as [w, x, y, z]. + +A triple of Euler angles can be applied/interpreted in 24 ways, which can +be specified using a 4 character string or encoded 4-tuple: + + *Axes 4-string*: e.g. 'sxyz' or 'ryxy' + + - first character : rotations are applied to 's'tatic or 'r'otating frame + - remaining characters : successive rotation axis 'x', 'y', or 'z' + + *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) + + - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. + - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed + by 'z', or 'z' is followed by 'x'. Otherwise odd (1). + - repetition : first and last axis are same (1) or different (0). + - frame : rotations are applied to static (0) or rotating (1) frame. 
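A small standalone sketch of the column-vector versus row-vector conventions stated above; T here is simply an explicit translation by [1, 2, 3] built with plain numpy:

import numpy
T = numpy.identity(4)
T[:3, 3] = [1, 2, 3]
col = numpy.array([[0.0], [0.0], [0.0], [1.0]])    # shape (4, 1) column vector
row = numpy.array([[0.0, 0.0, 0.0, 1.0]])          # shape (1, 4) row vector
assert numpy.allclose(numpy.dot(T, col)[:3, 0], [1, 2, 3])
assert numpy.allclose(numpy.dot(row, T.T)[0, :3], [1, 2, 3])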
+ +References +---------- +(1) Matrices and transformations. Ronald Goldman. + In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. +(2) More matrices and transformations: shear and pseudo-perspective. + Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(3) Decomposing a matrix into simple transformations. Spencer Thomas. + In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(4) Recovering the data from the transformation matrix. Ronald Goldman. + In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. +(5) Euler angle conversion. Ken Shoemake. + In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. +(6) Arcball rotation control. Ken Shoemake. + In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. +(7) Representing attitude: Euler angles, unit quaternions, and rotation + vectors. James Diebel. 2006. +(8) A discussion of the solution for the best rotation to relate two sets + of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. +(9) Closed-form solution of absolute orientation using unit quaternions. + BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642. +(10) Quaternions. Ken Shoemake. + http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf +(11) From quaternion to matrix and back. JMP van Waveren. 2005. + http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm +(12) Uniform random rotations. Ken Shoemake. + In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. +(13) Quaternion in molecular modeling. CFF Karney. + J Mol Graph Mod, 25(5):595-604 +(14) New method for extracting the quaternion from a rotation matrix. + Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087. +(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann. + Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130. +(16) Column Vectors vs. Row Vectors. 
+ http://steve.hollasch.net/cgindex/math/matrix/column-vec.html + +Examples +-------- +>>> alpha, beta, gamma = 0.123, -1.234, 2.345 +>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] +>>> I = identity_matrix() +>>> Rx = rotation_matrix(alpha, xaxis) +>>> Ry = rotation_matrix(beta, yaxis) +>>> Rz = rotation_matrix(gamma, zaxis) +>>> R = concatenate_matrices(Rx, Ry, Rz) +>>> euler = euler_from_matrix(R, 'rxyz') +>>> numpy.allclose([alpha, beta, gamma], euler) +True +>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') +>>> is_same_transform(R, Re) +True +>>> al, be, ga = euler_from_matrix(Re, 'rxyz') +>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) +True +>>> qx = quaternion_about_axis(alpha, xaxis) +>>> qy = quaternion_about_axis(beta, yaxis) +>>> qz = quaternion_about_axis(gamma, zaxis) +>>> q = quaternion_multiply(qx, qy) +>>> q = quaternion_multiply(q, qz) +>>> Rq = quaternion_matrix(q) +>>> is_same_transform(R, Rq) +True +>>> S = scale_matrix(1.23, origin) +>>> T = translation_matrix([1, 2, 3]) +>>> Z = shear_matrix(beta, xaxis, origin, zaxis) +>>> R = random_rotation_matrix(numpy.random.rand(3)) +>>> M = concatenate_matrices(T, R, Z, S) +>>> scale, shear, angles, trans, persp = decompose_matrix(M) +>>> numpy.allclose(scale, 1.23) +True +>>> numpy.allclose(trans, [1, 2, 3]) +True +>>> numpy.allclose(shear, [0, math.tan(beta), 0]) +True +>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) +True +>>> M1 = compose_matrix(scale, shear, angles, trans, persp) +>>> is_same_transform(M, M1) +True +>>> v0, v1 = random_vector(3), random_vector(3) +>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1)) +>>> v2 = numpy.dot(v0, M[:3,:3].T) +>>> numpy.allclose(unit_vector(v1), unit_vector(v2)) +True + +""" + +from __future__ import division, print_function + +import math + +import numpy + + +__version__ = '2013.06.29' +__docformat__ = 'restructuredtext en' +__all__ = [] + + +def identity_matrix(): + """Return 4x4 identity/unit matrix. + + >>> I = identity_matrix() + >>> numpy.allclose(I, numpy.dot(I, I)) + True + >>> numpy.sum(I), numpy.trace(I) + (4.0, 4.0) + >>> numpy.allclose(I, numpy.identity(4)) + True + + """ + return numpy.identity(4) + + +def translation_matrix(direction): + """Return matrix to translate by direction vector. + + >>> v = numpy.random.random(3) - 0.5 + >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) + True + + """ + M = numpy.identity(4) + M[:3, 3] = direction[:3] + return M + + +def translation_from_matrix(matrix): + """Return translation vector from translation matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = translation_from_matrix(translation_matrix(v0)) + >>> numpy.allclose(v0, v1) + True + + """ + return numpy.array(matrix, copy=False)[:3, 3].copy() + + +def reflection_matrix(point, normal): + """Return matrix to mirror at plane defined by point and normal vector. + + >>> v0 = numpy.random.random(4) - 0.5 + >>> v0[3] = 1. 
+ >>> v1 = numpy.random.random(3) - 0.5 + >>> R = reflection_matrix(v0, v1) + >>> numpy.allclose(2, numpy.trace(R)) + True + >>> numpy.allclose(v0, numpy.dot(R, v0)) + True + >>> v2 = v0.copy() + >>> v2[:3] += v1 + >>> v3 = v0.copy() + >>> v2[:3] -= v1 + >>> numpy.allclose(v2, numpy.dot(R, v3)) + True + + """ + normal = unit_vector(normal[:3]) + M = numpy.identity(4) + M[:3, :3] -= 2.0 * numpy.outer(normal, normal) + M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal + return M + + +def reflection_from_matrix(matrix): + """Return mirror plane point and normal vector from reflection matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = numpy.random.random(3) - 0.5 + >>> M0 = reflection_matrix(v0, v1) + >>> point, normal = reflection_from_matrix(M0) + >>> M1 = reflection_matrix(point, normal) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + # normal: unit eigenvector corresponding to eigenvalue -1 + w, V = numpy.linalg.eig(M[:3, :3]) + i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue -1") + normal = numpy.real(V[:, i[0]]).squeeze() + # point: any unit eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return point, normal + + +def rotation_matrix(angle, direction, point=None): + """Return matrix to rotate about axis defined by point and direction. + + >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0]) + >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) + True + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> is_same_transform(R0, R1) + True + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(-angle, -direc, point) + >>> is_same_transform(R0, R1) + True + >>> I = numpy.identity(4, numpy.float64) + >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) + True + >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2, + ... direc, point))) + True + + """ + sina = math.sin(angle) + cosa = math.cos(angle) + direction = unit_vector(direction[:3]) + # rotation matrix around unit vector + R = numpy.diag([cosa, cosa, cosa]) + R += numpy.outer(direction, direction) * (1.0 - cosa) + direction *= sina + R += numpy.array([[ 0.0, -direction[2], direction[1]], + [ direction[2], 0.0, -direction[0]], + [-direction[1], direction[0], 0.0]]) + M = numpy.identity(4) + M[:3, :3] = R + if point is not None: + # rotation not around origin + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + M[:3, 3] = point - numpy.dot(R, point) + return M + + +def rotation_from_matrix(matrix): + """Return rotation angle and axis from rotation matrix. 
+ + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> angle, direc, point = rotation_from_matrix(R0) + >>> R1 = rotation_matrix(angle, direc, point) + >>> is_same_transform(R0, R1) + True + + """ + R = numpy.array(matrix, dtype=numpy.float64, copy=False) + R33 = R[:3, :3] + # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, W = numpy.linalg.eig(R33.T) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + direction = numpy.real(W[:, i[-1]]).squeeze() + # point: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, Q = numpy.linalg.eig(R) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(Q[:, i[-1]]).squeeze() + point /= point[3] + # rotation angle depending on direction + cosa = (numpy.trace(R33) - 1.0) / 2.0 + if abs(direction[2]) > 1e-8: + sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] + elif abs(direction[1]) > 1e-8: + sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] + else: + sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] + angle = math.atan2(sina, cosa) + return angle, direction, point + + +def scale_matrix(factor, origin=None, direction=None): + """Return matrix to scale by factor around origin in direction. + + Use factor -1 for point symmetry. + + >>> v = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v[3] = 1 + >>> S = scale_matrix(-1.234) + >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) + True + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S = scale_matrix(factor, origin) + >>> S = scale_matrix(factor, origin, direct) + + """ + if direction is None: + # uniform scaling + M = numpy.diag([factor, factor, factor, 1.0]) + if origin is not None: + M[:3, 3] = origin[:3] + M[:3, 3] *= 1.0 - factor + else: + # nonuniform scaling + direction = unit_vector(direction[:3]) + factor = 1.0 - factor + M = numpy.identity(4) + M[:3, :3] -= factor * numpy.outer(direction, direction) + if origin is not None: + M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction + return M + + +def scale_from_matrix(matrix): + """Return scaling factor, origin and direction from scaling matrix. 
+ + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S0 = scale_matrix(factor, origin) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + >>> S0 = scale_matrix(factor, origin, direct) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + factor = numpy.trace(M33) - 2.0 + try: + # direction: unit eigenvector corresponding to eigenvalue factor + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0] + direction = numpy.real(V[:, i]).squeeze() + direction /= vector_norm(direction) + except IndexError: + # uniform scaling + factor = (factor + 2.0) / 3.0 + direction = None + # origin: any eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + origin = numpy.real(V[:, i[-1]]).squeeze() + origin /= origin[3] + return factor, origin, direction + + +def projection_matrix(point, normal, direction=None, + perspective=None, pseudo=False): + """Return matrix to project onto plane defined by point and normal. + + Using either perspective point, projection direction, or none of both. + + If pseudo is True, perspective projections will preserve relative depth + such that Perspective = dot(Orthogonal, PseudoPerspective). + + >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) + >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) + True + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> P1 = projection_matrix(point, normal, direction=direct) + >>> P2 = projection_matrix(point, normal, perspective=persp) + >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> is_same_transform(P2, numpy.dot(P0, P3)) + True + >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) + >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(P, v0) + >>> numpy.allclose(v1[1], v0[1]) + True + >>> numpy.allclose(v1[0], 3-v1[1]) + True + + """ + M = numpy.identity(4) + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + normal = unit_vector(normal[:3]) + if perspective is not None: + # perspective projection + perspective = numpy.array(perspective[:3], dtype=numpy.float64, + copy=False) + M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) + M[:3, :3] -= numpy.outer(perspective, normal) + if pseudo: + # preserve relative depth + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) + else: + M[:3, 3] = numpy.dot(point, normal) * perspective + M[3, :3] = -normal + M[3, 3] = numpy.dot(perspective, normal) + elif direction is not None: + # parallel projection + direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) + scale = numpy.dot(direction, normal) + M[:3, :3] -= numpy.outer(direction, normal) / scale + M[:3, 3] = direction * (numpy.dot(point, normal) / scale) + else: + # orthogonal projection + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * 
normal + return M + + +def projection_from_matrix(matrix, pseudo=False): + """Return projection plane and perspective point from projection matrix. + + Return values are same as arguments for projection_matrix function: + point, normal, direction, perspective, and pseudo. + + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, direct) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) + >>> result = projection_from_matrix(P0, pseudo=False) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> result = projection_from_matrix(P0, pseudo=True) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not pseudo and len(i): + # point: any eigenvector corresponding to eigenvalue 1 + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + # direction: unit eigenvector corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 0") + direction = numpy.real(V[:, i[0]]).squeeze() + direction /= vector_norm(direction) + # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33.T) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if len(i): + # parallel projection + normal = numpy.real(V[:, i[0]]).squeeze() + normal /= vector_norm(normal) + return point, normal, direction, None, False + else: + # orthogonal projection, where normal equals direction vector + return point, direction, None, None, False + else: + # perspective projection + i = numpy.where(abs(numpy.real(w)) > 1e-8)[0] + if not len(i): + raise ValueError( + "no eigenvector not corresponding to eigenvalue 0") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + normal = - M[3, :3] + perspective = M[:3, 3] / numpy.dot(point[:3], normal) + if pseudo: + perspective -= normal + return point, normal, None, perspective, pseudo + + +def clip_matrix(left, right, bottom, top, near, far, perspective=False): + """Return matrix to obtain normalized device coordinates from frustum. + + The frustum bounds are axis-aligned along x (left, right), + y (bottom, top) and z (near, far). + + Normalized device coordinates are in range [-1, 1] if coordinates are + inside the frustum. + + If perspective is True the frustum is a truncated pyramid with the + perspective point at origin and direction along z axis, otherwise an + orthographic canonical view volume (a box). + + Homogeneous coordinates transformed by the perspective clip matrix + need to be dehomogenized (divided by w coordinate). 
+ + >>> frustum = numpy.random.rand(6) + >>> frustum[1] += frustum[0] + >>> frustum[3] += frustum[2] + >>> frustum[5] += frustum[4] + >>> M = clip_matrix(perspective=False, *frustum) + >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + array([-1., -1., -1., 1.]) + >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1]) + array([ 1., 1., 1., 1.]) + >>> M = clip_matrix(perspective=True, *frustum) + >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + >>> v / v[3] + array([-1., -1., -1., 1.]) + >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1]) + >>> v / v[3] + array([ 1., 1., -1., 1.]) + + """ + if left >= right or bottom >= top or near >= far: + raise ValueError("invalid frustum") + if perspective: + if near <= _EPS: + raise ValueError("invalid frustum: near <= 0") + t = 2.0 * near + M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0], + [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0], + [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)], + [0.0, 0.0, -1.0, 0.0]] + else: + M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)], + [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)], + [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)], + [0.0, 0.0, 0.0, 1.0]] + return numpy.array(M) + + +def shear_matrix(angle, direction, point, normal): + """Return matrix to shear by angle along direction vector on shear plane. + + The shear plane is defined by a point and normal vector. The direction + vector must be orthogonal to the plane's normal vector. + + A point P is transformed by the shear matrix into P" such that + the vector P-P" is parallel to the direction vector and its extent is + given by the angle of P-P'-P", where P' is the orthogonal projection + of P onto the shear plane. + + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S = shear_matrix(angle, direct, point, normal) + >>> numpy.allclose(1, numpy.linalg.det(S)) + True + + """ + normal = unit_vector(normal[:3]) + direction = unit_vector(direction[:3]) + if abs(numpy.dot(normal, direction)) > 1e-6: + raise ValueError("direction and normal vectors are not orthogonal") + angle = math.tan(angle) + M = numpy.identity(4) + M[:3, :3] += angle * numpy.outer(direction, normal) + M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction + return M + + +def shear_from_matrix(matrix): + """Return shear angle, direction and plane from shear matrix. 
+ + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S0 = shear_matrix(angle, direct, point, normal) + >>> angle, direct, point, normal = shear_from_matrix(S0) + >>> S1 = shear_matrix(angle, direct, point, normal) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + # normal: cross independent eigenvectors corresponding to the eigenvalue 1 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0] + if len(i) < 2: + raise ValueError("no two linear independent eigenvectors found %s" % w) + V = numpy.real(V[:, i]).squeeze().T + lenorm = -1.0 + for i0, i1 in ((0, 1), (0, 2), (1, 2)): + n = numpy.cross(V[i0], V[i1]) + w = vector_norm(n) + if w > lenorm: + lenorm = w + normal = n + normal /= lenorm + # direction and angle + direction = numpy.dot(M33 - numpy.identity(3), normal) + angle = vector_norm(direction) + direction /= angle + angle = math.atan(angle) + # point: eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return angle, direction, point, normal + + +def decompose_matrix(matrix): + """Return sequence of transformations from transformation matrix. + + matrix : array_like + Non-degenerative homogeneous transformation matrix + + Return tuple of: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + Raise ValueError if matrix is of wrong type or degenerative. 
+ + >>> T0 = translation_matrix([1, 2, 3]) + >>> scale, shear, angles, trans, persp = decompose_matrix(T0) + >>> T1 = translation_matrix(trans) + >>> numpy.allclose(T0, T1) + True + >>> S = scale_matrix(0.123) + >>> scale, shear, angles, trans, persp = decompose_matrix(S) + >>> scale[0] + 0.123 + >>> R0 = euler_matrix(1, 2, 3) + >>> scale, shear, angles, trans, persp = decompose_matrix(R0) + >>> R1 = euler_matrix(*angles) + >>> numpy.allclose(R0, R1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=True).T + if abs(M[3, 3]) < _EPS: + raise ValueError("M[3, 3] is zero") + M /= M[3, 3] + P = M.copy() + P[:, 3] = 0.0, 0.0, 0.0, 1.0 + if not numpy.linalg.det(P): + raise ValueError("matrix is singular") + + scale = numpy.zeros((3, )) + shear = [0.0, 0.0, 0.0] + angles = [0.0, 0.0, 0.0] + + if any(abs(M[:3, 3]) > _EPS): + perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) + M[:, 3] = 0.0, 0.0, 0.0, 1.0 + else: + perspective = numpy.array([0.0, 0.0, 0.0, 1.0]) + + translate = M[3, :3].copy() + M[3, :3] = 0.0 + + row = M[:3, :3].copy() + scale[0] = vector_norm(row[0]) + row[0] /= scale[0] + shear[0] = numpy.dot(row[0], row[1]) + row[1] -= row[0] * shear[0] + scale[1] = vector_norm(row[1]) + row[1] /= scale[1] + shear[0] /= scale[1] + shear[1] = numpy.dot(row[0], row[2]) + row[2] -= row[0] * shear[1] + shear[2] = numpy.dot(row[1], row[2]) + row[2] -= row[1] * shear[2] + scale[2] = vector_norm(row[2]) + row[2] /= scale[2] + shear[1:] /= scale[2] + + if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: + numpy.negative(scale, scale) + numpy.negative(row, row) + + angles[1] = math.asin(-row[0, 2]) + if math.cos(angles[1]): + angles[0] = math.atan2(row[1, 2], row[2, 2]) + angles[2] = math.atan2(row[0, 1], row[0, 0]) + else: + #angles[0] = math.atan2(row[1, 0], row[1, 1]) + angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[2] = 0.0 + + return scale, shear, angles, translate, perspective + + +def compose_matrix(scale=None, shear=None, angles=None, translate=None, + perspective=None): + """Return transformation matrix from sequence of transformations. + + This is the inverse of the decompose_matrix function. 
+ + Sequence of transformations: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + >>> scale = numpy.random.random(3) - 0.5 + >>> shear = numpy.random.random(3) - 0.5 + >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) + >>> trans = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(4) - 0.5 + >>> M0 = compose_matrix(scale, shear, angles, trans, persp) + >>> result = decompose_matrix(M0) + >>> M1 = compose_matrix(*result) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.identity(4) + if perspective is not None: + P = numpy.identity(4) + P[3, :] = perspective[:4] + M = numpy.dot(M, P) + if translate is not None: + T = numpy.identity(4) + T[:3, 3] = translate[:3] + M = numpy.dot(M, T) + if angles is not None: + R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + M = numpy.dot(M, R) + if shear is not None: + Z = numpy.identity(4) + Z[1, 2] = shear[2] + Z[0, 2] = shear[1] + Z[0, 1] = shear[0] + M = numpy.dot(M, Z) + if scale is not None: + S = numpy.identity(4) + S[0, 0] = scale[0] + S[1, 1] = scale[1] + S[2, 2] = scale[2] + M = numpy.dot(M, S) + M /= M[3, 3] + return M + + +def orthogonalization_matrix(lengths, angles): + """Return orthogonalization matrix for crystallographic cell coordinates. + + Angles are expected in degrees. + + The de-orthogonalization matrix is the inverse. + + >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90]) + >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) + True + >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) + >>> numpy.allclose(numpy.sum(O), 43.063229) + True + + """ + a, b, c = lengths + angles = numpy.radians(angles) + sina, sinb, _ = numpy.sin(angles) + cosa, cosb, cosg = numpy.cos(angles) + co = (cosa * cosb - cosg) / (sina * sinb) + return numpy.array([ + [ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0], + [-a*sinb*co, b*sina, 0.0, 0.0], + [ a*cosb, b*cosa, c, 0.0], + [ 0.0, 0.0, 0.0, 1.0]]) + + +def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): + """Return affine transform matrix to register two point sets. + + v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous + coordinates, where ndims is the dimensionality of the coordinate space. + + If shear is False, a similarity transformation matrix is returned. + If also scale is False, a rigid/Euclidean transformation matrix + is returned. + + By default the algorithm by Hartley and Zissermann [15] is used. + If usesvd is True, similarity and Euclidean transformation matrices + are calculated by minimizing the weighted sum of squared deviations + (RMSD) according to the algorithm by Kabsch [8]. + Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9] + is used, which is slower when using this Python implementation. + + The returned matrix performs rotation, translation and uniform scaling + (if specified). + + >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]] + >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]] + >>> affine_matrix_from_points(v0, v1) + array([[ 0.14549, 0.00062, 675.50008], + [ 0.00048, 0.14094, 53.24971], + [ 0. , 0. , 1. 
]]) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> S = scale_matrix(random.random()) + >>> M = concatenate_matrices(T, R, S) + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1) + >>> M = affine_matrix_from_points(v0[:3], v1[:3]) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + + More examples in superimposition_matrix() + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=True) + v1 = numpy.array(v1, dtype=numpy.float64, copy=True) + + ndims = v0.shape[0] + if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape: + raise ValueError("input arrays are of wrong shape or type") + + # move centroids to origin + t0 = -numpy.mean(v0, axis=1) + M0 = numpy.identity(ndims+1) + M0[:ndims, ndims] = t0 + v0 += t0.reshape(ndims, 1) + t1 = -numpy.mean(v1, axis=1) + M1 = numpy.identity(ndims+1) + M1[:ndims, ndims] = t1 + v1 += t1.reshape(ndims, 1) + + if shear: + # Affine transformation + A = numpy.concatenate((v0, v1), axis=0) + u, s, vh = numpy.linalg.svd(A.T) + vh = vh[:ndims].T + B = vh[:ndims] + C = vh[ndims:2*ndims] + t = numpy.dot(C, numpy.linalg.pinv(B)) + t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1) + M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,))) + elif usesvd or ndims != 3: + # Rigid transformation via SVD of covariance matrix + u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) + # rotation matrix from SVD orthonormal bases + R = numpy.dot(u, vh) + if numpy.linalg.det(R) < 0.0: + # R does not constitute right handed system + R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0) + s[-1] *= -1.0 + # homogeneous transformation matrix + M = numpy.identity(ndims+1) + M[:ndims, :ndims] = R + else: + # Rigid transformation matrix via quaternion + # compute symmetric matrix N + xx, yy, zz = numpy.sum(v0 * v1, axis=1) + xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) + xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) + N = [[xx+yy+zz, 0.0, 0.0, 0.0], + [yz-zy, xx-yy-zz, 0.0, 0.0], + [zx-xz, xy+yx, yy-xx-zz, 0.0], + [xy-yx, zx+xz, yz+zy, zz-xx-yy]] + # quaternion: eigenvector corresponding to most positive eigenvalue + w, V = numpy.linalg.eigh(N) + q = V[:, numpy.argmax(w)] + q /= vector_norm(q) # unit quaternion + # homogeneous transformation matrix + M = quaternion_matrix(q) + + if scale and not shear: + # Affine transformation; scale is ratio of RMS deviations from centroid + v0 *= v0 + v1 *= v1 + M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) + + # move centroids back + M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0)) + M /= M[ndims, ndims] + return M + + +def superimposition_matrix(v0, v1, scale=False, usesvd=True): + """Return matrix to transform given 3D point set into second point set. + + v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. + + The parameters scale and usesvd are explained in the more general + affine_matrix_from_points function. + + The returned matrix is a similarity or Euclidean transformation matrix. + This function has a fast C implementation in transformations.c. 
+ + >>> v0 = numpy.random.rand(3, 10) + >>> M = superimposition_matrix(v0, v0) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> S = scale_matrix(random.random()) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> M = concatenate_matrices(T, R, S) + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) + >>> M = superimposition_matrix(v0, v1, scale=True) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v = numpy.empty((4, 100, 3)) + >>> v[:, :, 0] = v0 + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] + v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] + return affine_matrix_from_points(v0, v1, shear=False, + scale=scale, usesvd=usesvd) + + +def euler_matrix(ai, aj, ak, axes='sxyz'): + """Return homogeneous rotation matrix from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> R = euler_matrix(1, 2, 3, 'syxz') + >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) + True + >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) + >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) + True + >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + >>> for axes in _TUPLE2AXES.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + if frame: + ai, ak = ak, ai + if parity: + ai, aj, ak = -ai, -aj, -ak + + si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) + ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + cc, cs = ci*ck, ci*sk + sc, ss = si*ck, si*sk + + M = numpy.identity(4) + if repetition: + M[i, i] = cj + M[i, j] = sj*si + M[i, k] = sj*ci + M[j, i] = sj*sk + M[j, j] = -cj*ss+cc + M[j, k] = -cj*cs-sc + M[k, i] = -sj*ck + M[k, j] = cj*sc+cs + M[k, k] = cj*cc-ss + else: + M[i, i] = cj*ck + M[i, j] = sj*sc-cs + M[i, k] = sj*cc+ss + M[j, i] = cj*sk + M[j, j] = sj*ss+cc + M[j, k] = sj*cs-sc + M[k, i] = -sj + M[k, j] = cj*si + M[k, k] = cj*ci + return M + + +def euler_from_matrix(matrix, axes='sxyz'): + """Return Euler angles from rotation matrix for specified axis sequence. + + axes : One of 24 axis sequences as string or encoded tuple + + Note that many Euler angle triplets can describe one matrix. + + >>> R0 = euler_matrix(1, 2, 3, 'syxz') + >>> al, be, ga = euler_from_matrix(R0, 'syxz') + >>> R1 = euler_matrix(al, be, ga, 'syxz') + >>> numpy.allclose(R0, R1) + True + >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R0 = euler_matrix(axes=axes, *angles) + ... 
R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) + ... if not numpy.allclose(R0, R1): print(axes, "failed") + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] + if repetition: + sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) + if sy > _EPS: + ax = math.atan2( M[i, j], M[i, k]) + ay = math.atan2( sy, M[i, i]) + az = math.atan2( M[j, i], -M[k, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2( sy, M[i, i]) + az = 0.0 + else: + cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) + if cy > _EPS: + ax = math.atan2( M[k, j], M[k, k]) + ay = math.atan2(-M[k, i], cy) + az = math.atan2( M[j, i], M[i, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2(-M[k, i], cy) + az = 0.0 + + if parity: + ax, ay, az = -ax, -ay, -az + if frame: + ax, az = az, ax + return ax, ay, az + + +def euler_from_quaternion(quaternion, axes='sxyz'): + """Return Euler angles from quaternion for specified axis sequence. + + >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(angles, [0.123, 0, 0]) + True + + """ + return euler_from_matrix(quaternion_matrix(quaternion), axes) + + +def quaternion_from_euler(ai, aj, ak, axes='sxyz'): + """Return quaternion from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') + >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435]) + True + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + 1 + j = _NEXT_AXIS[i+parity-1] + 1 + k = _NEXT_AXIS[i-parity] + 1 + + if frame: + ai, ak = ak, ai + if parity: + aj = -aj + + ai /= 2.0 + aj /= 2.0 + ak /= 2.0 + ci = math.cos(ai) + si = math.sin(ai) + cj = math.cos(aj) + sj = math.sin(aj) + ck = math.cos(ak) + sk = math.sin(ak) + cc = ci*ck + cs = ci*sk + sc = si*ck + ss = si*sk + + q = numpy.empty((4, )) + if repetition: + q[0] = cj*(cc - ss) + q[i] = cj*(cs + sc) + q[j] = sj*(cc + ss) + q[k] = sj*(cs - sc) + else: + q[0] = cj*cc + sj*ss + q[i] = cj*sc - sj*cs + q[j] = cj*ss + sj*cc + q[k] = cj*cs - sj*sc + if parity: + q[j] *= -1.0 + + return q + + +def quaternion_about_axis(angle, axis): + """Return quaternion for rotation about axis. + + >>> q = quaternion_about_axis(0.123, [1, 0, 0]) + >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0]) + True + + """ + q = numpy.array([0.0, axis[0], axis[1], axis[2]]) + qlen = vector_norm(q) + if qlen > _EPS: + q *= math.sin(angle/2.0) / qlen + q[0] = math.cos(angle/2.0) + return q + + +def quaternion_matrix(quaternion): + """Return homogeneous rotation matrix from quaternion. 
+ + >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0])) + True + >>> M = quaternion_matrix([1, 0, 0, 0]) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> M = quaternion_matrix([0, 1, 0, 0]) + >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1])) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + n = numpy.dot(q, q) + if n < _EPS: + return numpy.identity(4) + q *= math.sqrt(2.0 / n) + q = numpy.outer(q, q) + return numpy.array([ + [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0], + [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0], + [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0], + [ 0.0, 0.0, 0.0, 1.0]]) + + +def quaternion_from_matrix(matrix, isprecise=False): + """Return quaternion from rotation matrix. + + If isprecise is True, the input matrix is assumed to be a precise rotation + matrix and a faster algorithm is used. + + >>> q = quaternion_from_matrix(numpy.identity(4), True) + >>> numpy.allclose(q, [1, 0, 0, 0]) + True + >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1])) + >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0]) + True + >>> R = rotation_matrix(0.123, (1, 2, 3)) + >>> q = quaternion_from_matrix(R, True) + >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) + True + >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], + ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) + True + >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], + ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) + True + >>> R = random_rotation_matrix() + >>> q = quaternion_from_matrix(R) + >>> is_same_transform(R, quaternion_matrix(q)) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] + if isprecise: + q = numpy.empty((4, )) + t = numpy.trace(M) + if t > M[3, 3]: + q[0] = t + q[3] = M[1, 0] - M[0, 1] + q[2] = M[0, 2] - M[2, 0] + q[1] = M[2, 1] - M[1, 2] + else: + i, j, k = 1, 2, 3 + if M[1, 1] > M[0, 0]: + i, j, k = 2, 3, 1 + if M[2, 2] > M[i, i]: + i, j, k = 3, 1, 2 + t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] + q[i] = t + q[j] = M[i, j] + M[j, i] + q[k] = M[k, i] + M[i, k] + q[3] = M[k, j] - M[j, k] + q *= 0.5 / math.sqrt(t * M[3, 3]) + else: + m00 = M[0, 0] + m01 = M[0, 1] + m02 = M[0, 2] + m10 = M[1, 0] + m11 = M[1, 1] + m12 = M[1, 2] + m20 = M[2, 0] + m21 = M[2, 1] + m22 = M[2, 2] + # symmetric matrix K + K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0], + [m01+m10, m11-m00-m22, 0.0, 0.0], + [m02+m20, m12+m21, m22-m00-m11, 0.0], + [m21-m12, m02-m20, m10-m01, m00+m11+m22]]) + K /= 3.0 + # quaternion is eigenvector of K that corresponds to largest eigenvalue + w, V = numpy.linalg.eigh(K) + q = V[[3, 0, 1, 2], numpy.argmax(w)] + if q[0] < 0.0: + numpy.negative(q, q) + return q + + +def quaternion_multiply(quaternion1, quaternion0): + """Return multiplication of two quaternions. 
+ + >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7]) + >>> numpy.allclose(q, [28, -44, -14, 48]) + True + + """ + w0, x0, y0, z0 = quaternion0 + w1, x1, y1, z1 = quaternion1 + return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0, + x1*w0 + y1*z0 - z1*y0 + w1*x0, + -x1*z0 + y1*w0 + z1*x0 + w1*y0, + x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64) + + +def quaternion_conjugate(quaternion): + """Return conjugate of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_conjugate(q0) + >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q + + +def quaternion_inverse(quaternion): + """Return inverse of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_inverse(q0) + >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q / numpy.dot(q, q) + + +def quaternion_real(quaternion): + """Return real part of quaternion. + + >>> quaternion_real([3, 0, 1, 2]) + 3.0 + + """ + return float(quaternion[0]) + + +def quaternion_imag(quaternion): + """Return imaginary part of quaternion. + + >>> quaternion_imag([3, 0, 1, 2]) + array([ 0., 1., 2.]) + + """ + return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True) + + +def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): + """Return spherical linear interpolation between two quaternions. + + >>> q0 = random_quaternion() + >>> q1 = random_quaternion() + >>> q = quaternion_slerp(q0, q1, 0) + >>> numpy.allclose(q, q0) + True + >>> q = quaternion_slerp(q0, q1, 1, 1) + >>> numpy.allclose(q, q1) + True + >>> q = quaternion_slerp(q0, q1, 0.5) + >>> angle = math.acos(numpy.dot(q0, q)) + >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \ + numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle) + True + + """ + q0 = unit_vector(quat0[:4]) + q1 = unit_vector(quat1[:4]) + if fraction == 0.0: + return q0 + elif fraction == 1.0: + return q1 + d = numpy.dot(q0, q1) + if abs(abs(d) - 1.0) < _EPS: + return q0 + if shortestpath and d < 0.0: + # invert rotation + d = -d + numpy.negative(q1, q1) + angle = math.acos(d) + spin * math.pi + if abs(angle) < _EPS: + return q0 + isin = 1.0 / math.sin(angle) + q0 *= math.sin((1.0 - fraction) * angle) * isin + q1 *= math.sin(fraction * angle) * isin + q0 += q1 + return q0 + + +def random_quaternion(rand=None): + """Return uniform random unit quaternion. + + rand: array like or None + Three independent random variables that are uniformly distributed + between 0 and 1. + + >>> q = random_quaternion() + >>> numpy.allclose(1, vector_norm(q)) + True + >>> q = random_quaternion(numpy.random.random(3)) + >>> len(q.shape), q.shape[0]==4 + (1, True) + + """ + if rand is None: + rand = numpy.random.rand(3) + else: + assert len(rand) == 3 + r1 = numpy.sqrt(1.0 - rand[0]) + r2 = numpy.sqrt(rand[0]) + pi2 = math.pi * 2.0 + t1 = pi2 * rand[1] + t2 = pi2 * rand[2] + return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1, + numpy.cos(t1)*r1, numpy.sin(t2)*r2]) + + +def random_rotation_matrix(rand=None): + """Return uniform random rotation matrix. + + rand: array like + Three independent random variables that are uniformly distributed + between 0 and 1 for each returned quaternion. 
+ + >>> R = random_rotation_matrix() + >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) + True + + """ + return quaternion_matrix(random_quaternion(rand)) + + +class Arcball(object): + """Virtual Trackball Control. + + >>> ball = Arcball() + >>> ball = Arcball(initial=numpy.identity(4)) + >>> ball.place([320, 320], 320) + >>> ball.down([500, 250]) + >>> ball.drag([475, 275]) + >>> R = ball.matrix() + >>> numpy.allclose(numpy.sum(R), 3.90583455) + True + >>> ball = Arcball(initial=[1, 0, 0, 0]) + >>> ball.place([320, 320], 320) + >>> ball.setaxes([1, 1, 0], [-1, 1, 0]) + >>> ball.constrain = True + >>> ball.down([400, 200]) + >>> ball.drag([200, 400]) + >>> R = ball.matrix() + >>> numpy.allclose(numpy.sum(R), 0.2055924) + True + >>> ball.next() + + """ + def __init__(self, initial=None): + """Initialize virtual trackball control. + + initial : quaternion or rotation matrix + + """ + self._axis = None + self._axes = None + self._radius = 1.0 + self._center = [0.0, 0.0] + self._vdown = numpy.array([0.0, 0.0, 1.0]) + self._constrain = False + if initial is None: + self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0]) + else: + initial = numpy.array(initial, dtype=numpy.float64) + if initial.shape == (4, 4): + self._qdown = quaternion_from_matrix(initial) + elif initial.shape == (4, ): + initial /= vector_norm(initial) + self._qdown = initial + else: + raise ValueError("initial not a quaternion or matrix") + self._qnow = self._qpre = self._qdown + + def place(self, center, radius): + """Place Arcball, e.g. when window size changes. + + center : sequence[2] + Window coordinates of trackball center. + radius : float + Radius of trackball in window coordinates. + + """ + self._radius = float(radius) + self._center[0] = center[0] + self._center[1] = center[1] + + def setaxes(self, *axes): + """Set axes to constrain rotations.""" + if axes is None: + self._axes = None + else: + self._axes = [unit_vector(axis) for axis in axes] + + @property + def constrain(self): + """Return state of constrain to axis mode.""" + return self._constrain + + @constrain.setter + def constrain(self, value): + """Set state of constrain to axis mode.""" + self._constrain = bool(value) + + def down(self, point): + """Set initial cursor window coordinates and pick constrain-axis.""" + self._vdown = arcball_map_to_sphere(point, self._center, self._radius) + self._qdown = self._qpre = self._qnow + if self._constrain and self._axes is not None: + self._axis = arcball_nearest_axis(self._vdown, self._axes) + self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) + else: + self._axis = None + + def drag(self, point): + """Update current cursor window coordinates.""" + vnow = arcball_map_to_sphere(point, self._center, self._radius) + if self._axis is not None: + vnow = arcball_constrain_to_axis(vnow, self._axis) + self._qpre = self._qnow + t = numpy.cross(self._vdown, vnow) + if numpy.dot(t, t) < _EPS: + self._qnow = self._qdown + else: + q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]] + self._qnow = quaternion_multiply(q, self._qdown) + + def next(self, acceleration=0.0): + """Continue rotation in direction of last drag.""" + q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) + self._qpre, self._qnow = self._qnow, q + + def matrix(self): + """Return homogeneous rotation matrix.""" + return quaternion_matrix(self._qnow) + + +def arcball_map_to_sphere(point, center, radius): + """Return unit sphere coordinates from window coordinates.""" + v0 = (point[0] - center[0]) / radius + v1 = (center[1] - 
point[1]) / radius + n = v0*v0 + v1*v1 + if n > 1.0: + # position outside of sphere + n = math.sqrt(n) + return numpy.array([v0/n, v1/n, 0.0]) + else: + return numpy.array([v0, v1, math.sqrt(1.0 - n)]) + + +def arcball_constrain_to_axis(point, axis): + """Return sphere point perpendicular to axis.""" + v = numpy.array(point, dtype=numpy.float64, copy=True) + a = numpy.array(axis, dtype=numpy.float64, copy=True) + v -= a * numpy.dot(a, v) # on plane + n = vector_norm(v) + if n > _EPS: + if v[2] < 0.0: + numpy.negative(v, v) + v /= n + return v + if a[2] == 1.0: + return numpy.array([1.0, 0.0, 0.0]) + return unit_vector([-a[1], a[0], 0.0]) + + +def arcball_nearest_axis(point, axes): + """Return axis, which arc is nearest to point.""" + point = numpy.array(point, dtype=numpy.float64, copy=False) + nearest = None + mx = -1.0 + for axis in axes: + t = numpy.dot(arcball_constrain_to_axis(point, axis), point) + if t > mx: + nearest = axis + mx = t + return nearest + + +# epsilon for testing whether a number is close to zero +_EPS = numpy.finfo(float).eps * 4.0 + +# axis sequences for Euler angles +_NEXT_AXIS = [1, 2, 0, 1] + +# map axes strings to/from tuples of inner axis, parity, repetition, frame +_AXES2TUPLE = { + 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), + 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), + 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), + 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), + 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), + 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), + 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), + 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} + +_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) + + +def vector_norm(data, axis=None, out=None): + """Return length, i.e. Euclidean norm, of ndarray along axis. + + >>> v = numpy.random.random(3) + >>> n = vector_norm(v) + >>> numpy.allclose(n, numpy.linalg.norm(v)) + True + >>> v = numpy.random.rand(6, 5, 3) + >>> n = vector_norm(v, axis=-1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) + True + >>> n = vector_norm(v, axis=1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> v = numpy.random.rand(5, 4, 3) + >>> n = numpy.empty((5, 3)) + >>> vector_norm(v, axis=1, out=n) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> vector_norm([]) + 0.0 + >>> vector_norm([1]) + 1.0 + + """ + data = numpy.array(data, dtype=numpy.float64, copy=True) + if out is None: + if data.ndim == 1: + return math.sqrt(numpy.dot(data, data)) + data *= data + out = numpy.atleast_1d(numpy.sum(data, axis=axis)) + numpy.sqrt(out, out) + return out + else: + data *= data + numpy.sum(data, axis=axis, out=out) + numpy.sqrt(out, out) + + +def unit_vector(data, axis=None, out=None): + """Return ndarray normalized by length, i.e. Euclidean norm, along axis. 
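A short sketch of vector_norm with a preallocated output array, mirroring the doctest above; illustrative only, not part of the patch, and assumed to run in this module's namespace.

# Illustrative sketch, not part of the patch; uses vector_norm() defined above.
import numpy

v = numpy.random.rand(5, 4, 3)
n = numpy.empty((5, 3))
vector_norm(v, axis=1, out=n)    # fills `n` in place and returns None
assert numpy.allclose(n, numpy.sqrt(numpy.sum(v * v, axis=1)))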
+ + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True + >>> v1 = numpy.empty((5, 4, 3)) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True + >>> list(unit_vector([])) + [] + >>> list(unit_vector([1])) + [1.0] + + """ + if out is None: + data = numpy.array(data, dtype=numpy.float64, copy=True) + if data.ndim == 1: + data /= math.sqrt(numpy.dot(data, data)) + return data + else: + if out is not data: + out[:] = numpy.array(data, copy=False) + data = out + length = numpy.atleast_1d(numpy.sum(data*data, axis)) + numpy.sqrt(length, length) + if axis is not None: + length = numpy.expand_dims(length, axis) + data /= length + if out is None: + return data + + +def random_vector(size): + """Return array of random doubles in the half-open interval [0.0, 1.0). + + >>> v = random_vector(10000) + >>> numpy.all(v >= 0) and numpy.all(v < 1) + True + >>> v0 = random_vector(10) + >>> v1 = random_vector(10) + >>> numpy.any(v0 == v1) + False + + """ + return numpy.random.random(size) + + +def vector_product(v0, v1, axis=0): + """Return vector perpendicular to vectors. + + >>> v = vector_product([2, 0, 0], [0, 3, 0]) + >>> numpy.allclose(v, [0, 0, 6]) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> v = vector_product(v0, v1) + >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> v = vector_product(v0, v1, axis=1) + >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]]) + True + + """ + return numpy.cross(v0, v1, axis=axis) + + +def angle_between_vectors(v0, v1, directed=True, axis=0): + """Return angle between vectors. + + If directed is False, the input vectors are interpreted as undirected axes, + i.e. the maximum angle is pi/2. + + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) + >>> numpy.allclose(a, math.pi) + True + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) + >>> numpy.allclose(a, 0) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> a = angle_between_vectors(v0, v1) + >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> a = angle_between_vectors(v0, v1, axis=1) + >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532]) + True + + """ + import warnings as w + w.simplefilter("error") + numpy.seterr(all='warn') + v0 = numpy.array(v0, dtype=numpy.float64, copy=False) + v1 = numpy.array(v1, dtype=numpy.float64, copy=False) + dot = numpy.sum(v0 * v1, axis=axis) + try: + dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis) + except RuntimeWarning: + #print(v0,v1) + dot =0 + try: + angle= numpy.arccos(dot if directed else numpy.fabs(dot)) + except RuntimeWarning: + angle=0 + #print("Run time error on arccos, v0: " + str(v0) + " v1: " + str(v1) + " dot: " +str(dot)) + return angle + +def inverse_matrix(matrix): + """Return inverse of square transformation matrix. 
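The angle_between_vectors variant added above switches the process-wide warning state (warnings.simplefilter("error") and numpy.seterr) and zeroes the result whenever a RuntimeWarning is raised. A scoped alternative is sketched below; it is an illustration under stated assumptions, not the patch's implementation, and it clips the cosine rather than zeroing the angle.

# Illustrative sketch, not part of the patch. The helper name is hypothetical.
import numpy

def angle_between_vectors_safe(v0, v1, directed=True, axis=0):
    # Relies on vector_norm() defined earlier in this module.
    v0 = numpy.asarray(v0, dtype=numpy.float64)
    v1 = numpy.asarray(v1, dtype=numpy.float64)
    dot = numpy.sum(v0 * v1, axis=axis)
    norms = vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
    with numpy.errstate(divide='ignore', invalid='ignore'):
        cos = numpy.where(norms > 0, dot / norms, 0.0)  # zero-length input -> angle 0
    cos = numpy.clip(cos, -1.0, 1.0)   # guard arccos against round-off
    return numpy.arccos(cos if directed else numpy.fabs(cos))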
+ + >>> M0 = random_rotation_matrix() + >>> M1 = inverse_matrix(M0.T) + >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) + True + >>> for size in range(1, 7): + ... M0 = numpy.random.rand(size, size) + ... M1 = inverse_matrix(M0) + ... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size) + + """ + return numpy.linalg.inv(matrix) + + +def concatenate_matrices(*matrices): + """Return concatenation of series of transformation matrices. + + >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 + >>> numpy.allclose(M, concatenate_matrices(M)) + True + >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) + True + + """ + M = numpy.identity(4) + for i in matrices: + M = numpy.dot(M, i) + return M + + +def is_same_transform(matrix0, matrix1): + """Return True if two matrices perform same transformation. + + >>> is_same_transform(numpy.identity(4), numpy.identity(4)) + True + >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) + False + + """ + matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) + matrix0 /= matrix0[3, 3] + matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) + matrix1 /= matrix1[3, 3] + return numpy.allclose(matrix0, matrix1) + + +def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'): + """Try import all public attributes from module into global namespace. + + Existing attributes with name clashes are renamed with prefix. + Attributes starting with underscore are ignored by default. + + Return True on successful import. + + """ + import warnings + from importlib import import_module + try: + if not package: + module = import_module(name) + else: + module = import_module('.' + name, package=package) + except ImportError: + if warn: + warnings.warn("failed to import module %s" % name) + else: + for attr in dir(module): + if ignore and attr.startswith(ignore): + continue + if prefix: + if attr in globals(): + globals()[prefix + attr] = globals()[attr] + elif warn: + warnings.warn("no Python implementation of " + attr) + globals()[attr] = getattr(module, attr) + return True + + +_import_module('_transformations') + +if __name__ == "__main__": + import doctest + import random # used in doctests + numpy.set_printoptions(suppress=True, precision=5) + doctest.testmod() diff --git a/run_script.py b/run_script.py index fcc1f4e0e4..f1971af0f4 100644 --- a/run_script.py +++ b/run_script.py @@ -51,11 +51,13 @@ a = pn.pores(labels=['bottom','boundary'],mode='intersection') OP_1.setup(invading_fluid=water,defending_fluid=air,inlets=a,npts=100) OP_1.run() + # Updating data based on the result of Percolation Algorithms OP_1.update(Pc=11000) #OP_1.plot_drainage_curve() inlets = pn.get_pore_indices(labels = ['bottom']) outlets = pn.get_pore_indices(labels = ['top']) + IP_1 = OpenPNM.Algorithms.InvasionPercolation(network = pn, name = 'OP_1', loglevel = 30) IP_1.setup(invading_fluid = water, defending_fluid = air, inlets = inlets, outlets = outlets, end_condition = 'breakthrough') @@ -92,12 +94,8 @@ #------------------------------------------------------------------------------ '''Export to VTK''' #------------------------------------------------------------------------------ + #OpenPNM.Visualization.Vtp.write(filename='test.vtp',fluids=[air,water],network=pn) + vis = OpenPNM.Visualization.VTK() -vis.write(network=pn,fluids=[air,water]) - - - - - - +vis.write(network=pn,fluids=[air,water]) diff --git a/test_openpnm.py b/test_openpnm.py deleted file mode 100644 index a4f265bdc7..0000000000 --- a/test_openpnm.py +++ /dev/null 
@@ -1,79 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" Tests for OpenPNM """ - -from __future__ import print_function, division, absolute_import -import pytest - -import OpenPNM -import numpy as np -import scipy as sp - -def test_template_generator(): - R = np.array([[[0,0],[0,1]]]) - pn = OpenPNM.Network.Template() - pn.generate(R) - pn.trim(R>0.5) - assert len(pn.get_pore_data(prop='coords'))==3 - assert len(pn.get_throat_data(prop='conns'))==2 - -#def test_linear_solver(): -# # fix cube dimensions? -# pn = OpenPNM.Network.Cubic() -# pn.generate(add_boundaries=False) -# -# x,y,z = pn.get_pore_data(prop='coords').T -# left = x==x.min() -# right = x==x.max() -# ics = 2*left + 1*right -# -# sol = OpenPNM.Shortcuts.solve_linear(pn, ics) -# assert( np.allclose(sol, np.array([2,1.5,1]*9)) ) - -#def test_rectilinear_integrity_after_prune(): -# R = np.random.rand(50,40,30) -# # some simple visual pruning, for comparison -# M = np.where(R > R.mean(), R, 0) -# # the convoluted graph way -# pn = OpenPNM.Network.Template() -# pn.generate(R) -# pn.trim(R<=R.mean()) -# O = pn.asarray() -# # what it would look like normally -# assert np.allclose(M, O) - -def test_graph_queries(): - pn = OpenPNM.Network.TestNet() - pn.clone(pores=[0]) - pn.num_pores() - assert pn.find_neighbor_pores(pores=pn.pores()[-1]) == [0] - assert sp.all(pn.find_neighbor_pores(pores=[0]) == [ 1, 5, 25, 125]) - pn.stitch(heads=[124],tails=[125]) - assert pn.num_throats() == 302 - pn.extend(pore_coords=[[1,1,1]]) - assert pn.find_neighbor_pores(pores=126) == [] - assert pn.find_neighbor_throats(pores=126) == [] - -def test_transport(): - pn = OpenPNM.Network.TestNet() - geom = OpenPNM.Geometry.Stick_and_Ball(network=pn) - geom.set_locations(pores=pn.pores('all'), throats=pn.throats('all')) - geom.add_property(prop='pore_seed',model='random',seed=2) - geom.regenerate() - air = OpenPNM.Fluids.Air(network=pn) - air.regenerate() - phys = OpenPNM.Physics.GenericPhysics(network=pn,fluid=air,geometry=geom) - phys.add_method(prop='diffusive_conductance',model='bulk_diffusion') - phys.regenerate() - fick = OpenPNM.Algorithms.FickianDiffusion(network=pn) - fick.set_boundary_conditions(bctype='Dirichlet',bcvalue=0.6,pores=pn.pores('top')) - fick.set_boundary_conditions(bctype='Dirichlet',bcvalue=0.4,pores=pn.pores('bottom')) - fick.run(active_fluid=air) - fick.update() - data = sp.round_(air['pore.mole_fraction'][[0,25,50,75,100]],6) - assert sp.sum(sp.in1d(data, [ 0.4, 0.461799, 0.519957, 0.570009, 0.6])) == 5 - - -if __name__ == '__main__': - pytest.main([__file__]) \ No newline at end of file
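A closing sketch, illustrative only and not part of the patch, showing the matrix-composition helpers added in transformations.py working together; the `transformations` import name is an assumption.

# Illustrative sketch, not part of the patch.
import numpy
import transformations as tr   # assumed import name for the module patched above

R0 = tr.random_rotation_matrix()
R1 = tr.random_rotation_matrix()
M = tr.concatenate_matrices(R0, R1)            # left-to-right matrix product
assert numpy.allclose(M, numpy.dot(R0, R1))
assert tr.is_same_transform(M, numpy.dot(R0, R1))
assert tr.is_same_transform(tr.inverse_matrix(M), numpy.linalg.inv(M))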