#! /usr/bin/python3 # Last edited on 2025-10-23 14:50:28 by stolfi import sys, re, os from sys import stdout as out, stderr as err from math import sqrt, hypot, sin, cos, log, exp, floor, ceil, inf, nan, pi, isfinite import numpy from error_funcs import prog_error, arg_error, debug from process_funcs import bash, run_command import argparser import vms_color_image_funcs as cfn import vms_linear_gray_image_funcs as gfn # import vms_multispectral_image_funcs as mfn # import vms_spectral_analysis_funcs as afn def main(): # Command line arguments are # # "{run_tag} {page} {clip} {tproj} {prout} {masks[0} {masks[1} .." # # where {run_tag} is a tag for output file names, {page} is a page # name (like "079v1"), {clip} is the name of a clip from that page # (like "poolsw"), {tproj} is between 0 and 1, {prout} is a # probability, {masks[0]}, {masks[1]}, {masks[nm-1]}... are the names of # one or more binary masks. # # All inputs are read from the folder {in_dir} = "clips/{page}/{clip}/data". # Reads the RGB image {in_img} of the given page clip, from the PNG # file "{in_dir}/clip.png". Let {(ny,nx,nc)} be the # shape of {in_img}. # # The number of channels {nc} may be 3 or 4. If it is 4, the last # channel is assumed to be the alpha chennel, and it is discarded. # # The RGB colors of pixels are implicitly mapped by {proj_transform} # before the pixels are classified. The {tproj} parameter determines # the amount of distortion. A value of 0 means no distortion. A # positive value means a rotation that makes the gray axis vertical, # followed by a projective map that stretches the Z axis non-linearly # while keeping horz planes horizontal. A value of 1 is the maximum; # it sends the black point to infinity so that red, green, and blue # axes become vertical. # # Each mask {mk_img[im]} is read from file # "{in_dir}/masks/{masks[im]}.png". It should have # shape {(ny,nx,)} and samples 0 or 255. This program assumes that the # nonzero samples select an area with a specific color e.g. blank # vellum, green paint, etc. The masks had better be disjoint. # # Remaps the pixels of {in_img} into a {nm+1} grayscale images # {pc_img[0..nm]} For {im} in {0..nm-1}, the image {pg_img[im]} # specifies the probability that each pixel belongs to one of color # populations sampled by mask {mk_img[im]} The image {pc_img[nm]} is the # probability that it is an outlier, that belongs to none of those # populations. These probabilities are computed by Bayes's formula # where {prout} is the prior for outliers. # # All outputs go to the folder {ot_dir} = "clips/{page}/{clip}/out/{run_tag}" # # Writes the {nm} images {pc_img[0..nm-1]} as files # "{ot_dir}/probs/{masks[im]}.png", and # {pc_img[nm]} as "{ot_dir}/probs/OTHER.png". # # Also writes to "{ot_dir}/masks-redu/${masks[im]}.png" a "reduced" # version of the input mask "{in_dir}/masks/${masks[im]}.png" where # pixels whose colors are too far from the cloud's center have been # excluded. # # Also writes a file "{ot_dir}/ptcoud.geo" # suitable for "view_point_cloud.sh". # run_tag, page, clip, tproj, prout, masks = parse_options() nm = len(masks) assert nm > 0, "must specify at least one mask" noise = (1/255)/12.9 # Sample value noise threshold dgmax = 5.0 # Max Mahalonobis distance for cloud cleanup. in_dir = f"clips/{page}/{clip}/data" ot_dir = f"clips/{page}/{clip}/out/{run_tag}" bash(f"rm -rfv {ot_dir}") bash(f"mkdir -p {ot_dir}") bash(f"mkdir -p {ot_dir}/masks-redu/") bash(f"mkdir -p {ot_dir}/probs/") write_parms(ot_dir, run_tag, page, clip, tproj, prout, noise, dgmax, masks) # Read the image: in_file: str = f"{in_dir}/clip.png" bash(f"( gimp -n -s {in_file} & )") in_img = read_clip_image(in_file); ny, nx, nc = numpy.shape(in_img) assert nc == 3, "image should be RGB" # Read the mask images: mk_imgs = numpy.zeros((ny,nx,nm)) for im in range(nm): mk_file = f"{in_dir}/masks/{masks[im]}.png" mk_img = gfn.read_mask_image(mk_file) assert numpy.shape(mk_img) == (ny,nx,) mk_imgs[:,:,im] = mk_img err.write(f"gathering relevant pixels ...\n") S = [ None ]*nm W = [ None ]*nm K = [ None ]*nm for im in range(nm): err.write(f"--- subset {masks[im]} ---\n") S[im], W[im], K[im] = extract_relevant_pixels(in_img, mk_imgs[:,:,im], noise) err.write(f"computing population parameters ...\n") # Each element of {pop} is a tuple {(avg,vec,dev,mag)}. pop = [ None ]*nm for im in range(nm): err.write(f"--- subset {masks[im]} ---\n") pop[im] = compute_cloud_parameters(S[im], W[im], noise, tproj) if dgmax < +inf: pop[im] = cleanup_cloud(S[im], W[im], pop[im], noise, tproj, dgmax) err.write(f"writing points for {{view_point_cloud}} ...\n") write_points_for_view_point_cloud(ot_dir, S, W, noise, tproj) err.write(f"separating the colors ...\n") pc_img = remap_colors(in_img, pop, noise, tproj, prout) np = numpy.size(pc_img, 2) assert np == nm+1 err.write(f"writing province probability images ...\n") for ip in range(np): pn_img = pc_img[:,:,ip] # err.write(f"normalizing component {ip} to [0_1] range ...\n") # pn_img = map_gray_image_to_unit_range(pn_img, mk_imgs, f"pc[{ip}]") im = ip mask = masks[im] if im < nm else "OTHER" pn_file = f"{ot_dir}/probs/{mask}.png" err.write(f" --- subset {mask} ---\n") if ip >= 1: # Assume it is not blank vellum, so complement the image: pn_img = numpy.full((ny,nx), 1) - pn_img err.write(f" writing probability map image {im} = {pn_file} ...\n") gfn.write_image_as_gray_png(pn_file, pn_img) if im < nm: mn_file = f"{ot_dir}/masks-redu/{mask}.png" err.write(f" writing cleaned mask {im} = {mn_file} ...\n") write_cleaned_mask(mn_file, ny, nx, W[im], K[im]) else: mn_file = "" bash(f"gimp -s {pn_file}") return 0 # ---------------------------------------------------------------------- def write_parms(ot_dir, run_tag, page, clip, tproj, prout, noise, dgmax, masks): parms_file = f"{ot_dir}/parms.txt" with open(parms_file, "w") as wr: wr.write(f"{run_tag = }\n") wr.write(f"{page = }\n") wr.write(f"{clip = }\n") wr.write(f"{tproj = :.6f}\n") wr.write(f"{prout = :.6f}\n") wr.write(f"{noise = :.6f}\n") wr.write(f"{dgmax = :.6f}\n") wr.write(f"{masks = }\n") wr.close() return # ---------------------------------------------------------------------- def read_clip_image(in_file): # Reads the SRGB file "{in_dir}.png" and returns it as a {numpy} # array with three channels R, G, B, discarding the alpha channel. maxval = 255 in_img = cfn.read_color_png_image(in_file, maxval) assert len(numpy.shape(in_img)) == 3, "image should be multichannel" ny, nx, nc = numpy.shape(in_img) err.write(f"{in_file}: rows = {ny:4d} cols = {nx:4d} chns = {nc:d}\n") assert nc == 3 or nc == 4, "image should be RGB or RGBA" if nc == 4: in_img = in_img[:,:,0:3] nc = numpy.size(in_img, 2) assert nc == 3, "bug {nc}" err.write(f"undoing gamma encoding ...\n") cfn.undo_SRGB_gamma_encoding(in_img) for ic in range(nc): fval_min = numpy.min(in_img[:,:,ic], axis=(0,1)) fval_max = numpy.max(in_img[:,:,ic], axis=(0,1)) err.write(f" channel {ic} range = [{fval_min:8.6f} _ {fval_max:8.6f}]\n") return in_img # ---------------------------------------------------------------------- def extract_relevant_pixels(in_img, mk_img, noise): # Given a multichannel image {in_img} and a binary mask {mk_img} # enumerates the set of pixels where the mask is nonzero, and returns # the set {S} of their colors, the corresponding weights {W}, and the # list {K} of the indices of those pixels. # # Ignores pixels whose SRGB brightness {Y} is less than {noise}. # # The weight of a pixel is 0 if its brightness is 1 or {noise}, # and a quadratic function of {Y} between those values. # # Returns {S} as an array with shape {(ns,3)}. Returns {W} as an # array with shape {(ns,)}, normalized so that the max weight is 1.0. # Returns {K} as an array with shape {(ns,2)} where {K[ks,:]} are the # row and colum indices of the sample {ks}. ny, nx, nc = numpy.shape(in_img) assert numpy.shape(mk_img) == (ny,nx) # Extracts pixels and weights of mask {im_mask} as lists {S,W,K}. S = [ ] # Will later be a matrix of {ns} rows and 3 cols. W = [ ] # Will later be a vector of {ns} elems. K = [ ] # Will later be a matrix of {ns} rows and 2 cols. for iy in range(ny): for ix in range(nx): p = tuple(in_img[iy,ix,0:3]) Y = luminance(p) if Y > noise: m = mk_img[iy,ix] assert m == 0 or m == 255, f"mask 0 is not binary {m = }" if m != 0: w = sqrt(Y*Y - noise*noise)*max(0, 1-Y) S.append(p) W.append(w) K.append((iy,ix)) # Convert lists to {numpy} arrays: ns = len(S) assert ns >= 4, f"too few samples in population {ns=}" S = numpy.array(S) assert S.shape == (ns,3,) W = numpy.array(W) assert W.shape == (ns,) # Normalize weights to unit maximum: W_max = numpy.max(W) W /= W_max K = numpy.array(K) assert K.shape == (ns,2,) return S, W, K # ---------------------------------------------------------------------- def write_points_for_view_point_cloud(ot_dir, S, W, noise, tproj): # Given a list of sets of pixels as a list of arrays {S[im]} with # shape {(ns[im],np)} where {np >= 3} and the corrsponding weights # {W[im]} with shape {(ns[im],)}, writes a file "{ot_dir}/ptcloud.geo" suitable # for "view_point_cloud.sh", and calls the latter. # # Pixels from each set are painted with different colors # # If there are too many points, writes only a sample. nm = len(S) assert nm >= 1, "must give at least one population" np = numpy.size(S[0], 1) assert np >= 3, "insuff pixel coords" # Determine total sample count {ns}: ns_val = 0; # Total samples for im in range(nm): nsi = numpy.size(S[im],0) assert numpy.shape(S[im]) == (nsi,np) assert numpy.shape(W[im]) == (nsi,) ns_val += count_valid_samples(W[im]) err.write(f" total {ns_val} valid samples\n") ns_max = 3000 ns_max_per_set = max(4, ns_max//nm) gv_file = f"{ot_dir}/ptcloud.geo" err.write(f"writing {gv_file} ...\n") with open(gv_file, "w") as wr: def add_color_point(tag, im, cx, cy, cz, rad): # Writes a command to show a ball at RGB coords {(cx,cy,cz)} with tag {tag}. # If {im} is {-1} the color is {cx,cy,cz} clipped to the unit cube, # else it is a function of {im}. If {tproj} is nonzero, applies the projective transform # with parameter {tproj}. nonlocal wr if tproj != 0: px, py, pz = proj_transform((cx,cy,cz), tproj, noise) else: px, py, pz = cx, cy, cz wr.write("%s %7.4f %7.4f %7.4f %7.4f" % (tag, px,py,pz, rad)) if im < 0: R = min(1.0, max(0.0, cx)) G = min(1.0, max(0.0, cy)) B = min(1.0, max(0.0, cz)) else: phi = 0.618033 hue = (0.5 * im * phi) % 1.0 if hue < 1/6: t = max(0, min(1, 6*hue-0)); R = 1; G = t; B = 0 elif hue < 2/6: t = max(0, min(1, 6*hue-1)); R = 1-t; G = 1; B = 0 elif hue < 3/6: t = max(0, min(1, 6*hue-2)); R = 0; G = 1; B = t elif hue < 4/6: t = max(0, min(1, 6*hue-3)); R = 0; G = 1-t; B = 1 elif hue < 5/6: t = max(0, min(1, 6*hue-4)); R = t; G = 0; B = 1 else: t = max(0, min(1, 6*hue-5)); R = 1; G = 0; B = 1-t wr.write(" %5.3f %5.3f %5.3f\n" % (R, G, B)) return # .................................................................. def add_frame(): # Add cube frame and balls at primary colors: rbox = 0.01 wr.write("-box 0.300 0.300 0.300\n") for bx in range(2): for by in range(2): for bz in range(2): add_color_point("", -1, bx, by, bz, 0.02) # Add gray diagonal: wr.write("-segment 0.300 0.300 0.300\n") add_color_point("", -1, 0,0,0, 0.02) add_color_point("", -1, 1,1,1, 0.02) # Add equally apaced points along the cube edges: for bu in range(2): for bv in range(2): for k in range(1,10): pu, pv, pw = bu, bv, k/10 for axis in range(3): add_color_point("-point", -1, pu, pv, pw, 0.003) pv, pw, pu = pu, pv, pw # Add equally apaced points along the gray diagonal: for k in range(1,10): pw = k/10 add_color_point("-point", -1, pw, pw, pw, 0.003) if tproj != 0: # Add a marker at transformed zero: wr.write("-point 0 0 1 0.02 1.000 0.800 0.000\n") return # .................................................................... add_frame() for im in range(nm): Si = S[im] Wi = W[im] nsi = count_valid_samples(Wi) step = nsi//ns_max_per_set if step == 0: step = 1 nsi_write = nsi//step err.write(f" adding cloud {im} ({nsi_write} out of {nsi} samples) ...\n") for ks_write in range(nsi_write): ks = ks_write*step cx, cy, cz = Si[ks,0:3] w = Wi[ks] if w > 0: rad = 0.005*sqrt(sqrt(sqrt(w))) add_color_point("-point", im, cx, cy, cz, rad) # wr.write("-segment 0.700 0.700 0.700\n") # for Rext in Rmin, Rmax: # cx, cy, cz = Rext[:] # rad = 0.03 # add_color_point("", cx, cy, cz, rad) wr.close() bash(f"view_point_cloud.sh {gv_file}") return # ---------------------------------------------------------------------- def write_cleaned_mask(mn_file, ny, nx, W, K): # Writes a mask image (grayscale with values 0 or 255) # showing all samples with positive weight whose indices are in {K}. ns = numpy.size(W,0) assert numpy.shape(W) == ((ns,)) assert numpy.shape(K) == ((ns,2,)) mn_img = numpy.zeros((ny,nx,), dtype = numpy.uint8) for ks in range(ns): if W[ks] > 0: iy, ix = tuple(K[ks,:]) mn_img[iy,ix] = 255 gfn.write_uint_array_as_png(mn_file, mn_img) return # ---------------------------------------------------------------------- def count_valid_samples(W): # Given ar array of weights {W[0..ns-1]}, returns the number of nonzero # entries. ns = numpy.size(W,0) assert numpy.shape(W) == (ns,) ns_val = 0 for ks in range(ns): if W[ks] > 0: ns_val += 1 return ns_val # ---------------------------------------------------------------------- def compute_cloud_parameters(S, W, noise, tproj): # Given a set of pixels and their weights, computes their # parameters {(avg,vec,dev,mag)} of the PDF of the population; where # {avg} is the mean (a 3-vector), {vec} is the principal unit vectors # (as columns of a {3x3} matrix), {dev} is the standard deviation along each axis (a 3-vector), # and {mag} is the probability density at {avg}, such that the integral of the # PDF is 1.0. ns, nc = numpy.shape(S) assert nc == 3 # For now. assert numpy.shape(W) == (ns,) # Check if there are enough data points: ns_min = 10 # Why not? ns_val = count_valid_samples(W) assert ns_val >= ns_min, f"not enough sample points {ns_val=}" # Get sample array {P}, transformed if asked for: if tproj != 0: P = numpy.zeros((ns,nc,)) for ks in range(ns): P[ks,:] = numpy.array(proj_transform(S[ks,:], tproj, noise)) else: P = numpy.array(S) avg = average_color(P, W) assert numpy.shape(avg) == (nc,) err.write(f"subtracting the mean of rel chroma values ...\n") T = numpy.zeros((ns,nc,)) for ks in range(ns): T[ks,:] = P[ks,:] - avg; err.write(f"computing the covariance matrix ...\n") V = numpy.cov(T, rowvar = False, aweights = W) debug("V shape", numpy.shape(V)) assert numpy.shape(V) == (nc,nc,), "bug: eigenvalues shape" debug("max(V)", numpy.max(V)) err.write(f"computing the eigenvalues of the covariance matrix ...\n") E = numpy.linalg.eigh(V) ne = nc E_vec = E.eigenvectors err.write(f"eigenvectors:\n{E_vec}\n") assert numpy.shape(E_vec) == (nc,nc,), "bug: eigenvectors shape" vec = E_vec E_val = E.eigenvalues err.write(f"eigenvalues:\n{E_val}\n"); assert numpy.shape(E_val) == (ne,), "bug: eigenvalues shape" err.write(f"checking the eigenvalue decomposition ...\n") check_eigen(T, W, E_vec, E_val) # Convert eigenvalues to deviations: dev = numpy.zeros((ne,)) for ie in range(ne): assert E_val[ie] >= 0.0 dev[ie] = sqrt(E_val[ie] + noise*noise) mag = gaussian_mag(dev) # Adjust direction of eigenvectors to agree with luminance axis: for ie in range(ne): vi = vec[:,ie] si = numpy.sum(vi) if si < 0: vec[:,ie] = - vi return ( avg, vec, dev, mag ) # ---------------------------------------------------------------------- def cleanup_cloud(S, W, pop, noise, tproj, dgmax): # If {dgmax} is less than {+inf}, sets to zero the weight {W[ks]} of # any sample which is more than {dgmax} away from the mean of # distribution {pop} in the Mahalonobis distance. Recomputes {pop} and # iterates until the weights converge. Returns the new {pop}. assert isfinite(dgmax) and dgmax > 0, f"invalid {dgmax=}" avg, vec, dev, mag = pop ns = len(S) nc = numpy.size(avg,0) # Get sample array {P}, transformed if asked for: if tproj != 0: P = numpy.zeros((ns,nc,)) for ks in range(ns): P[ks,:] = numpy.array(proj_transform(S[ks,:], tproj, noise)) else: P = numpy.array(S) while True: # Check and delete points: ns_del = 0 for ks in range(ns): if W[ks] > 0: p = P[ks,:] d2 = gaussian_dist_sqr(p, avg, vec, dev) if d2 > dgmax*dgmax: err.write(f" excluding sample {ks}") err.write(f" = [ {p[0]:8.4f} {p[1]:8.4f} {p[2]:8.4f} ]") err.write(f" d = {sqrt(d2):8.4f}\n") W[ks] = 0 ns_del += 1 if ns_del == 0: return pop pop = compute_cloud_parameters(S, W, noise, tproj) assert False # ---------------------------------------------------------------------- def gaussian_mag(dev): # Gaussian PDF magnitude from deviations vector. ne = numpy.size(dev,0) assert numpy.shape(dev) == (ne,) tdev = 1.0 for ie in range(ne): tdev *= dev[ie] # Compute scale factor: mag = pow(2*pi, -ne/2)/tdev return mag # ---------------------------------------------------------------------- def average_color(S, W): # Given a list of pixels as an array {S} with shape {(ns,np)} and the # corrsponding weights with shape {(ns,)}, returns the weighted # average of those pixels as an array with shape {(np,)}. # err.write(f"computing average of relevant pixels ...\n") ns, nc = numpy.shape(S) assert numpy.shape(W) == (ns,) SW_sum = numpy.zeros((nc,)) W_sum = numpy.zeros((nc,)) for ks in range(ns): w = W[ks] W_sum += w for ip in range(nc): SW_sum[ip] += w*S[ks,ip] avg = SW_sum/W_sum debug("avg", avg) assert numpy.shape(avg) == (nc,), "bug: avg shape" return avg # ---------------------------------------------------------------------- def remap_colors(in_img, pops, noise, tproj, prout): # Given an RGB image {in_img} and {nm} populations with parameters # {pos[0..nm-1]}, classifies each pixel {p} of {in_img} into the {nm} # classes defined by {pops[0..nm-1]} or the "outlier" class {nm}. The # result is a matrix {rm_img} of shape {(ny,nx,np)} where {np = nm+1}. ny, nx, nc = numpy.shape(in_img) nm = len(pops) np = nm+1 rm_img = numpy.zeros((ny,nx,np)) err.write(f"remapping pixels ...\n") for iy in range(ny): for ix in range(nx): p = in_img[iy,ix,:] q = classify_pixel(p, pops, noise, tproj, prout) rm_img[iy,ix,:] = q return rm_img # ---------------------------------------------------------------------- def classify_pixel (p, pops, noise, tproj, prout) : # Given a triple or 3-vec {p} that represents an RGB and {nm} # population parameters {pops[0..nm-1]} returns a vector {q} with {np # = nm+1} elements where {q[ip]} is the posterior Bayesian probability # of {p} belonging to population {ip}, where population {nm} is the # outliers. nm = len(pops) np = nm+1 y = luminance(p) q = numpy.zeros(np) if not isfinite(y) or y <= noise: # Classify as outlier: q[nm] = 1.0 else: if tproj != 0: # Apply projective transformation to {p}: p = proj_transform(p, tproj, noise) # Compute numerators of Bayes's formula: prin = (1 - prout)/nm # Prior probability of each class. for im in range(nm): q[im] = prin * gaussian_distr(p, pops[im]) # The outlier PDF is uniform over the unit cube, with integral {prout}. # The unit cube has volume 1 so the PDF value is {prout}. q[nm] = prout # Normalize: q = q / numpy.sum(q) return q # ---------------------------------------------------------------------- def gaussian_distr(p, pop): # Given a triple or 3-vec {p} that represents a point of {R^3}, # evaluates the PDF of a 3-dim Gaussian probability distribution with # parameters {pop}. avg, vec, dev, mag = pop z2 = gaussian_dist_sqr(p, avg, vec, dev) assert isinstance(mag, float) pdf = mag*exp(-z2/2) return pdf # ---------------------------------------------------------------------- def gaussian_dist_sqr(p, avg, vec, dev): # The squared Mahalonobis distance from {p} to the mean {avg} in the # 3D Gaussian distrib with axes {vec} and deviations {dev}. nc = numpy.size(avg,0) assert numpy.shape(avg) == (nc,) assert numpy.shape(vec) == (nc,nc,) assert numpy.shape(dev) == (nc,) u = (numpy.array(p) - avg) @ vec z2sum = 0 for ic in range(nc): zi = u[ic]/dev[ic] z2sum += zi*zi return z2sum # ---------------------------------------------------------------------- def map_gray_image_to_unit_range(gr_img, mk_imgs, img_name): # Given a grayscale image {gr_img} as an array with shape {(ny,nx,)} # returns a copy of it with values mapped to span {[0_1]}. # # The mapping will be such that most samples in each channel of # {gr_img} will be affinely mapped (shifted and scaled) by the same # amounts, with smooth clipping to {[0_1]} at both ends of the range. # # If either of the masks {mk_imgs} is {None}, it is assumed to # be all zeros. Otherwise it must be an image with shape {(ny,nx,)} # and values 0 or 255. Then the mapping will be selected by # considering only those pixels {iy,ix} of the {gr_img} images where # either mask is nonzero (but will still be applied to every whole # image). # ny, nx = numpy.shape(gr_img) assert numpy.shape(m0_img) == (ny,nx) assert numpy.shape(m1_img) == (ny,nx) nr_img = numpy.zeros((ny,nx)) err.write(f" collecting samples of channel ...\n") samples = [] for iy in range(ny): for ix in range(nx): val = gr_img[iy,ix] m0 = 255 if m0_img is None else m0_img[iy,ix] m1 = 255 if m1_img is None else m1_img[iy,ix] if (m0 != 0 or m1 != 0) and isfinite(val): samples.append(val) err.write(f" choosing stretching parameters to span [0_1] ...\n") frac_val = 0.001 shift = True bmin, bmax, fmin, fmax = gfn.choose_sample_map(samples, frac_val, shift) err.write(f" {bmin =:.6f} {bmax =:.6f}\n") err.write(f" stretching samples ...\n") nr_img = gfn.clip_image_squashed(gr_img, img_name, bmin, bmax, fmin, fmax, 0.0, 1.0) return nr_img # ---------------------------------------------------------------------- def check_eigen(T, W, E_vec, E_val): # Checks the Eigenvalue decomposition {E_vec,E_val} of the covariance matrix of {T} # with weights {W}. Assumes that the average of the samples has been subtracted from {T} ns, np = numpy.shape(T) assert numpy.shape(W) == (ns,) assert numpy.shape(E_vec) == (np,np,) assert numpy.shape(E_val) == (np,) W_sum = 0 SW_sum = numpy.zeros((np,)) C2W_sum = numpy.zeros((np,)) for ks in range(ns): w = W[ks] s = T[ks,:] c = s.transpose() @ E_vec W_sum += w SW_sum += w*s for ip in range(np): C2W_sum[ip] += w*c[ip]*c[ip] for ip in range(np): assert SW_sum[ip]/W_sum < 1.0e-7, "T average not zero" c_dev = sqrt(C2W_sum[ip]/W_sum) ev = E_val[ip] assert ev >= 0, "negative eigenvalue" e_dev = sqrt(ev) err.write(f" eigen axis {ip} dev = {c_dev:.12f} expected = {e_dev:.12f}\n") assert abs(c_dev - e_dev) < 3.0e-4, f"eigenbug {abs(c_dev-e_dev) =:.12f}" return # ---------------------------------------------------------------------- def luminance(p): # Given a triplet p that represents an RGB color, returns its # SRGB luminance. r, g, b = tuple(p) Y = + 0.2126*r + 0.7152*g + 0.0722*b return Y # ---------------------------------------------------------------------- def proj_transform(p, tproj, noise): # Applies to the triple or 3-vec RGB color {p = (r,g,b)} a projective transformation # yielding {t = (x,y,z)}, that maps # (1,1,1) to (0,0,1). # (e,e,e) to {(0,0,e)} where {e = noise}. # (1,0,0) to {(a,0,h)} for some {a,h}. # (0,1,0) to {(0,c,h)} for some {c}. # and maps planes of same pseudo-liminance to horizontal planes. # Note that if if the pseudo-luminance is less than {noise} # the resulting {z} is negative. # First, a 3D rotation from RGB to WUV, sort of YUV with # W = pseudo-luninance, with scaling of UV: debug = False def zproj(Z,U,V): # Then a projective map that keeps the plane Z = 0 fixed # and sends {(-1,0,0)} (black) to {-T,0,0)} where {T = - 1/(1 - tproj)} # Formula {(Z,U,V) -> (Z/den,U/den,V/den)} where {den = 1 + tproj*Z} den = 1 + tproj*Z if debug: err.write(f" {den = :12.8f}\n") assert den > 0 and den <= 1, "bug proj_transform" z = Z/den u = U/den v = V/den return z, u, v # .................................................................... if debug: err.write(f" {p = }\n") W = (p[0] + p[1] + p[2])/3 U = (+1.00*p[0] - 0.50*p[1] - 0.50*p[2])/sqrt(3/2) V = (00.00*p[0] + 1.00*p[1] - 1.00*p[2])/sqrt(2) if debug: err.write(f" {W = :12.8f}\n") # Clip {W} to strictly inside {[0_1]} to avoid infinities, overflow: tiny = 1.0e-14 W = min(1-tiny, max(tiny, W)) # Shift along new gray axis so that white is at 0, black at -1: Z = W - 1 if debug: err.write(f" {Z = :12.8f}\n") z, u, v = zproj(Z, U, V) if debug: err.write(f" {z = :12.8f}\n") # Then a shift and scale so that original white ends at (1,0,0) # and the original {(e,e,e)} ends at {(e,0,0)} where {e = noise} e = noise W_e = e Z_e = W_e - 1 z_e, u_e, v_e = zproj(Z_e,0,0) if debug: err.write(f" {z_e = :12.8f}\n") m = noise + (1 - noise)*(z_e - z)/z_e if debug: err.write(f" {m = :12.8f}\n") # Then magnify the U,V coordinates UVscale = 0.75 u *= UVscale v *= UVscale # Package and ship: q = (u,v,m) return q # ---------------------------------------------------------------------- def parse_options(): na = len(sys.argv); ia = 1; def get_opt(): nonlocal ia if ia >= na: arg_error(f"missing arg {ia}") arg = sys.argv[ia]; ia += 1 return arg # ...................................................................... def get_bool_opt(): arg = get_opt() arg = arg.lower() if arg[0] == 't' or arg[0] == 'y' or arg == '1': arg = True elif arg[0] == 'f' or arg[0] == 'n' or arg == '0': arg = False else: arg_error(f"bad boolean value {arg =}") return arg # ...................................................................... run_tag = get_opt(); # Tag identifying the run. page = get_opt(); # Name of page. clip = get_opt(); # Name of clip in that page. tproj = float(get_opt()); # Parameter of color transform. prout = float(get_opt()) # Prob of outliers. masks = [] nm = 0 while ia < na: masks.append(get_opt()); if ia != na: arg_error(f"spurious args {sys.argv[ia:]}") return run_tag, page, clip, tproj, prout, masks # ---------------------------------------------------------------------- def test_gaussian_distr(): err.write("=== testing gaussian_distr ...\n") nc = 3 avg = numpy.array((0.3, 0.4, 0.5)) vec = numpy.array(((1,0,0), (0,3/5,4/5), (0,-4/5,3/5))).transpose() dev = numpy.array((0.01, 0.03, 0.02)) mag = gaussian_mag(dev) pop = (avg, vec, dev, mag) # Numerical integration: H = 100 step = 8*numpy.max(dev)/H psum = 0 for ir in range(-H,H+1): r = avg[0] + ir*step rsum = 0 for ip in range(-H,H+1): g = avg[1] + ip*step rgsum = 0 for ib in range(-H,H+1): b = avg[2] + ib*step p = (r, g, b) pdf = gaussian_distr(p, pop) rgsum += pdf rsum += rgsum psum += rsum prob_int = psum*step*step*step debug("prob_int", prob_int) assert(abs(prob_int - 1.0) < 1.0e-4) return # ---------------------------------------------------------------------- def test_proj_transform(tproj): err.write(f"=== testing proj_transform {tproj = :.4f} ...\n") noise = (1/255)/12.9 def do_test(p): q = proj_transform(p, tproj, noise) err.write(f" [ {p[0]:12.6f} {p[1]:12.6f} {p[2]:12.6f} ]") err.write(f" ->") err.write(f" [ {q[0]:12.6f} {q[1]:12.6f} {q[2]:12.6f} ]") err.write(f"\n") return # .................................................................... p = (noise, noise, noise); do_test(p) p = (0,0,0); do_test(p) p = (1,1,1); do_test(p) p = (2,1,0); do_test(p) for bx in range(2): for by in range(2): for bz in range(2): p = (bx, by, bz) if (bx + by + bz) % 2 == 1: p = numpy.array(p) do_test(p) return # ---------------------------------------------------------------------- # test_proj_transform(0.9999) # test_proj_transform(0.9000) # test_proj_transform(0.0000) # test_gaussian_distr() main()