"""
|
|
|
|
Stitching sample (advanced)
|
|
|
|
===========================
|
|
|
|
|
|
|
|
Show how to use Stitcher API from python.
|
|
|
|
"""

# Python 2/3 compatibility
from __future__ import print_function

import argparse
from collections import OrderedDict

import cv2 as cv
import numpy as np

EXPOS_COMP_CHOICES = OrderedDict()
EXPOS_COMP_CHOICES['gain_blocks'] = cv.detail.ExposureCompensator_GAIN_BLOCKS
EXPOS_COMP_CHOICES['gain'] = cv.detail.ExposureCompensator_GAIN
EXPOS_COMP_CHOICES['channel'] = cv.detail.ExposureCompensator_CHANNELS
EXPOS_COMP_CHOICES['channel_blocks'] = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
EXPOS_COMP_CHOICES['no'] = cv.detail.ExposureCompensator_NO

BA_COST_CHOICES = OrderedDict()
BA_COST_CHOICES['ray'] = cv.detail_BundleAdjusterRay
BA_COST_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj
BA_COST_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial
BA_COST_CHOICES['no'] = cv.detail_NoBundleAdjuster

FEATURES_FIND_CHOICES = OrderedDict()
try:
    FEATURES_FIND_CHOICES['surf'] = cv.xfeatures2d_SURF.create
except AttributeError:
    print("SURF not available")
# if SURF is not available, ORB is the default
FEATURES_FIND_CHOICES['orb'] = cv.ORB.create
try:
    FEATURES_FIND_CHOICES['sift'] = cv.xfeatures2d_SIFT.create
except AttributeError:
    print("SIFT not available")
try:
    FEATURES_FIND_CHOICES['brisk'] = cv.BRISK_create
except AttributeError:
    print("BRISK not available")
try:
    FEATURES_FIND_CHOICES['akaze'] = cv.AKAZE_create
except AttributeError:
    print("AKAZE not available")

SEAM_FIND_CHOICES = OrderedDict()
SEAM_FIND_CHOICES['gc_color'] = cv.detail_GraphCutSeamFinder('COST_COLOR')
SEAM_FIND_CHOICES['gc_colorgrad'] = cv.detail_GraphCutSeamFinder('COST_COLOR_GRAD')
SEAM_FIND_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
SEAM_FIND_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')
SEAM_FIND_CHOICES['voronoi'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
SEAM_FIND_CHOICES['no'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
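
# Unlike FEATURES_FIND_CHOICES, which stores factory callables invoked later,
# these entries are ready-made seam finder instances used directly.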

ESTIMATOR_CHOICES = OrderedDict()
ESTIMATOR_CHOICES['homography'] = cv.detail_HomographyBasedEstimator
ESTIMATOR_CHOICES['affine'] = cv.detail_AffineBasedEstimator

WARP_CHOICES = (
    'spherical',
    'plane',
    'affine',
    'cylindrical',
    'fisheye',
    'stereographic',
    'compressedPlaneA2B1',
    'compressedPlaneA1.5B1',
    'compressedPlanePortraitA2B1',
    'compressedPlanePortraitA1.5B1',
    'paniniA2B1',
    'paniniA1.5B1',
    'paniniPortraitA2B1',
    'paniniPortraitA1.5B1',
    'mercator',
    'transverseMercator',
)

WAVE_CORRECT_CHOICES = ('horiz', 'no', 'vert',)

BLEND_CHOICES = ('multiband', 'feather', 'no',)

parser = argparse.ArgumentParser(
    prog="stitching_detailed.py", description="Rotation model images stitcher"
)
parser.add_argument(
    'img_names', nargs='+',
    help="Files to stitch", type=str
)
parser.add_argument(
    '--try_cuda',
    action='store',
    default=False,
    help="Try to use CUDA. The default value is no. All default values are for CPU mode.",
    # argparse's type=bool treats any non-empty string (even 'False') as True,
    # so parse the flag value explicitly
    type=lambda v: str(v).lower() in ('yes', 'true', 't', '1'),
    dest='try_cuda'
)
parser.add_argument(
    '--work_megapix', action='store', default=0.6,
    help="Resolution for image registration step. The default is 0.6 Mpx.",
    type=float, dest='work_megapix'
)
parser.add_argument(
    '--features', action='store', default=list(FEATURES_FIND_CHOICES.keys())[0],
    help="Type of features used for image matching. The default is '%s'." % list(FEATURES_FIND_CHOICES.keys())[0],
    choices=FEATURES_FIND_CHOICES.keys(),
    type=str, dest='features'
)
parser.add_argument(
    '--matcher', action='store', default='homography',
    help="Matcher used for pairwise image matching. The default is 'homography'.",
    choices=('homography', 'affine'),
    type=str, dest='matcher'
)
parser.add_argument(
    '--estimator', action='store', default=list(ESTIMATOR_CHOICES.keys())[0],
    help="Type of estimator used for transformation estimation. The default is '%s'." % list(ESTIMATOR_CHOICES.keys())[0],
    choices=ESTIMATOR_CHOICES.keys(),
    type=str, dest='estimator'
)
parser.add_argument(
    '--match_conf', action='store',
    help="Confidence for feature matching step. The default is 0.3 for ORB and 0.65 for other feature types.",
    type=float, dest='match_conf'
)
parser.add_argument(
    '--conf_thresh', action='store', default=1.0,
    help="Confidence threshold for deciding that two images are from the same panorama. The default is 1.0.",
    type=float, dest='conf_thresh'
)
parser.add_argument(
    '--ba', action='store', default=list(BA_COST_CHOICES.keys())[0],
    help="Bundle adjustment cost function. The default is '%s'." % list(BA_COST_CHOICES.keys())[0],
    choices=BA_COST_CHOICES.keys(),
    type=str, dest='ba'
)
parser.add_argument(
    '--ba_refine_mask', action='store', default='xxxxx',
    help="Set refinement mask for bundle adjustment. It looks like 'x_xxx', "
         "where 'x' means refine respective parameter and '_' means don't refine, "
         "and has the following format: <fx><skew><ppx><aspect><ppy>. "
         "The default mask is 'xxxxx'. "
         "If bundle adjustment doesn't support estimation of selected parameter then "
         "the respective flag is ignored.",
    type=str, dest='ba_refine_mask'
)
parser.add_argument(
    '--wave_correct', action='store', default=WAVE_CORRECT_CHOICES[0],
    help="Perform wave effect correction. The default is '%s'." % WAVE_CORRECT_CHOICES[0],
    choices=WAVE_CORRECT_CHOICES,
    type=str, dest='wave_correct'
)
parser.add_argument(
    '--save_graph', action='store', default=None,
    help="Save matches graph represented in DOT language to <file_name> file.",
    type=str, dest='save_graph'
)
parser.add_argument(
    '--warp', action='store', default=WARP_CHOICES[0],
    help="Warp surface type. The default is '%s'." % WARP_CHOICES[0],
    choices=WARP_CHOICES,
    type=str, dest='warp'
)
parser.add_argument(
    '--seam_megapix', action='store', default=0.1,
    help="Resolution for seam estimation step. The default is 0.1 Mpx.",
    type=float, dest='seam_megapix'
)
parser.add_argument(
    '--seam', action='store', default=list(SEAM_FIND_CHOICES.keys())[0],
    help="Seam estimation method. The default is '%s'." % list(SEAM_FIND_CHOICES.keys())[0],
    choices=SEAM_FIND_CHOICES.keys(),
    type=str, dest='seam'
)
parser.add_argument(
    '--compose_megapix', action='store', default=-1,
    help="Resolution for compositing step. Use -1 for original resolution. The default is -1.",
    type=float, dest='compose_megapix'
)
parser.add_argument(
    '--expos_comp', action='store', default=list(EXPOS_COMP_CHOICES.keys())[0],
    help="Exposure compensation method. The default is '%s'." % list(EXPOS_COMP_CHOICES.keys())[0],
    choices=EXPOS_COMP_CHOICES.keys(),
    type=str, dest='expos_comp'
)
parser.add_argument(
    '--expos_comp_nr_feeds', action='store', default=1,
    help="Number of exposure compensation feeds.",
    type=np.int32, dest='expos_comp_nr_feeds'
)
parser.add_argument(
    '--expos_comp_nr_filtering', action='store', default=2,
    help="Number of filtering iterations of the exposure compensation gains.",
    type=float, dest='expos_comp_nr_filtering'
)
parser.add_argument(
    '--expos_comp_block_size', action='store', default=32,
    help="Block size in pixels used by the exposure compensator. The default is 32.",
    type=np.int32, dest='expos_comp_block_size'
)
parser.add_argument(
    '--blend', action='store', default=BLEND_CHOICES[0],
    help="Blending method. The default is '%s'." % BLEND_CHOICES[0],
    choices=BLEND_CHOICES,
    type=str, dest='blend'
)
parser.add_argument(
    '--blend_strength', action='store', default=5,
    help="Blending strength from [0,100] range. The default is 5.",
    type=np.int32, dest='blend_strength'
)
parser.add_argument(
    '--output', action='store', default='result.jpg',
    help="Name of the output file. The default is 'result.jpg'.",
    type=str, dest='output'
)
parser.add_argument(
    '--timelapse', action='store', default=None,
    help="Output warped images separately as frames of a time lapse movie, "
         "with 'fixed_' prepended to input file names.",
    type=str, dest='timelapse'
)
parser.add_argument(
    '--rangewidth', action='store', default=-1,
    help="Limit matching to range_width neighboring images. The default of -1 matches all image pairs.",
    type=int, dest='rangewidth'
)

__doc__ += '\n' + parser.format_help()


def get_matcher(args):
    try_cuda = args.try_cuda
    matcher_type = args.matcher
    if args.match_conf is None:
        if args.features == 'orb':
            match_conf = 0.3
        else:
            match_conf = 0.65
    else:
        match_conf = args.match_conf
    range_width = args.rangewidth
    if matcher_type == "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
    return matcher
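
# With the defaults above, '--matcher homography' and '--rangewidth -1' yield a
# BestOf2NearestMatcher that tries every image pair; a positive --rangewidth
# instead limits matching to that many neighboring images, which speeds up
# matching for ordered image sequences.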


def get_compensator(args):
    expos_comp_type = EXPOS_COMP_CHOICES[args.expos_comp]
    expos_comp_nr_feeds = args.expos_comp_nr_feeds
    expos_comp_block_size = args.expos_comp_block_size
    # expos_comp_nr_filtering = args.expos_comp_nr_filtering
    if expos_comp_type == cv.detail.ExposureCompensator_CHANNELS:
        compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    elif expos_comp_type == cv.detail.ExposureCompensator_CHANNELS_BLOCKS:
        compensator = cv.detail_BlocksChannelsCompensator(
            expos_comp_block_size, expos_comp_block_size,
            expos_comp_nr_feeds
        )
        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    else:
        compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)
    return compensator
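
# 'gain' and 'gain_blocks' compensators come from ExposureCompensator_createDefault;
# only the channel-wise variants are constructed explicitly so the number of
# feeds (and, for the block compensator, the block size) can be passed in.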


def main():
    args = parser.parse_args()
    img_names = args.img_names
    print(img_names)
    work_megapix = args.work_megapix
    seam_megapix = args.seam_megapix
    compose_megapix = args.compose_megapix
    conf_thresh = args.conf_thresh
    ba_refine_mask = args.ba_refine_mask
    wave_correct = args.wave_correct
    if wave_correct == 'no':
        do_wave_correct = False
    else:
        do_wave_correct = True
        # map the choice onto the matching cv.detail constant so that
        # '--wave_correct vert' is not treated as horizontal correction
        if wave_correct == 'vert':
            wave_correct_kind = cv.detail.WAVE_CORRECT_VERT
        else:
            wave_correct_kind = cv.detail.WAVE_CORRECT_HORIZ
    if args.save_graph is None:
        save_graph = False
    else:
        save_graph = True
    warp_type = args.warp
    blend_type = args.blend
    blend_strength = args.blend_strength
    result_name = args.output
    if args.timelapse is not None:
        timelapse = True
        if args.timelapse == "as_is":
            timelapse_type = cv.detail.Timelapser_AS_IS
        elif args.timelapse == "crop":
            timelapse_type = cv.detail.Timelapser_CROP
        else:
            print("Bad timelapse method")
            exit()
    else:
        timelapse = False
    finder = FEATURES_FIND_CHOICES[args.features]()
    seam_work_aspect = 1
    full_img_sizes = []
    features = []
    images = []
    is_work_scale_set = False
    is_seam_scale_set = False
    is_compose_scale_set = False
    for name in img_names:
        full_img = cv.imread(cv.samples.findFile(name))
        if full_img is None:
            print("Cannot read image ", name)
            exit()
        full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
        if work_megapix < 0:
            img = full_img
            work_scale = 1
            is_work_scale_set = True
        else:
            if is_work_scale_set is False:
                work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
                is_work_scale_set = True
            img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT)
        if is_seam_scale_set is False:
            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            seam_work_aspect = seam_scale / work_scale
            is_seam_scale_set = True
        img_feat = cv.detail.computeImageFeatures2(finder, img)
        features.append(img_feat)
        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT)
        images.append(img)
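
    # The pipeline works at three resolutions: work_scale for feature
    # registration, seam_scale for seam estimation, and compose_scale for the
    # final composition; seam_work_aspect converts between the first two.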

    matcher = get_matcher(args)
    p = matcher.apply2(features)
    matcher.collectGarbage()

    if save_graph:
        with open(args.save_graph, 'w') as fh:
            fh.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))

    # keep only the largest subset of images that are confidently part of the
    # same panorama
    indices = cv.detail.leaveBiggestComponent(features, p, conf_thresh)
    img_subset = []
    img_names_subset = []
    full_img_sizes_subset = []
    for i in range(len(indices)):
        img_names_subset.append(img_names[indices[i, 0]])
        img_subset.append(images[indices[i, 0]])
        full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])
    images = img_subset
    img_names = img_names_subset
    full_img_sizes = full_img_sizes_subset
    num_images = len(img_names)
    if num_images < 2:
        print("Need more images")
        exit()

    estimator = ESTIMATOR_CHOICES[args.estimator]()
    b, cameras = estimator.apply(features, p, None)
    if not b:
        print("Homography estimation failed.")
        exit()
    for cam in cameras:
        cam.R = cam.R.astype(np.float32)

    adjuster = BA_COST_CHOICES[args.ba]()
    adjuster.setConfThresh(1)
    # positions in ba_refine_mask map into the 3x3 intrinsic matrix:
    # <fx> -> K[0,0], <skew> -> K[0,1], <ppx> -> K[0,2], <aspect> -> K[1,1], <ppy> -> K[1,2]
    refine_mask = np.zeros((3, 3), np.uint8)
    if ba_refine_mask[0] == 'x':
        refine_mask[0, 0] = 1
    if ba_refine_mask[1] == 'x':
        refine_mask[0, 1] = 1
    if ba_refine_mask[2] == 'x':
        refine_mask[0, 2] = 1
    if ba_refine_mask[3] == 'x':
        refine_mask[1, 1] = 1
    if ba_refine_mask[4] == 'x':
        refine_mask[1, 2] = 1
    adjuster.setRefinementMask(refine_mask)
    b, cameras = adjuster.apply(features, p, cameras)
    if not b:
        print("Camera parameters adjusting failed.")
        exit()
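
    # The warp scale is the median focal length of the adjusted cameras, a
    # robust estimate of the panorama's projection radius.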
    focals = []
    for cam in cameras:
        focals.append(cam.focal)
    focals.sort()
    if len(focals) % 2 == 1:
        warped_image_scale = focals[len(focals) // 2]
    else:
        warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2
    if do_wave_correct:
        rmats = []
        for cam in cameras:
            rmats.append(np.copy(cam.R))
        rmats = cv.detail.waveCorrect(rmats, wave_correct_kind)
        for idx, cam in enumerate(cameras):
            cam.R = rmats[idx]
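
    # Project the seam-scale images onto the warp surface; the warped masks
    # record which pixels of each projected image are valid.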
    corners = []
    masks_warped = []
    images_warped = []
    sizes = []
    masks = []
    for i in range(0, num_images):
        um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
        masks.append(um)

    warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect)  # warper could be nullptr?
    for idx in range(0, num_images):
        K = cameras[idx].K().astype(np.float32)
        # cameras were estimated at work resolution; scale the intrinsics down
        # to seam resolution before warping
        swa = seam_work_aspect
        K[0, 0] *= swa
        K[0, 2] *= swa
        K[1, 1] *= swa
        K[1, 2] *= swa
        corner, image_wp = warper.warp(images[idx], K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
        corners.append(corner)
        sizes.append((image_wp.shape[1], image_wp.shape[0]))
        images_warped.append(image_wp)
        p, mask_wp = warper.warp(masks[idx], K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        masks_warped.append(mask_wp.get())
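
    # Exposure compensation and seam estimation both run on these low-resolution
    # warped images; the results are reused during full-resolution compositing.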
    images_warped_f = []
    for img in images_warped:
        imgf = img.astype(np.float32)
        images_warped_f.append(imgf)

    compensator = get_compensator(args)
    compensator.feed(corners=corners, images=images_warped, masks=masks_warped)

    seam_finder = SEAM_FIND_CHOICES[args.seam]
    seam_finder.find(images_warped_f, corners, masks_warped)
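
    # Compositing pass: re-read each input at compose resolution, warp it with
    # the rescaled cameras, correct its exposure, and feed it to the blender
    # (or the timelapser).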
    compose_scale = 1
    corners = []
    sizes = []
    blender = None
    timelapser = None
    # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
    for idx, name in enumerate(img_names):
        full_img = cv.imread(name)
        if not is_compose_scale_set:
            if compose_megapix > 0:
                compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            is_compose_scale_set = True
            compose_work_aspect = compose_scale / work_scale
            warped_image_scale *= compose_work_aspect
            warper = cv.PyRotationWarper(warp_type, warped_image_scale)
            for i in range(0, len(img_names)):
                cameras[i].focal *= compose_work_aspect
                cameras[i].ppx *= compose_work_aspect
                cameras[i].ppy *= compose_work_aspect
                # warpRoi expects an integer size
                sz = (int(round(full_img_sizes[i][0] * compose_scale)),
                      int(round(full_img_sizes[i][1] * compose_scale)))
                K = cameras[i].K().astype(np.float32)
                roi = warper.warpRoi(sz, K, cameras[i].R)
                corners.append(roi[0:2])
                sizes.append(roi[2:4])
        if abs(compose_scale - 1) > 1e-1:
            img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,
                            interpolation=cv.INTER_LINEAR_EXACT)
        else:
            img = full_img
        _img_size = (img.shape[1], img.shape[0])
        K = cameras[idx].K().astype(np.float32)
        corner, image_warped = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
        mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
        p, mask_warped = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        compensator.apply(idx, corners[idx], image_warped, mask_warped)
        image_warped_s = image_warped.astype(np.int16)
        # bring the low-resolution seam masks up to compose resolution and
        # restrict them to the valid warped area
        dilated_mask = cv.dilate(masks_warped[idx], None)
        seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]),
                              interpolation=cv.INTER_LINEAR_EXACT)
        mask_warped = cv.bitwise_and(seam_mask, mask_warped)
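
        # The blender/timelapser is created lazily on the first iteration, once
        # the panorama ROI (corners and sizes) computed above is available.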
        if blender is None and not timelapse:
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
            blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
            if blend_width < 1:
                blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            elif blend_type == "multiband":
                blender = cv.detail_MultiBandBlender()
                # the number of pyramid bands grows with the blend region width
                blender.setNumBands(int(np.log(blend_width) / np.log(2.) - 1.))
            elif blend_type == "feather":
                blender = cv.detail_FeatherBlender()
                blender.setSharpness(1. / blend_width)
            blender.prepare(dst_sz)
        elif timelapser is None and timelapse:
            timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
            timelapser.initialize(corners, sizes)
        if timelapse:
            ma_tones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
            timelapser.process(image_warped_s, ma_tones, corners[idx])
            pos_s = img_names[idx].rfind("/")
            if pos_s == -1:
                fixed_file_name = "fixed_" + img_names[idx]
            else:
                fixed_file_name = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
            cv.imwrite(fixed_file_name, timelapser.getDst())
        else:
            blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
    if not timelapse:
        result = None
        result_mask = None
        result, result_mask = blender.blend(result, result_mask)
        cv.imwrite(result_name, result)
        zoom_x = 600.0 / result.shape[1]
        dst = cv.normalize(src=result, dst=None, alpha=255., norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
        dst = cv.resize(dst, dsize=None, fx=zoom_x, fy=zoom_x)
        cv.imshow(result_name, dst)
        cv.waitKey()

    print("Done")


if __name__ == '__main__':
    print(__doc__)
    main()
    cv.destroyAllWindows()