Revert "Merge branch 'master' of github.com:3b1b/manim into alt-calc"

This reverts commit 17a1ea6db500eddfdec75cc727b70737d892e961, reversing
changes made to c8c6e6d9ba77d42c8d50d8187ca2b0427da0079c.
Author: Grant Sanderson
Date: 2018-05-21 12:11:46 -07:00
Parent: 0f3b54694d
Commit: 13d7228918
22 changed files with 468 additions and 328 deletions


@@ -1,5 +1,6 @@
import itertools as it
import numpy as np
import operator as op
import aggdraw
import copy
@@ -7,9 +8,10 @@ import time
from PIL import Image
from colour import Color
from scipy.spatial.distance import pdist
from constants import *
from mobject.types.image_mobject import ImageMobject
from mobject.types.image_mobject import AbstractImageMobject
from mobject.mobject import Mobject
from mobject.types.point_cloud_mobject import PMobject
from mobject.types.vectorized_mobject import VMobject
@@ -21,22 +23,26 @@ from utils.iterables import batch_by_property
from utils.iterables import list_difference_update
from utils.iterables import remove_list_redundancies
from utils.simple_functions import fdiv
from utils.space_ops import angle_of_vector
class Camera(object):
CONFIG = {
"background_image": None,
"pixel_shape": (DEFAULT_PIXEL_HEIGHT, DEFAULT_PIXEL_WIDTH),
# Note: frame_shape will be resized to match pixel_shape
"frame_shape": (FRAME_HEIGHT, FRAME_WIDTH),
"space_center": ORIGIN,
"pixel_height": DEFAULT_PIXEL_HEIGHT,
"pixel_width": DEFAULT_PIXEL_WIDTH,
# Note: frame height and width will be resized to match
# the pixel aspect ratio
"frame_height": FRAME_HEIGHT,
"frame_width": FRAME_WIDTH,
"frame_center": ORIGIN,
"background_color": BLACK,
"background_opacity": 0,
# Points in vectorized mobjects with norm greater
# than this value will be rescaled.
"max_allowable_norm": FRAME_WIDTH,
"image_mode": "RGBA",
"n_rgb_coords": 4,
"background_alpha": 0, # Out of rgb_max_val
"pixel_array_dtype": 'uint8',
"use_z_coordinate_for_display_order": False,
# z_buff_func is only used if the flag above is set to True.
@@ -58,36 +64,72 @@ class Camera(object):
self.canvas = None
return copy.copy(self)
def reset_pixel_shape(self, new_height, new_width):
self.pixel_width = new_width
self.pixel_height = new_height
self.init_background()
self.resize_frame_shape()
self.reset()
def get_pixel_height(self):
return self.pixel_height
def get_pixel_width(self):
return self.pixel_width
def get_frame_height(self):
return self.frame_height
def get_frame_width(self):
return self.frame_width
def get_frame_center(self):
return self.frame_center
def set_frame_height(self, frame_height):
self.frame_height = frame_height
def set_frame_width(self, frame_width):
self.frame_width = frame_width
def set_frame_center(self, frame_center):
self.frame_center = frame_center
def resize_frame_shape(self, fixed_dimension=0):
"""
Changes frame_shape to match the aspect ratio
of pixel_shape, where fixed_dimension determines
whether frame_shape[0] (height) or frame_shape[1] (width)
of the pixels, where fixed_dimension determines
whether frame_height or frame_width
remains fixed while the other changes accordingly.
"""
aspect_ratio = float(self.pixel_shape[1]) / self.pixel_shape[0]
frame_width, frame_height = self.frame_shape
pixel_height = self.get_pixel_height()
pixel_width = self.get_pixel_width()
frame_height = self.get_frame_height()
frame_width = self.get_frame_width()
aspect_ratio = fdiv(pixel_width, pixel_height)
if fixed_dimension == 0:
frame_height = aspect_ratio * frame_width
frame_height = frame_width / aspect_ratio
else:
frame_width = frame_height / aspect_ratio
self.frame_shape = (frame_width, frame_height)
frame_width = aspect_ratio * frame_height
self.set_frame_height(frame_height)
self.set_frame_width(frame_width)
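# An illustrative sketch, not from the diff above, of the aspect-ratio
# arithmetic resize_frame_shape performs, assuming 1920x1080 pixels and a
# 14.0-unit frame width; the function name and defaults are hypothetical.
def resize_frame_shape_sketch(pixel_width=1920, pixel_height=1080,
                              frame_width=14.0, frame_height=8.0,
                              fixed_dimension=0):
    aspect_ratio = pixel_width / float(pixel_height)  # 1920 / 1080 = 16 / 9
    if fixed_dimension == 0:
        # frame_width stays fixed: 14.0 / (16 / 9) = 7.875
        frame_height = frame_width / aspect_ratio
    else:
        # frame_height stays fixed: (16 / 9) * 8.0 = 14.22...
        frame_width = aspect_ratio * frame_height
    return frame_width, frame_height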
def init_background(self):
height = self.get_pixel_height()
width = self.get_pixel_width()
if self.background_image is not None:
path = get_full_raster_image_path(self.background_image)
image = Image.open(path).convert(self.image_mode)
height, width = self.pixel_shape
# TODO, how to gracefully handle backgrounds
# with different sizes?
self.background = np.array(image)[:height, :width]
self.background = self.background.astype(self.pixel_array_dtype)
else:
background_rgba = color_to_int_rgba(
self.background_color, alpha=self.background_alpha
self.background_color, self.background_opacity
)
self.background = np.zeros(
list(self.pixel_shape) + [self.n_rgb_coords],
(height, width, self.n_rgb_coords),
dtype=self.pixel_array_dtype
)
self.background[:, :] = background_rgba
@@ -105,10 +147,10 @@ class Camera(object):
retval = np.array(pixel_array)
if convert_from_floats:
retval = np.apply_along_axis(
lambda f: (
f * self.rgb_max_val).astype(self.pixel_array_dtype),
lambda f: (f * self.rgb_max_val).astype(self.pixel_array_dtype),
2,
retval)
retval
)
return retval
def set_pixel_array(self, pixel_array, convert_from_floats=False):
@@ -148,6 +190,7 @@ class Camera(object):
def reset(self):
self.set_pixel_array(self.background)
return self
####
@@ -201,7 +244,7 @@ class Camera(object):
type_func_pairs = [
(VMobject, self.display_multiple_vectorized_mobjects),
(PMobject, self.display_multiple_point_cloud_mobjects),
(ImageMobject, self.display_multiple_image_mobjects),
(AbstractImageMobject, self.display_multiple_image_mobjects),
(Mobject, lambda batch: batch), # Do nothing
]
@@ -301,8 +344,7 @@ class Camera(object):
# points = self.adjust_out_of_range_points(points)
if len(points) == 0:
continue
aligned_points = self.align_points_to_camera(points)
coords = self.points_to_pixel_coords(aligned_points)
coords = self.points_to_pixel_coords(points)
coord_strings = coords.flatten().astype(str)
# Start new path string with M
coord_strings[0] = "M" + coord_strings[0]
@@ -342,7 +384,6 @@ class Camera(object):
def display_point_cloud(self, points, rgbas, thickness):
if len(points) == 0:
return
points = self.align_points_to_camera(points)
pixel_coords = self.points_to_pixel_coords(points)
pixel_coords = self.thickened_coordinates(
pixel_coords, thickness
@@ -358,7 +399,8 @@ class Camera(object):
pixel_coords = pixel_coords[on_screen_indices]
rgbas = rgbas[on_screen_indices]
ph, pw = self.pixel_shape
ph = self.get_pixel_height()
pw = self.get_pixel_width()
flattener = np.array([1, pw], dtype='int')
flattener = flattener.reshape((2, 1))
@@ -378,94 +420,56 @@ class Camera(object):
ul_coords, ur_coords, dl_coords = corner_coords
right_vect = ur_coords - ul_coords
down_vect = dl_coords - ul_coords
center_coords = ul_coords + (right_vect + down_vect) / 2
impa = image_mobject.pixel_array
oh, ow = self.pixel_array.shape[:2]  # Outer height and width
ih, iw = impa.shape[:2]  # Inner height and width
rgb_len = self.pixel_array.shape[2]
image = np.zeros((oh, ow, rgb_len), dtype=self.pixel_array_dtype)
if right_vect[1] == 0 and down_vect[0] == 0:
rv0 = right_vect[0]
dv1 = down_vect[1]
x_indices = np.arange(rv0, dtype='int') * iw / rv0
y_indices = np.arange(dv1, dtype='int') * ih / dv1
stretched_impa = impa[y_indices][:, x_indices]
x0, x1 = ul_coords[0], ur_coords[0]
y0, y1 = ul_coords[1], dl_coords[1]
if x0 >= ow or x1 < 0 or y0 >= oh or y1 < 0:
return
siy0 = max(-y0, 0) # stretched_impa y0
siy1 = dv1 - max(y1 - oh, 0)
six0 = max(-x0, 0)
six1 = rv0 - max(x1 - ow, 0)
x0 = max(x0, 0)
y0 = max(y0, 0)
image[y0:y1, x0:x1] = stretched_impa[siy0:siy1, six0:six1]
else:
# Alternate (slower) tactic if image is tilted
# List of all coordinates of pixels, given as (x, y),
# which matches the return type of points_to_pixel_coords,
# even though np.array indexing naturally happens as (y, x)
all_pixel_coords = np.zeros((oh * ow, 2), dtype='int')
a = np.arange(oh * ow, dtype='int')
all_pixel_coords[:, 0] = a % ow
all_pixel_coords[:, 1] = a / ow
recentered_coords = all_pixel_coords - ul_coords
with np.errstate(divide='ignore'):
ix_coords, iy_coords = [
np.divide(
dim * np.dot(recentered_coords, vect),
np.dot(vect, vect),
)
for vect, dim in (right_vect, iw), (down_vect, ih)
]
to_change = reduce(op.and_, [
ix_coords >= 0, ix_coords < iw,
iy_coords >= 0, iy_coords < ih,
])
inner_flat_coords = iw * \
iy_coords[to_change] + ix_coords[to_change]
flat_impa = impa.reshape((iw * ih, rgb_len))
target_rgbas = flat_impa[inner_flat_coords, :]
image = image.reshape((ow * oh, rgb_len))
image[to_change] = target_rgbas
image = image.reshape((oh, ow, rgb_len))
self.overlay_rgba_array(image)
def overlay_rgba_array(self, arr):
fg = arr
bg = self.pixel_array
# rgba_max_val = self.rgb_max_val
src_rgb, src_a, dst_rgb, dst_a = [
a.astype(np.float32) / self.rgb_max_val
for a in fg[..., :3], fg[..., 3], bg[..., :3], bg[..., 3]
]
out_a = src_a + dst_a * (1.0 - src_a)
# When the output alpha is 0 for full transparency,
# we have a choice over what RGB value to use in our
# output representation. We choose 0 here.
out_rgb = fdiv(
src_rgb * src_a[..., None] +
dst_rgb * dst_a[..., None] * (1.0 - src_a[..., None]),
out_a[..., None],
zero_over_zero_value=0
sub_image = Image.fromarray(
image_mobject.get_pixel_array(),
mode="RGBA"
)
self.pixel_array[..., :3] = out_rgb * self.rgb_max_val
self.pixel_array[..., 3] = out_a * self.rgb_max_val
# Reshape
pixel_width = int(pdist([ul_coords, ur_coords]))
pixel_height = int(pdist([ul_coords, dl_coords]))
sub_image = sub_image.resize(
(pixel_width, pixel_height), resample=Image.BICUBIC
)
def align_points_to_camera(self, points):
# This is where projection should live
return points - self.space_center
# Rotate
angle = angle_of_vector(right_vect)
adjusted_angle = -int(360 * angle / TAU)
if adjusted_angle != 0:
sub_image = sub_image.rotate(
adjusted_angle, resample=Image.BICUBIC, expand=1
)
# TODO, there is no accounting for a shear...
# Paste into an image as large as the camera's pixel array
full_image = Image.fromarray(
np.zeros((self.get_pixel_height(), self.get_pixel_width())),
mode="RGBA"
)
new_ul_coords = center_coords - np.array(sub_image.size) / 2
full_image.paste(
sub_image,
box=(
new_ul_coords[0],
new_ul_coords[1],
new_ul_coords[0] + sub_image.size[0],
new_ul_coords[1] + sub_image.size[1],
)
)
# Paint on top of existing pixel array
self.overlay_PIL_image(full_image)
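# A condensed, self-contained sketch, not from the diff above, of the PIL
# pipeline used here: resize a sub-image to its on-screen pixel size,
# rotate it to match the mobject's orientation, then paste it into a
# full-frame RGBA canvas. All sizes and the 30-degree angle are assumed.
import numpy as np
from PIL import Image

sub_image = Image.fromarray(
    np.full((64, 64, 4), 255, dtype='uint8'), mode="RGBA"
)
sub_image = sub_image.resize((200, 100), resample=Image.BICUBIC)
sub_image = sub_image.rotate(30, resample=Image.BICUBIC, expand=1)
full_image = Image.new("RGBA", (1920, 1080))
full_image.paste(sub_image, box=(100, 100))  # box gives the upper-left corner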
def overlay_rgba_array(self, arr):
self.overlay_PIL_image(Image.fromarray(arr, mode="RGBA"))
def overlay_PIL_image(self, image):
self.pixel_array = np.array(
Image.alpha_composite(self.get_image(), image)
)
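# A rough usage sketch, not from the diff above, of the PIL call that
# overlay_PIL_image relies on: Image.alpha_composite takes two RGBA images
# of identical size and applies standard "over" blending. The sizes and
# fill values below are illustrative.
import numpy as np
from PIL import Image

background = Image.fromarray(np.zeros((1080, 1920, 4), dtype='uint8'), mode="RGBA")
foreground = Image.fromarray(np.full((1080, 1920, 4), 128, dtype='uint8'), mode="RGBA")
composited = np.array(Image.alpha_composite(background, foreground))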
def adjust_out_of_range_points(self, points):
if not np.any(points > self.max_allowable_norm):
@@ -483,31 +487,43 @@ class Camera(object):
return points
def points_to_pixel_coords(self, points):
shifted_points = points - self.get_frame_center()
result = np.zeros((len(points), 2))
ph, pw = self.pixel_shape
sh, sw = self.frame_shape
width_mult = pw / sw
width_add = pw / 2
height_mult = ph / sh
height_add = ph / 2
pixel_height = self.get_pixel_height()
pixel_width = self.get_pixel_width()
frame_height = self.get_frame_height()
frame_width = self.get_frame_width()
width_mult = pixel_width / frame_width
width_add = pixel_width / 2
height_mult = pixel_height / frame_height
height_add = pixel_height / 2
# Flip on y-axis as you go
height_mult *= -1
result[:, 0] = points[:, 0] * width_mult + width_add
result[:, 1] = points[:, 1] * height_mult + height_add
result[:, 0] = shifted_points[:, 0] * width_mult + width_add
result[:, 1] = shifted_points[:, 1] * height_mult + height_add
return result.astype('int')
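# A worked example, not from the diff above, of the mapping implemented by
# points_to_pixel_coords, assuming 1920x1080 pixels, a 14.22 x 8.0 unit
# frame, and a frame center at the origin.
import numpy as np

pixel_width, pixel_height = 1920, 1080
frame_width, frame_height = 14.22, 8.0
point = np.array([1.0, 2.0, 0.0])  # a point in space coordinates

x_pixel = point[0] * (pixel_width / frame_width) + pixel_width / 2.0
# The sign flip sends positive y upward on screen while pixel rows count downward.
y_pixel = point[1] * (-pixel_height / frame_height) + pixel_height / 2.0
# x_pixel is roughly 1095, y_pixel is 270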
def on_screen_pixels(self, pixel_coords):
return reduce(op.and_, [
pixel_coords[:, 0] >= 0,
pixel_coords[:, 0] < self.pixel_shape[1],
pixel_coords[:, 0] < self.get_pixel_width(),
pixel_coords[:, 1] >= 0,
pixel_coords[:, 1] < self.pixel_shape[0],
pixel_coords[:, 1] < self.get_pixel_height(),
])
def adjusted_thickness(self, thickness):
big_shape = PRODUCTION_QUALITY_CAMERA_CONFIG["pixel_shape"]
factor = sum(big_shape) / sum(self.pixel_shape)
# TODO: This seems...unsystematic
big_sum = op.add(
PRODUCTION_QUALITY_CAMERA_CONFIG["pixel_height"],
PRODUCTION_QUALITY_CAMERA_CONFIG["pixel_width"],
)
this_sum = op.add(
self.get_pixel_height(),
self.get_pixel_width(),
)
factor = fdiv(big_sum, this_sum)
return 1 + (thickness - 1) / factor
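# A quick numerical check, not from the diff above, of adjusted_thickness,
# taking the production configuration to be 2560x1440 (an assumption) and
# the current camera to be 854x480.
big_sum = 1440 + 2560    # assumed production pixel_height + pixel_width
this_sum = 480 + 854     # current pixel_height + pixel_width
factor = big_sum / float(this_sum)        # 4000 / 1334, roughly 3.0
thickness = 4
adjusted = 1 + (thickness - 1) / factor   # roughly 2.0: fewer pixels thick at low resolution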
def get_thickening_nudges(self, thickness):
@@ -525,13 +541,20 @@ class Camera(object):
def get_coords_of_all_pixels(self):
# These are in x, y order, to help me keep things straight
full_space_dims = np.array(self.frame_shape)[::-1]
full_pixel_dims = np.array(self.pixel_shape)[::-1]
full_space_dims = np.array([
self.get_frame_width(),
self.get_frame_height()
])
full_pixel_dims = np.array([
self.get_pixel_width(),
self.get_pixel_height()
])
# These are addressed in the same y, x order as in pixel_array, but the values in them
# are listed in x, y order
uncentered_pixel_coords = np.indices(self.pixel_shape)[
::-1].transpose(1, 2, 0)
uncentered_pixel_coords = np.indices(
[self.get_pixel_height(), self.get_pixel_width()]
)[::-1].transpose(1, 2, 0)
uncentered_space_coords = fdiv(
uncentered_pixel_coords * full_space_dims,
full_pixel_dims)
@@ -541,7 +564,8 @@ class Camera(object):
# overflow is unlikely to be a problem)
centered_space_coords = (
uncentered_space_coords - fdiv(full_space_dims, 2))
uncentered_space_coords - fdiv(full_space_dims, 2)
)
# Have to also flip the y coordinates to account for pixel array being listed in
# top-to-bottom order, opposite of screen coordinate convention