#ifdef USE_IMAGEMAGICK
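// ImageMagick-only readers (ImageReader, TextReader) are guarded by this define.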
wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
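// Default waveform color: opaque blue (RGBA 0, 123, 255, 255).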
parentTrackedObject = nullptr;
parentClipObject = nullptr;
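// If the reader exposes a "rotate" metadata entry (common for phone footage), use it as the clip's initial rotation.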
if (reader && reader->info.metadata.count("rotate") > 0) {
    try {
        float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), nullptr);
    } catch (const std::exception& e) {}
}
Clip::Clip() : resampler(nullptr), reader(nullptr), allocated_reader(nullptr), is_open(false)
Clip::Clip(ReaderBase* new_reader) : resampler(nullptr), reader(new_reader), allocated_reader(nullptr), is_open(false)
Clip::Clip(std::string path) : resampler(nullptr), reader(nullptr), allocated_reader(nullptr), is_open(false)
std::string ext = get_file_extension(path);
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
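// Known audio/video extensions, plus printf-style sequence paths containing '%';
// in the full source this branch creates an FFmpeg-based reader.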
if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
    ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || path.find("%") != std::string::npos)
allocated_reader = reader;
if (allocated_reader) {
    delete allocated_reader;
    allocated_reader = nullptr;
}
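// allocated_reader tracks only readers this Clip created itself; readers supplied by the caller are never deleted here.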
if (parentTimeline) {
    std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
    Clip* clipObject = parentTimeline->GetClip(object_id);

    if (trackedObject) {
        parentClipObject = nullptr;
    }
    else if (clipObject) {
        parentTrackedObject = nullptr;
    }
}

// Setter bodies (signatures elided in this excerpt):
parentTrackedObject = trackedObject;

parentClipObject = clipObject;
bool is_same_reader = false;
if (new_reader && allocated_reader) {
    if (new_reader->Name() == "FrameMapper") {
        FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
        if (allocated_reader == clip_mapped_reader->Reader()) {
            is_same_reader = true;
if (allocated_reader && !is_same_reader) {
    allocated_reader->Close();
    delete allocated_reader;
    allocated_reader = nullptr;
}
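// A FrameMapper handed in via Reader() may wrap our own allocated reader; in that case
// the allocation must stay alive, so it is only freed when is_same_reader is false.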
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
if (is_open && reader) {
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
return GetFrame(nullptr, clip_frame_number, nullptr);
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    return GetFrame(background_frame, clip_frame_number, nullptr);
throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");
std::shared_ptr<Frame> frame = nullptr;
frame = final_cache.GetFrame(clip_frame_number);

frame = GetOrCreateFrame(clip_frame_number);
int64_t timeline_frame_number = clip_frame_number;
QSize timeline_size(frame->GetWidth(), frame->GetHeight());
if (background_frame) {
    timeline_frame_number = background_frame->number;
    timeline_size.setWidth(background_frame->GetWidth());
    timeline_size.setHeight(background_frame->GetHeight());
}
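// Per-frame pipeline: remap time, draw the audio waveform (if enabled),
// apply pre-keyframe effects, apply keyframed scale/rotation/placement,
// apply post-keyframe effects, then cache the finished frame.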
apply_timemapping(frame);

apply_waveform(frame, timeline_size);

apply_effects(frame, timeline_frame_number, options, true);

apply_keyframes(frame, timeline_size);

apply_effects(frame, timeline_frame_number, options, false);

final_cache.Add(frame);
if (!background_frame) {
    background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
                                               "#00000000", frame->GetAudioSamplesCount(),
                                               frame->GetAudioChannelsCount());
}

apply_background(frame, background_frame);
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
for (const auto& effect : effects) {
    if (effect->Id() == id) {
if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {

return parentClipObject;

if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {

return parentTrackedObject;
std::string Clip::get_file_extension(std::string path)
{
    return path.substr(path.find_last_of(".") + 1);
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
int64_t clip_frame_number = frame->number;
int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
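// The 'time' keyframe curve maps this clip frame number to the source frame that should be played here.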
int source_sample_count = round(target_sample_count * fabs(delta));
location.frame = new_frame_number;
init_samples.clear();
resampler->SetBuffer(&init_samples, 1.0);
if (source_sample_count <= 0) {
    frame->AddAudioSilence(target_sample_count);
source_samples->clear();

int remaining_samples = source_sample_count;
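// Walk consecutive source frames, copying their samples until the requested source_sample_count is gathered.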
while (remaining_samples > 0) {
    std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
    int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;
if (frame_sample_count == 0) {
if (remaining_samples - frame_sample_count >= 0) {
    for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
        source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
    }

    remaining_samples -= frame_sample_count;
    source_pos += frame_sample_count;
for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
}

source_pos += remaining_samples;
remaining_samples = 0;
frame->AddAudioSilence(target_sample_count);
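// When time-mapping stretches or squeezes audio, the gathered samples are resampled
// by the source/target ratio; otherwise they are copied through unchanged.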
if (source_sample_count != target_sample_count) {
    double resample_ratio = double(source_sample_count) / double(target_sample_count);
    resampler->SetBuffer(source_samples, resample_ratio);
frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
delete source_samples;
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    if (frame_number < 1)
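// Frame numbers are 1-based, so anything below 1 is clamped up to the first frame.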
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
int64_t clip_frame_number = adjust_frame_number_minimum(number);
clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
677 "Clip::GetOrCreateFrame (from reader)",
678 "number", number,
"clip_frame_number", clip_frame_number);
auto reader_frame = reader->GetFrame(clip_frame_number);
reader_frame->number = number;
auto reader_copy = std::make_shared<Frame>(*reader_frame.get());

reader_copy->AddColor(QColor(Qt::transparent));

reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
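// The copy is made transparent and/or silenced when this clip's video or audio is
// disabled (the has_video / has_audio checks are elided in this excerpt).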
712 "Clip::GetOrCreateFrame (create blank)",
714 "estimated_samples_in_frame", estimated_samples_in_frame);
auto new_frame = std::make_shared<Frame>(
    number, reader->info.width, reader->info.height,
    "#000000", estimated_samples_in_frame, reader->info.channels);

new_frame->AddAudioSilence(estimated_samples_in_frame);
root["id"] = add_property_json("ID", 0.0, "string", Id(), nullptr, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", nullptr, 0, 30 * 60 * 60 * 48, false, requested_frame);

root["start"] = add_property_json("Start", Start(), "float", "", nullptr, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", nullptr, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", nullptr, 0, 30 * 60 * 60 * 48, true, requested_frame);

root["waveform"] = add_property_json("Waveform", waveform, "int", "", nullptr, 0, 1, false, requested_frame);
root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, nullptr, -1, -1, false, requested_frame);
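// Each entry appears to describe one UI-editable property: label, current value, type,
// text value, optional keyframe, min/max range, read-only flag, and the inspected frame.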
if (parentClipObject)
{
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
    float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
    float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
    float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
    float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
    float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
    float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
    float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
    root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
    root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
return root.toStyledString();
root["parentObjectId"] = parentObjectId;

root["scale"] = scale;

root["waveform"] = waveform;

root["effects"] = Json::Value(Json::arrayValue);

for (auto existing_effect : effects)
{
    root["effects"].append(existing_effect->JsonValue());
}

root["reader"] = Json::Value(Json::objectValue);
catch (const std::exception& e)
{
    throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
if (!root["parentObjectId"].isNull()) {
    parentObjectId = root["parentObjectId"].asString();
    if (!parentObjectId.empty()) {
        AttachToObject(parentObjectId);
    } else {
        parentTrackedObject = nullptr;
        parentClipObject = nullptr;
    }
}
if (!root["gravity"].isNull())
if (!root["scale"].isNull())
if (!root["anchor"].isNull())
if (!root["display"].isNull())
if (!root["mixing"].isNull())
if (!root["waveform"].isNull())
    waveform = root["waveform"].asBool();
if (!root["scale_x"].isNull())
if (!root["scale_y"].isNull())
if (!root["location_x"].isNull())
if (!root["location_y"].isNull())
if (!root["alpha"].isNull())
if (!root["rotation"].isNull())
if (!root["time"].isNull())
if (!root["volume"].isNull())
if (!root["wave_color"].isNull())
if (!root["shear_x"].isNull())
if (!root["shear_y"].isNull())
if (!root["origin_x"].isNull())
if (!root["origin_y"].isNull())
if (!root["channel_filter"].isNull())
if (!root["channel_mapping"].isNull())
if (!root["has_audio"].isNull())
if (!root["has_video"].isNull())
if (!root["perspective_c1_x"].isNull())
if (!root["perspective_c1_y"].isNull())
if (!root["perspective_c2_x"].isNull())
if (!root["perspective_c2_y"].isNull())
if (!root["perspective_c3_x"].isNull())
if (!root["perspective_c3_y"].isNull())
if (!root["perspective_c4_x"].isNull())
if (!root["perspective_c4_y"].isNull())
if (!root["effects"].isNull()) {

    for (const auto& existing_effect : root["effects"]) {

        if (existing_effect.isNull()) {

        if (!existing_effect["type"].isNull()) {

            if ((e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
if (!root["reader"].isNull())
{
    if (!root["reader"]["type"].isNull())
    {
        bool already_open = false;

        already_open = reader->IsOpen();

        std::string type = root["reader"]["type"].asString();
if (type == "FFmpegReader") {

} else if (type == "QtImageReader") {

#ifdef USE_IMAGEMAGICK
} else if (type == "ImageReader") {

    reader = new ImageReader(root["reader"]["path"].asString(), false);

} else if (type == "TextReader") {

} else if (type == "ChunkReader") {

} else if (type == "DummyReader") {

} else if (type == "Timeline") {

allocated_reader = reader;
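// Remember the reader we just created from JSON so the destructor (and future Reader() calls) can free it.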
final_cache.Clear();
void Clip::sort_effects()

effects.push_back(effect);

effect->ParentTimeline(parentTimeline);
if (parentTimeline) {

    effect->ParentTimeline(parentTimeline);

std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

trackedObjectBBox->ParentClip(this);
final_cache.Clear();

effects.remove(effect);

final_cache.Clear();
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *frame->GetImage());

    frame->AddImage(background_canvas);
void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
{
    for (auto effect : effects)
    {
        if (effect->info.apply_before_clip && before_keyframes) {
            effect->GetFrame(frame, frame->number);
        } else if (!effect->info.apply_before_clip && !before_keyframes) {
            effect->GetFrame(frame, frame->number);
        }
if (timeline != nullptr && options != nullptr) {
bool Clip::isNear(double a, double b)
{
    return fabs(a - b) < 0.000001;
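// Absolute-epsilon comparison: two values closer than 1e-6 are treated as equal.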
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {

    if (!frame->has_image_data) {
std::shared_ptr<QImage> source_image = frame->GetImage();
std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
                                                                     timeline_size.height(),
                                                                     QImage::Format_RGBA8888_Premultiplied);
background_canvas->fill(QColor(Qt::transparent));
QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

QPainter painter(background_canvas.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

painter.setTransform(transform);

painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
painter.drawImage(0, 0, *source_image);
std::stringstream frame_number_str;

frame_number_str << frame->number;
painter.setPen(QColor("#ffffff"));
painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
frame->AddImage(background_canvas);
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {

    std::shared_ptr<QImage> source_image = frame->GetImage();

        "frame->number", frame->number,
        "Waveform()", Waveform(),
        "width", timeline_size.width(),
        "height", timeline_size.height());

    source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
    frame->AddImage(source_image);
QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
    switch (source_scale)
    {
        // (case labels elided in this excerpt)
        // SCALE_FIT: fit inside the target, preserving aspect ratio
        source_size.scale(target_width, target_height, Qt::KeepAspectRatio);

        // SCALE_STRETCH: fill the target exactly, ignoring aspect ratio
        source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);

        // SCALE_CROP: cover the target by expanding, preserving aspect ratio
        source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    std::shared_ptr<QImage> source_image = frame->GetImage();
unsigned char *pixels = source_image->bits();
for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
{
    pixels[byte_index + 0] *= alpha_value;
    pixels[byte_index + 1] *= alpha_value;
    pixels[byte_index + 2] *= alpha_value;
    pixels[byte_index + 3] *= alpha_value;
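// The image is premultiplied RGBA, so fading alpha requires scaling all four bytes per pixel, not just the alpha channel.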
1356 "alpha_value", alpha_value,
1357 "frame->number", frame->number);
QSize source_size = scale_size(source_image->size(), scale, width, height);
float parentObject_location_x = 0.0;
float parentObject_location_y = 0.0;
float parentObject_scale_x = 1.0;
float parentObject_scale_y = 1.0;
float parentObject_shear_x = 0.0;
float parentObject_shear_y = 0.0;
float parentObject_rotation = 0.0;
long parent_frame_number = frame->number + parent_start_offset;
parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();

long parent_frame_number = frame->number + parent_start_offset;

std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);

QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
                               parentClip->scale, width, height);
int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
                   parentClip->scale_x.GetValue(parent_frame_number);
int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
                    parentClip->scale_y.GetValue(parent_frame_number);

source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);
parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
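// The tracked box's normalized center (cx, cy) and rotation (r) are folded into this
// clip's location and rotation, relative to the parent clip's own keyframes.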
if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
    sx *= parentObject_scale_x;
    sy *= parentObject_scale_y;
}

float scaled_source_width = source_size.width() * sx;
float scaled_source_height = source_size.height() * sy;
// (gravity case labels elided in this excerpt)
// GRAVITY_TOP: center horizontally
x = (width - scaled_source_width) / 2.0;

// GRAVITY_TOP_RIGHT
x = width - scaled_source_width;

// GRAVITY_LEFT: center vertically
y = (height - scaled_source_height) / 2.0;

// GRAVITY_CENTER
x = (width - scaled_source_width) / 2.0;
y = (height - scaled_source_height) / 2.0;

// GRAVITY_RIGHT
x = width - scaled_source_width;
y = (height - scaled_source_height) / 2.0;

// GRAVITY_BOTTOM_LEFT
y = (height - scaled_source_height);

// GRAVITY_BOTTOM
x = (width - scaled_source_width) / 2.0;
y = (height - scaled_source_height);

// GRAVITY_BOTTOM_RIGHT
x = width - scaled_source_width;
y = (height - scaled_source_height);
1475 "Clip::get_transform (Gravity)",
1476 "frame->number", frame->number,
1477 "source_clip->gravity",
gravity,
1478 "scaled_source_width", scaled_source_width,
1479 "scaled_source_height", scaled_source_height);
QTransform transform;
float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1494 "Clip::get_transform (Build QTransform - if needed)",
1495 "frame->number", frame->number,
1498 "sx", sx,
"sy", sy);
if (!isNear(x, 0) || !isNear(y, 0)) {
    transform.translate(x, y);
}
if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
    float origin_x_offset = (scaled_source_width * origin_x_value);
    float origin_y_offset = (scaled_source_height * origin_y_value);
    transform.translate(origin_x_offset, origin_y_offset);
    transform.rotate(r);
    transform.shear(shear_x_value, shear_y_value);
    transform.translate(-origin_x_offset, -origin_y_offset);
}
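// Rotation and shear are applied about the clip's origin point: translate to the origin, rotate/shear, translate back.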
float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
    transform.scale(source_width_scale, source_height_scale);
}
int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {

    int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;

    return frame_number;