#
# Copyright 2017 Pixar Animation Studios
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""OpenTimelineIO CMX 3600 EDL Adapter"""
# Note: this adapter is not an ideal model for new adapters, but it works.
# If you want to write your own adapter, please see:
# https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html#
# TODO: Flesh out Attribute Handler
# TODO: Add line numbers to errors and warnings
# TODO: currently tracks with linked audio/video will lose their linkage when
# read into OTIO.
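# A minimal usage sketch (illustrative; normally you go through the adapter
# registry rather than importing this module directly):
#     import opentimelineio as otio
#     timeline = otio.adapters.read_from_file("cut.edl", rate=24)
#     otio.adapters.write_to_file(timeline, "conformed.edl")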
import os
import re
import math
import collections
import opentimelineio as otio
class EDLParseError(otio.exceptions.OTIOError):
pass
# regex for parsing the playback speed of an M2 event
SPEED_EFFECT_RE = re.compile(
    r"(?P<name>.*?)\s*(?P<speed>[0-9\.]*)\s*(?P<tc>[0-9:]{11})$"
)
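# For example (illustrative), an M2 comment body such as
#     "TAPE001 037.5 01:00:00:00"
# parses as name='TAPE001', speed='037.5', tc='01:00:00:00'.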
# these are all CMX_3600 transition codes
# the wipe is written in regex format because it is W### where the ### is
# a 'wipe code'
# @TODO: not currently read by the transition code
transition_regex_map = {
'C': 'cut',
'D': 'dissolve',
    r'W\d{3}': 'wipe',
'KB': 'key_background',
'K': 'key_foreground',
'KO': 'key_overlay'
}
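# For example (illustrative), a 24-frame dissolve shows up as a 9-field
# event line:
#     002  TAPE2  V  D  024  00:00:00:00 00:00:01:00 01:00:01:00 01:00:02:00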
# CMX_3600 supports some shorthand for channel assignments
# We name the actual tracks V and A1,A2,A3,etc.
# This channel_map tells you which track to use for each channel shorthand.
# Channels not listed here are used as track names verbatim.
channel_map = {
'A': ['A1'],
'AA': ['A1', 'A2'],
'B': ['V', 'A1'],
'A2/V': ['V', 'A2'],
'AA/V': ['V', 'A1', 'A2']
}
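# For example, an event with channel code 'AA/V' is added to tracks V, A1
# and A2, while an unmapped code such as 'A3' becomes a track name verbatim.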
# Currently, the 'style' argument determines
# the comment string for the media reference:
# 'avid': '* FROM CLIP:' (default)
# 'nucoda': '* FROM FILE:'
# When adding a new style, please be sure to add sufficient tests
# to verify both the new and existing styles.
VALID_EDL_STYLES = ['avid', 'nucoda']
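# For example (illustrative), the same media reference is written as
#     * FROM CLIP: /path/to/clip.mov    when style='avid'
#     * FROM FILE: /path/to/clip.mov    when style='nucoda'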
class EDLParser(object):
def __init__(self, edl_string, rate=24, ignore_timecode_mismatch=False):
self.timeline = otio.schema.Timeline()
# Start with no tracks. They will be added as we encounter them.
        # This dict maps a track name (e.g. "A2" or "V") to an OTIO Track.
self.tracks_by_name = {}
self.ignore_timecode_mismatch = ignore_timecode_mismatch
self.parse_edl(edl_string, rate=rate)
# TODO: Sort the tracks V, then A1,A2,etc.
def add_clip(self, line, comments, rate=24):
comment_handler = CommentHandler(comments)
clip_handler = ClipHandler(line, comment_handler.handled, rate=rate)
clip = clip_handler.clip
if comment_handler.unhandled:
clip.metadata.setdefault("cmx_3600", {})
clip.metadata['cmx_3600'].setdefault("comments", [])
clip.metadata['cmx_3600']['comments'] += (
comment_handler.unhandled
)
        # Each edit point between two clips is a transition. The default is a
        # cut. In the EDL format, the transition codes describe the transition
        # *into* the clip.
self.add_transition(
clip_handler,
clip_handler.transition_type,
clip_handler.transition_data
)
tracks = self.tracks_for_channel(clip_handler.channel_code)
for track in tracks:
edl_rate = clip_handler.edl_rate
record_in = otio.opentime.from_timecode(
clip_handler.record_tc_in,
edl_rate
)
record_out = otio.opentime.from_timecode(
clip_handler.record_tc_out,
edl_rate
)
src_duration = clip.duration()
rec_duration = record_out - record_in
if rec_duration != src_duration:
motion = comment_handler.handled.get('motion_effect')
freeze = comment_handler.handled.get('freeze_frame')
if motion is not None or freeze is not None:
# Adjust the clip to match the record duration
clip.source_range.duration = rec_duration
if freeze is not None:
clip.effects.append(otio.schema.FreezeFrame())
# XXX remove 'FF' suffix (writing edl will add it back)
if clip.name.endswith(' FF'):
clip.name = clip.name[:-3]
elif motion is not None:
fps = float(
SPEED_EFFECT_RE.match(motion).group("speed")
)
time_scalar = fps / rate
clip.effects.append(
otio.schema.LinearTimeWarp(time_scalar=time_scalar)
)
elif self.ignore_timecode_mismatch:
                    # Pretend there was no problem by adjusting the record_out.
                    # Note that we don't actually use record_out after this
                    # point in the code, since all of the subsequent math uses
                    # the clip's source_range. Adjusting record_out here simply
                    # documents the implications of ignoring the mismatch.
record_out = record_in + src_duration
else:
raise EDLParseError(
"Source and record duration don't match: {} != {}"
" for clip {}".format(
src_duration,
rec_duration,
clip.name
))
if track.source_range is None:
zero = otio.opentime.RationalTime(0, edl_rate)
track.source_range = otio.opentime.TimeRange(
start_time=zero - record_in,
duration=zero
)
track_end = track.duration() - track.source_range.start_time
if record_in < track_end:
if self.ignore_timecode_mismatch:
# shift it over
record_in = track_end
record_out = record_in + rec_duration
else:
raise EDLParseError(
"Overlapping record in value: {} for clip {}".format(
clip_handler.record_tc_in,
clip.name
))
# If the next clip is supposed to start beyond the end of the
# clips we've accumulated so far, then we need to add a Gap
# to fill that space. This can happen when an EDL has record
# timecodes that are sparse (e.g. from a single track of a
# multi-track composition).
if record_in > track_end and len(track) > 0:
gap = otio.schema.Gap()
gap.source_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, edl_rate),
duration=record_in - track_end
)
track.append(gap)
track.source_range.duration += gap.duration()
track.append(clip)
track.source_range.duration += clip.duration()
def guess_kind_for_track_name(self, name):
if name.startswith("V"):
return otio.schema.TrackKind.Video
if name.startswith("A"):
return otio.schema.TrackKind.Audio
return otio.schema.TrackKind.Video
def tracks_for_channel(self, channel_code):
# Expand channel shorthand into a list of track names.
if channel_code in channel_map:
track_names = channel_map[channel_code]
else:
track_names = [channel_code]
# Create any channels we don't already have
for track_name in track_names:
if track_name not in self.tracks_by_name:
track = otio.schema.Track(
name=track_name,
kind=self.guess_kind_for_track_name(track_name)
)
self.tracks_by_name[track_name] = track
self.timeline.tracks.append(track)
# Return a list of actual tracks
return [self.tracks_by_name[c] for c in track_names]
def add_transition(self, clip_handler, transition, data):
if transition not in ['C']:
md = clip_handler.clip.metadata.setdefault("cmx_3600", {})
md["transition"] = transition
def parse_edl(self, edl_string, rate=24):
        # EDL 'events' can consist of an indeterminate number of lines.
        # We translate each 'event' into a single clip (and possibly a
        # transition), then add them to every track that the event's
        # channel code maps to. The transition given in an 'event'
        # precedes its clip.
# remove all blank lines from the edl
edl_lines = [
l for l in (l.strip() for l in edl_string.splitlines()) if l
]
while edl_lines:
            # A basic for loop won't work cleanly, since we need to look
            # ahead at upcoming array elements to determine what type of
            # 'event' we are looking at.
line = edl_lines.pop(0)
if line.startswith('TITLE:'):
                # This is the first line of interest in an EDL;
                # it is required to be in the header.
self.timeline.name = line.replace('TITLE:', '').strip()
elif line.startswith('FCM'):
                # FCM can occur either in the header or before any 'event'.
                # In both cases we can ignore it, since it is meant for
                # tape timecode.
pass
elif line.startswith('SPLIT'):
                # SPLIT is the only comment preceding an 'event' that we care
                # about in our context. It simply means the next two clips
                # will share the same comment data; it is for reading
                # purposes only. See the illustrative lines below.
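                # An illustrative SPLIT line:
                #     SPLIT:    AUDIO DELAY:    00:00:00:12
                # (the delay value is read from the last whitespace-separated
                # token on the line)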
audio_delay = None
video_delay = None
if 'AUDIO DELAY' in line:
audio_delay = line.split()[-1].strip()
if 'VIDEO DELAY' in line:
video_delay = line.split()[-1].strip()
if audio_delay and video_delay:
raise EDLParseError(
'both audio and video delay declared after SPLIT.'
)
                if not (audio_delay or video_delay):
                    raise EDLParseError(
                        'neither audio nor video delay declared after SPLIT.'
                    )
line_1 = edl_lines.pop(0)
line_2 = edl_lines.pop(0)
comments = []
while edl_lines:
                    if re.match(r'^\D', edl_lines[0]):
comments.append(edl_lines.pop(0))
else:
break
self.add_clip(line_1, comments, rate=rate)
self.add_clip(line_2, comments, rate=rate)
            elif line[0].isdigit():
                # All 'events' start with an edit decision, denoted by the
                # line beginning with a padded integer 000-999.
comments = []
while edl_lines:
                    # Any non-numbered lines after an edit decision should be
                    # treated as 'comments'. Comments are string tags used by
                    # the reader to carry extra information that cannot be
                    # expressed in the restricted EDL format.
                    if re.match(r'^\D', edl_lines[0]):
comments.append(edl_lines.pop(0))
else:
break
self.add_clip(line, comments, rate=rate)
else:
raise EDLParseError('Unknown event type')
for track in self.timeline.tracks:
# if the source_range is the same as the available_range
# then we don't need to set it at all.
if track.source_range == track.available_range():
track.source_range = None
class ClipHandler(object):
def __init__(self, line, comment_data, rate=24):
self.clip_num = None
self.reel = None
self.channel_code = None
self.edl_rate = rate
self.transition_id = None
self.transition_data = None
self.source_tc_in = None
self.source_tc_out = None
self.record_tc_in = None
self.record_tc_out = None
self.parse(line)
self.clip = self.make_clip(comment_data)
def make_clip(self, comment_data):
clip = otio.schema.Clip()
clip.name = str(self.clip_num)
# BLACK/BL and BARS are called out as "Special Source Identifiers" in
# the documents referenced here:
# https://github.com/PixarAnimationStudios/OpenTimelineIO#cmx3600-edl
if self.reel in ['BL', 'BLACK']:
clip.media_reference = otio.schema.GeneratorReference()
# TODO: Replace with enum, once one exists
clip.media_reference.generator_kind = 'black'
elif self.reel == 'BARS':
clip.media_reference = otio.schema.GeneratorReference()
# TODO: Replace with enum, once one exists
clip.media_reference.generator_kind = 'SMPTEBars'
elif 'media_reference' in comment_data:
clip.media_reference = otio.schema.ExternalReference()
clip.media_reference.target_url = comment_data[
'media_reference'
]
else:
clip.media_reference = otio.schema.MissingReference()
            # This could currently break without a 'FROM CLIP' comment;
            # without one there is no 'media_reference'. Do we have a
            # default clip name?
if 'clip_name' in comment_data:
clip.name = comment_data["clip_name"]
elif (
clip.media_reference and
hasattr(clip.media_reference, 'target_url') and
clip.media_reference.target_url is not None
):
clip.name = os.path.splitext(
os.path.basename(clip.media_reference.target_url)
)[0]
asc_sop = comment_data.get('asc_sop', None)
asc_sat = comment_data.get('asc_sat', None)
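        # ASC CDL grading data arrives via comments. An illustrative pair:
        #     * ASC_SOP (1.0 1.0 1.0)(0.0 0.0 0.0)(1.0 1.0 1.0)
        #     * ASC_SAT 0.9
        # By this point CommentHandler has stripped the '* ASC_SOP' and
        # '* ASC_SAT' prefixes, leaving only the bodies in comment_data.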
if asc_sop or asc_sat:
slope = (1, 1, 1)
offset = (0, 0, 0)
power = (1, 1, 1)
sat = 1.0
if asc_sop:
                triple = r'([-+]?[\d.]+) ([-+]?[\d.]+) ([-+]?[\d.]+)'
                m = re.match(
                    r'\(' + triple + r'\)\s*\(' + triple + r'\)\s*\(' + triple + r'\)',
                    asc_sop
                )
if m:
floats = [float(g) for g in m.groups()]
slope = [floats[0], floats[1], floats[2]]
offset = [floats[3], floats[4], floats[5]]
power = [floats[6], floats[7], floats[8]]
else:
raise EDLParseError(
'Invalid ASC_SOP found: {}'.format(asc_sop))
if asc_sat:
sat = float(asc_sat)
clip.metadata['cdl'] = {
'asc_sat': sat,
'asc_sop': {
'slope': slope,
'offset': offset,
'power': power
}
}
if 'locator' in comment_data:
# An example EDL locator line looks like this:
# * LOC: 01:00:01:14 RED ANIM FIX NEEDED
# We get the part after "LOC: " as the comment_data entry
# Given the fixed-width nature of these, we could be more
# strict about the field widths, but there are many
# variations of EDL, so if we are lenient then maybe we
# can handle more of them? Only real-world testing will
# determine this for sure...
m = re.match(
r'(\d\d:\d\d:\d\d:\d\d)\s+(\w*)\s+(.*)',
comment_data["locator"]
)
if m:
marker = otio.schema.Marker()
marker.marked_range = otio.opentime.TimeRange(
start_time=otio.opentime.from_timecode(
m.group(1),
self.edl_rate
),
duration=otio.opentime.RationalTime()
)
# always write the source value into metadata, in case it
# is not a valid enum somehow.
color_parsed_from_file = m.group(2)
marker.metadata = {
"cmx_3600": {
"color": color_parsed_from_file
}
}
                # Use the parsed color if it is a valid marker color;
                # otherwise fall back to red.
if hasattr(
otio.schema.MarkerColor,
color_parsed_from_file.upper()
):
marker.color = color_parsed_from_file.upper()
else:
marker.color = otio.schema.MarkerColor.RED
marker.name = m.group(3)
clip.markers.append(marker)
else:
# TODO: Should we report this as a warning somehow?
pass
clip.source_range = otio.opentime.range_from_start_end_time(
otio.opentime.from_timecode(self.source_tc_in, self.edl_rate),
otio.opentime.from_timecode(self.source_tc_out, self.edl_rate)
)
return clip
def parse(self, line):
fields = tuple(e.strip() for e in line.split() if e.strip())
field_count = len(fields)
if field_count == 9:
            # has transition data
            # this is for edits with timing or other needed info
            # transition data for D and W*** transitions is an integer that
            # denotes frame count
            # I haven't figured out how the key transitions (K, KB, KO) work
(
self.clip_num,
self.reel,
self.channel_code,
self.transition_type,
self.transition_data,
self.source_tc_in,
self.source_tc_out,
self.record_tc_in,
self.record_tc_out
) = fields
elif field_count == 8:
# no transition data
# this is for basic cuts
(
self.clip_num,
self.reel,
self.channel_code,
self.transition_type,
self.source_tc_in,
self.source_tc_out,
self.record_tc_in,
self.record_tc_out
) = fields
else:
raise EDLParseError(
'incorrect number of fields [{0}] in form statement: {1}'
''.format(field_count, line))
# Frame numbers (not just timecode) are ok
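        # e.g. a bare field of '86400' at 24 fps becomes '01:00:00:00'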
for prop in [
'source_tc_in',
'source_tc_out',
'record_tc_in',
'record_tc_out'
]:
if ':' not in getattr(self, prop):
setattr(
self,
prop,
otio.opentime.to_timecode(
otio.opentime.from_frames(
int(getattr(self, prop)),
self.edl_rate
),
self.edl_rate
)
)
class CommentHandler(object):
    # this is the form that all comment 'id' tags take
    regex_template = r'\*?\s*{id}:?\s*(?P<comment_body>.*)'
# this should be a map of all known comments that we can read
# 'FROM CLIP' or 'FROM FILE' is a required comment to link media
# An exception is raised if both 'FROM CLIP' and 'FROM FILE' are found
# needs to be ordered so that FROM CLIP NAME gets matched before FROM CLIP
comment_id_map = collections.OrderedDict([
('FROM CLIP NAME', 'clip_name'),
('FROM CLIP', 'media_reference'),
('FROM FILE', 'media_reference'),
('LOC', 'locator'),
('ASC_SOP', 'asc_sop'),
('ASC_SAT', 'asc_sat'),
('M2', 'motion_effect'),
('\\* FREEZE FRAME', 'freeze_frame'),
])
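    # For example, the comment line
    #     '* FROM CLIP NAME:  shot_010_v2'
    # matches the 'FROM CLIP NAME' pattern above and is stored as
    #     handled['clip_name'] = 'shot_010_v2'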
def __init__(self, comments):
self.handled = {}
self.unhandled = []
for comment in comments:
self.parse(comment)
def parse(self, comment):
for comment_id, comment_type in self.comment_id_map.items():
regex = self.regex_template.format(id=comment_id)
match = re.match(regex, comment)
if match:
self.handled[comment_type] = match.group(
'comment_body'
).strip()
break
else:
stripped = comment.lstrip('*').strip()
if stripped:
self.unhandled.append(stripped)
def _expand_transitions(timeline):
"""Convert clips with metadata/transition == 'D' into OTIO transitions."""
tracks = timeline.tracks
remove_list = []
replace_list = []
append_list = []
for track in tracks:
track_iter = iter(track)
# avid inserts an extra clip for the source
prev_prev = None
prev = None
clip = next(track_iter, None)
next_clip = next(track_iter, None)
while clip is not None:
transition_type = clip.metadata.get('cmx_3600', {}).get(
'transition',
'C'
)
if transition_type == 'C':
# nothing to do, continue to the next iteration of the loop
prev_prev = prev
prev = clip
clip = next_clip
next_clip = next(track_iter, None)
continue
if transition_type not in ['D']:
raise EDLParseError(
"Transition type '{}' not supported by the CMX EDL reader "
"currently.".format(transition_type)
)
transition_duration = clip.duration()
# EDL doesn't have enough data to know where the cut point was, so
# this arbitrarily puts it in the middle of the transition
pre_cut = math.floor(transition_duration.value / 2)
post_cut = transition_duration.value - pre_cut
mid_tran_cut_pre_duration = otio.opentime.RationalTime(
pre_cut,
transition_duration.rate
)
mid_tran_cut_post_duration = otio.opentime.RationalTime(
post_cut,
transition_duration.rate
)
# expand the previous
expansion_clip = None
if prev and not prev_prev:
expansion_clip = prev
elif prev_prev:
expansion_clip = prev_prev
if prev:
remove_list.append((track, prev))
expansion_clip.source_range.duration += mid_tran_cut_pre_duration
# rebuild the clip as a transition
new_trx = otio.schema.Transition(
name=clip.name,
# only supported type at the moment
transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve,
metadata=clip.metadata
)
new_trx.in_offset = mid_tran_cut_pre_duration
new_trx.out_offset = mid_tran_cut_post_duration
            # queue the replacement as (track, from_clip, to_transition)
replace_list.append((track, clip, new_trx))
# expand the next_clip
if next_clip:
next_clip.source_range.start_time -= mid_tran_cut_post_duration
next_clip.source_range.duration += mid_tran_cut_post_duration
else:
fill = otio.schema.Gap(
source_range=otio.opentime.TimeRange(
duration=mid_tran_cut_post_duration,
start_time=otio.opentime.RationalTime(
0,
transition_duration.rate
)
)
)
append_list.append((track, fill))
prev = clip
clip = next_clip
next_clip = next(track_iter, None)
for (track, from_clip, to_transition) in replace_list:
track[track.index(from_clip)] = to_transition
    for (track, clip_to_remove) in list(set(remove_list)):
        track.remove(clip_to_remove)
for (track, clip) in append_list:
track.append(clip)
return timeline
def read_from_string(input_str, rate=24, ignore_timecode_mismatch=False):
"""Reads a CMX Edit Decision List (EDL) from a string.
Since EDLs don't contain metadata specifying the rate they are meant
for, you may need to specify the rate parameter (default is 24).
By default, read_from_string will throw an exception if it discovers
invalid timecode in the EDL. For example, if a clip's record timecode
overlaps with the previous cut. Since this is a common mistake in
many EDLs, you can specify ignore_timecode_mismatch=True, which will
    suppress these errors and attempt to guess at the correct record
timecode based on the source timecode and adjacent cuts.
For best results, you may wish to do something like this:
Example:
>>> try:
        ...     timeline = otio.adapters.read_from_file("mymovie.edl", rate=30)
... except EDLParseError:
... print('Log a warning here')
... try:
        ...     timeline = otio.adapters.read_from_file(
... "mymovie.edl",
... rate=30,
... ignore_timecode_mismatch=True)
... except EDLParseError:
... print('Log an error here')
"""
parser = EDLParser(
input_str,
rate=float(rate),
ignore_timecode_mismatch=ignore_timecode_mismatch
)
result = parser.timeline
result = _expand_transitions(result)
return result
def write_to_string(input_otio, rate=None, style='avid'):
# TODO: We should have convenience functions in Timeline for this?
# also only works for a single video track at the moment
video_tracks = [t for t in input_otio.tracks
if t.kind == otio.schema.TrackKind.Video]
audio_tracks = [t for t in input_otio.tracks
if t.kind == otio.schema.TrackKind.Audio]
if len(video_tracks) != 1:
raise otio.exceptions.NotSupportedError(
"Only a single video track is supported, got: {}".format(
len(video_tracks)
)
)
if len(audio_tracks) > 2:
raise otio.exceptions.NotSupportedError(
"No more than 2 audio tracks are supported."
)
# TODO: We should try to detect the frame rate and output an
# appropriate "FCM: NON-DROP FRAME" etc here.
writer = EDLWriter(
tracks=input_otio.tracks,
# Assume all rates are the same as the 1st track's
rate=rate or input_otio.tracks[0].duration().rate,
style=style
)
return writer.get_content_for_track_at_index(0, title=input_otio.name)
class EDLWriter(object):
def __init__(self, tracks, rate, style):
self._tracks = tracks
self._rate = rate
self._style = style
if style not in VALID_EDL_STYLES:
raise otio.exceptions.NotSupportedError(
"The EDL style '{}' is not supported.".format(
style
)
)
def get_content_for_track_at_index(self, idx, title):
track = self._tracks[idx]
# Add a gap if the last child is a transition.
if isinstance(track[-1], otio.schema.Transition):
gap = otio.schema.Gap(
source_range=otio.opentime.TimeRange(
start_time=track[-1].duration(),
duration=otio.opentime.RationalTime(0.0, self._rate)
)
)
track.append(gap)
# Note: Transitions in EDLs are unconventionally represented.
#
# Where a transition might normally be visualized like:
# |---57.0 Trans 43.0----|
# |------Clip1 102.0------|----------Clip2 143.0----------|Clip3 24.0|
#
# In an EDL it can be thought of more like this:
# |---0.0 Trans 100.0----|
# |Clip1 45.0|----------------Clip2 200.0-----------------|Clip3 24.0|
# Adjust cut points to match EDL event representation.
for idx, child in enumerate(track):
if isinstance(child, otio.schema.Transition):
if idx != 0:
# Shorten the a-side
track[idx - 1].source_range.duration -= child.in_offset
# Lengthen the b-side
track[idx + 1].source_range.start_time -= child.in_offset
track[idx + 1].source_range.duration += child.in_offset
                # Normalize the transition so its entire duration is carried
                # by out_offset (in_offset becomes zero).
in_offset = child.in_offset
child.in_offset = otio.opentime.RationalTime(0.0, self._rate)
child.out_offset += in_offset
# Group events into either simple clip/a-side or transition and b-side
# to match EDL edit/event representation and edit numbers.
events = []
for idx, child in enumerate(track):
if isinstance(child, otio.schema.Transition):
# Transition will be captured in subsequent iteration.
continue
prv = track[idx - 1] if idx > 0 else None
if isinstance(prv, otio.schema.Transition):
events.append(
DissolveEvent(
events[-1] if len(events) else None,
prv,
child,
self._tracks,
track.kind,
self._rate,
self._style
)
)
elif isinstance(child, otio.schema.Clip):
events.append(
Event(
child,
self._tracks,
track.kind,
self._rate,
self._style
)
)
elif isinstance(child, otio.schema.Gap):
# Gaps are represented as missing record timecode, no event
# needed.
pass
content = "TITLE: {}\n\n".format(title) if title else ''
# Convert each event/dissolve-event into plain text.
for idx, event in enumerate(events):
event.edit_number = idx + 1
content += event.to_edl_format() + '\n'
return content
def _supported_timing_effects(clip):
return [
fx for fx in clip.effects
if isinstance(fx, otio.schema.LinearTimeWarp)
]
def _relevant_timing_effect(clip):
    # Check that the clip's timing effects are supported, and that there is
    # at most one of them.
effects = _supported_timing_effects(clip)
if effects != clip.effects:
for thing in clip.effects:
if thing not in effects and isinstance(thing, otio.schema.TimeEffect):
raise otio.exceptions.NotSupportedError(
"Clip contains timing effects not supported by the EDL"
" adapter.\nClip: {}".format(str(clip)))
timing_effect = None
if effects:
timing_effect = effects[0]
if len(effects) > 1:
raise otio.exceptions.NotSupportedError(
"EDL Adapter only allows one timing effect / clip."
)
return timing_effect
class Event(object):
def __init__(
self,
clip,
tracks,
kind,
rate,
style
):
line = EventLine(kind, rate)
line.reel = _reel_from_clip(clip)
line.source_in = clip.source_range.start_time
line.source_out = clip.source_range.end_time_exclusive()
timing_effect = _relevant_timing_effect(clip)
if timing_effect:
if timing_effect.effect_name == "FreezeFrame":
line.source_out = line.source_in + otio.opentime.RationalTime(
1,
line.source_in.rate
)
elif timing_effect.effect_name == "LinearTimeWarp":
value = clip.trimmed_range().duration.value / timing_effect.time_scalar
line.source_out = (
line.source_in + otio.opentime.RationalTime(value, rate))
range_in_timeline = clip.transformed_time_range(
clip.trimmed_range(),
tracks
)
line.record_in = range_in_timeline.start_time
line.record_out = range_in_timeline.end_time_exclusive()
self.line = line
self.comments = _generate_comment_lines(
clip=clip,
style=style,
edl_rate=rate,
from_or_to='FROM'
)
self.clip = clip
self.source_out = line.source_out
self.record_out = line.record_out
self.reel = line.reel
def __str__(self):
return '{type}({name})'.format(
type=self.clip.schema_name(),
name=self.clip.name
)
def to_edl_format(self):
"""
Example output:
002 AX V C 00:00:00:00 00:00:00:05 00:00:00:05 00:00:00:10
* FROM CLIP NAME: test clip2
* FROM FILE: S:\\var\\tmp\\test.exr
"""
lines = [self.line.to_edl_format(self.edit_number)]
lines += self.comments if len(self.comments) else []
return "\n".join(lines)
class DissolveEvent(object):
def __init__(
self,
a_side_event,
transition,
b_side_clip,
tracks,
kind,
rate,
style
):
# Note: We don't make the A-Side event line here as it is represented
# by its own event (edit number).
cut_line = EventLine(kind, rate)
if a_side_event:
cut_line.reel = a_side_event.reel
cut_line.source_in = a_side_event.source_out
cut_line.source_out = a_side_event.source_out
cut_line.record_in = a_side_event.record_out
cut_line.record_out = a_side_event.record_out
self.from_comments = _generate_comment_lines(
clip=a_side_event.clip,
style=style,
edl_rate=rate,
from_or_to='FROM'
)
else:
cut_line.reel = 'BL'
cut_line.source_in = otio.opentime.RationalTime(0.0, rate)
cut_line.source_out = otio.opentime.RationalTime(0.0, rate)
cut_line.record_in = otio.opentime.RationalTime(0.0, rate)
cut_line.record_out = otio.opentime.RationalTime(0.0, rate)
self.cut_line = cut_line
dslve_line = EventLine(kind, rate)
dslve_line.reel = _reel_from_clip(b_side_clip)
dslve_line.source_in = b_side_clip.source_range.start_time
dslve_line.source_out = b_side_clip.source_range.end_time_exclusive()
range_in_timeline = b_side_clip.transformed_time_range(
b_side_clip.trimmed_range(),
tracks
)
dslve_line.record_in = range_in_timeline.start_time