diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/__init__.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
new file mode 100644
index 000000000..2615ce621
--- /dev/null
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
@@ -0,0 +1,130 @@
+import opentimelineio as otio
+
+# TODO: clip comparable??? ClipInfo
+# source clip or clip ref?
+
+
+class ClipData:
+    """ClipData holds the information from an OTIO clip that's necessary for
+    comparing differences. It also keeps some information associated with the
+    clip after comparisons are made, such as a matched ClipData and a note
+    about what has changed.
+
+    source_clip = original OTIO clip the ClipData represents
+    full_name = full name of source_clip
+    name and version = full_name split on its first space
+        ex: full_name: clipA version1, name: clipA, version: version1
+    """
+    def __init__(self, source_clip, track_num, note=None):
+        self.full_name = source_clip.name
+        self.name, self.version = self.splitFullName(source_clip)
+        self.media_ref = source_clip.media_reference
+        self.source_range = source_clip.source_range
+        self.timeline_range = source_clip.trimmed_range_in_parent()
+        self.track_num = track_num
+        self.source_clip = source_clip
+        self.note = note
+        self.matched_clipData = None
+
+    def splitFullName(self, clip):
+        """Split the full name into name and version on white space, assuming
+        the structure "clipA v1" where clipA is the name and v1 is the
+        version. Returns None for version if the full name contains no
+        spaces."""
+        parts = clip.name.split(" ")
+        shortName = parts[0]
+        version = parts[1] if len(parts) > 1 else None
+
+        return shortName, version
+
+    def printData(self):
+        """Print all parameters of the ClipData to the console"""
+        print("name: ", self.name)
+        print("version: ", self.version)
+        print("media ref: ", self.media_ref)
+        print("source start time: ", self.source_range.start_time.value,
+              " duration: ", self.source_range.duration.value)
+        print("timeline start time:", self.timeline_range.start_time.value,
+              " duration: ", self.timeline_range.duration.value)
+        if self.note is not None:
+            print("note: ", self.note)
+        print("source clip: ", self.source_clip.name)
+
+    def sameName(self, cA):
+        """Compare names case-insensitively and return whether they are the
+        same"""
+        return self.name.lower() == cA.name.lower()
+
+    # note: local and source duration should always match, can assume same
+    def sameDuration(self, cA):
+        """Compare the duration within the timeline of this ClipData
+        against another ClipData"""
+        return self.timeline_range.duration.value == cA.timeline_range.duration.value
+
+    def checkSame(self, cA):
+        """Check if this ClipData is the exact same as another ClipData or if
+        it's the same but just moved along the timeline. Updates the note
+        based on what changed"""
+        isSame = False
+        # check names are same
+        if self.sameName(cA):
+            # check source range is same
+            if (self.source_range == cA.source_range):
+                # check in same place on timeline
+                if (self.timeline_range == cA.timeline_range):
+                    isSame = True
+                # check duration is same but not necessarily in same place
+                # on timeline
+                # TODO: change to else? (does the elif always run?)
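+                # Note: reaching this branch means the source ranges matched,
+                # so their durations match too; given the module's assumption
+                # that source and timeline durations agree, sameDuration()
+                # should always be True here and the elif below effectively
+                # acts as an else for the lateral-shift case (see TODO).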
+                elif (self.sameDuration(cA)):
+                    # Note: currently only checks for lateral shifts, doesn't
+                    # check for reordering of clips
+                    isSame = True
+                    self.note = "shifted laterally in track"
+        return isSame
+
+    def checkEdited(self, cA):
+        """Compare two ClipDatas and check whether the clip has been edited"""
+        isEdited = False
+
+        # Note: assumption that source range and timeline range duration always equal
+        # TODO: sometimes asserts get triggered, more investigation needed
+        # assert(self.source_range.duration.value == self.timeline_range.duration.value
+        #        ), "clip source range and timeline range durations don't match"
+        # assert(cA.source_range.duration.value == cA.timeline_range.duration.value
+        #        ), "clip source range and timeline range durations don't match"
+
+        selfDur = self.source_range.duration
+        cADur = cA.source_range.duration
+
+        selfSourceStart = self.source_range.start_time
+        cASourceStart = cA.source_range.start_time
+
+        if (self.source_range != cA.source_range):
+            self.note = "source range changed"
+            isEdited = True
+            deltaFramesStr = str(abs(selfDur.to_frames() - cADur.to_frames()))
+
+            if (selfDur.value == cADur.value):
+                self.note = "start time in source range changed"
+
+            # clip duration shorter
+            elif (selfDur.value < cADur.value):
+                self.note = "trimmed " + deltaFramesStr + " frames"
+
+                if (selfSourceStart.value == cASourceStart.value):
+                    self.note = "trimmed tail by " + deltaFramesStr + " frames"
+                # start time moved later in the source -> frames removed at
+                # the head
+                elif (selfSourceStart.value > cASourceStart.value):
+                    self.note = "trimmed head by " + deltaFramesStr + " frames"
+
+            # clip duration longer
+            elif (selfDur.value > cADur.value):
+                self.note = "lengthened " + deltaFramesStr + " frames"
+
+                if (selfSourceStart.value == cASourceStart.value):
+                    self.note = "lengthened tail by " + deltaFramesStr + " frames"
+                # start time moved earlier in the source -> frames added at
+                # the head
+                elif (selfSourceStart.value < cASourceStart.value):
+                    self.note = "lengthened head by " + deltaFramesStr + " frames"
+
+        return isEdited
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
new file mode 100644
index 000000000..9790730f8
--- /dev/null
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
@@ -0,0 +1,514 @@
+import copy
+from collections import namedtuple
+
+import opentimelineio as otio
+
+from .clipData import ClipData
+from . import makeOtio
+
+
+def diffTimelines(timelineA, timelineB):
+    '''Diff two OTIO timelines and identify how clips on video and/or audio
+    tracks changed from timeline A to timeline B.
+    Return an annotated otio timeline with the differences and print a text
+    summary to the console.
+
+    Parameters:
+        timelineA (otio.schema.Timeline()): timeline from the file you want to
+            compare against, ex. clip1 version 1
+        timelineB (otio.schema.Timeline()): timeline from the file you want to
+            compare, ex. clip1 version 2
+
+    Returns:
+        outputTimeline (otio.schema.Timeline()): timeline with color coded clips
+        and marker annotations showing the differences between the input tracks,
+        with the tracks from timeline B stacked on top of timeline A
+    '''
+    hasVideo = False
+    hasAudio = False
+
+    # check input timelines for video and audio tracks
+    if len(timelineA.video_tracks()) > 0 or len(timelineB.video_tracks()) > 0:
+        hasVideo = True
+
+    if len(timelineA.audio_tracks()) > 0 or len(timelineB.audio_tracks()) > 0:
+        hasAudio = True
+
+    makeTimelineSummary(timelineA, timelineB)
+
+    outputTimeline = otio.schema.Timeline()
+    # process video tracks, audio tracks, or both
+    if hasVideo and hasAudio:
+        videoClipTable = categorizeClipsByTracks(
+            timelineA.video_tracks(), timelineB.video_tracks())
+        audioClipTable = categorizeClipsByTracks(
+            timelineA.audio_tracks(), timelineB.audio_tracks())
+
+        makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "summary")
+        makeSummary(audioClipTable, otio.schema.Track.Kind.Audio, "summary")
+
+        videoTl = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)
+        outputTimeline = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)
+        # combine
+        for t in videoTl.tracks:
+            outputTimeline.tracks.append(copy.deepcopy(t))
+
+    elif hasVideo:
+        videoClipTable = categorizeClipsByTracks(
+            timelineA.video_tracks(), timelineB.video_tracks())
+        makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "summary")
+        outputTimeline = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)
+
+    elif hasAudio:
+        audioClipTable = categorizeClipsByTracks(
+            timelineA.audio_tracks(), timelineB.audio_tracks())
+        makeSummary(audioClipTable, otio.schema.Track.Kind.Audio, "summary")
+        outputTimeline = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)
+
+    else:
+        print("No video or audio tracks found in either timeline.")
+
+    return outputTimeline
+
+
+def findClones(clips):
+    """Separate the cloned ClipDatas (ones that share the same name) from the
+    unique ClipDatas and return both
+
+    Parameters:
+        clips (list of ClipDatas): list of ClipDatas
+
+    Returns:
+        clones (dictionary): dictionary of all clones in the group of ClipDatas
+            keys: name of clone
+            values: list of ClipDatas of that name
+        nonClones (list): list of unique ClipDatas in the group
+    """
+
+    clones = {}
+    nonClones = []
+    names = []
+
+    for c in clips:
+        names.append(c.name)
+
+    for c in clips:
+        if c.name in clones:
+            clones[c.name].append(c)
+        elif names.count(c.name) > 1:
+            clones[c.name] = [c]
+        else:
+            nonClones.append(c)
+
+    return clones, nonClones
+
+
+def sortClones(clipDatasA, clipDatasB):
+    """Identify cloned ClipDatas (ones that share the same name) across two
+    groups of ClipDatas and separate them from the unique
+    ClipDatas (ones that only appear once in each group)"""
+    # find cloned clips and separate out from unique clips
+    clonesA, nonClonesA = findClones(clipDatasA)
+    clonesB, nonClonesB = findClones(clipDatasB)
+
+    # if a clip is a clone in the other timeline, move it into the clones
+    # table as well, leaving strictly unique clips in nonClones
+    # (iterate over copies since the lists are modified while looping)
+    for c in list(nonClonesA):
+        if c.name in clonesB.keys():
+            clonesA[c.name] = [c]
+            nonClonesA.remove(c)
+    for c in list(nonClonesB):
+        if c.name in clonesA.keys():
+            clonesB[c.name] = [c]
+            nonClonesB.remove(c)
+
+    return (clonesA, nonClonesA), (clonesB, nonClonesB)
+
+
+def compareClones(clonesA, clonesB):
+    """Compare two groups of cloned ClipDatas and categorize into
+    added, unchanged, or deleted"""
+    added = []
+    unchanged = []
+    deleted = []
+
+    for nameB in clonesB:
+        # if there are no clips in timeline A with the same name
+        # as cloneB, all of the clones of cloneB are new and added
+        if nameB not in clonesA:
+            added.extend(clonesB[nameB])
+
+        # the name matched, so clones exist in both A and B; check whether
+        # any of them are the same clips
+        # Note: a potential categorization: the first clone is "edited" and the
+        # rest are "added"/"deleted" -> depends on how we want to define it
+        # Note: currently, all clones that aren't the exact same get categorized
+        # as either "added" or "deleted"
+        else:
+            clipsA = clonesA[nameB]
+            clipsB = clonesB[nameB]
+
+            for clipB in clipsB:
+                for clipA in clipsA:
+                    isSame = clipB.checkSame(clipA)
+                    if (isSame):
+                        unchanged.append(clipB)
+                    else:
+                        if (clipB not in added):
+                            added.append(clipB)
+                        if (clipA not in deleted):
+                            deleted.append(clipA)
+
+    # same as above for deleted clips
+    for nameA in clonesA:
+        if nameA not in clonesB:
+            deleted.extend(clonesA[nameA])
+
+    return added, unchanged, deleted
+
+
+def compareClips(clipDatasA, clipDatasB, nameType=""):
+    """Compare two groups of unique ClipDatas and categorize into
+    added, edited, unchanged, and deleted"""
+    namesA = {}
+    namesB = {}
+
+    added = []
+    edited = []
+    unchanged = []
+    deleted = []
+
+    # use full_name if nameType is specified,
+    # otherwise default to using name
+    if nameType.lower() == "full":
+        for c in clipDatasA:
+            namesA[c.full_name] = c
+        for c in clipDatasB:
+            namesB[c.full_name] = c
+    else:
+        for c in clipDatasA:
+            namesA[c.name] = c
+        for c in clipDatasB:
+            namesB[c.name] = c
+
+    for cB in clipDatasB:
+        # check which name to use
+        clipDataBName = cB.name
+        if nameType.lower() == "full":
+            clipDataBName = cB.full_name
+
+        # do comparisons
+        if clipDataBName not in namesA:
+            added.append(cB)
+        else:
+            if namesA[clipDataBName] is None:
+                print("Warning: matched ClipData for", clipDataBName, "is None")
+
+            cB.matched_clipData = namesA[clipDataBName]
+            isSame = cB.checkSame(cB.matched_clipData)
+            if (isSame):
+                unchanged.append(cB)
+            else:
+                isEdited = cB.checkEdited(cB.matched_clipData)
+                if (isEdited):
+                    edited.append(cB)
+                else:
+                    print("======== not categorized ==========")
+                    cA = namesA[clipDataBName]
+                    print("Clips: ", cA.name, clipDataBName)
+
+    for cA in clipDatasA:
+        clipDataAName = cA.name
+        if nameType.lower() == "full":
+            clipDataAName = cA.full_name
+        if clipDataAName not in namesB:
+            deleted.append(cA)
+
+    return added, edited, unchanged, deleted
+
+
+def compareTracks(trackA, trackB, trackNum):
+    """Compare clips in two OTIO tracks and categorize into
+    added, edited, same, and deleted"""
+    clipDatasA = []
+    clipDatasB = []
+
+    for c in trackA.find_clips():
+        # put clip info into ClipData
+        cd = ClipData(c, trackNum)
+        clipDatasA.append(cd)
+
+    for c in trackB.find_clips():
+        # put clip info into ClipData
+        cd = ClipData(c, trackNum)
+        clipDatasB.append(cd)
+
+    (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB)
+
+    # compare and categorize unique clips
+    added, edited, unchanged, deleted = compareClips(nonClonesA, nonClonesB)
+
+    # compare and categorize cloned clips
+    addedClone, unchangedClone, deletedClone = compareClones(clonesA, clonesB)
+    added.extend(addedClone)
+    unchanged.extend(unchangedClone)
+    deleted.extend(deletedClone)
+
+    return added, edited, unchanged, deleted
+
+
+# TODO: update all "same" to "unchanged"
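+
+
+# Illustrative example (hypothetical names): if track A holds "shotA v1" and
+# "shotB v1" while track B holds "shotA v2" (with a shorter source range) and
+# "shotC v1", compareClips keys on the short names and returns "shotC" as
+# added, "shotA" as edited (with a note from checkEdited), and "shotB" as
+# deleted.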
+
+
+def checkMoved(allDel, allAdd):
+    """Identify ClipDatas that have moved between different tracks.
+    """
+    # ones found as same = moved
+    # ones found as edited = moved and edited
+
+    # we want to compare full names to account for different departments/takes
+    # ex. shotA (layout123) and shotA (anim123) should count as an add
+    # rather than a move
+    newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd, nameType="full")
+
+    # remove clips that moved within the same track; keep only clips moved
+    # between tracks
+    moved = [clip for clip in moved if clip.track_num !=
+             clip.matched_clipData.track_num]
+
+    for clip in moved:
+        clip.note = "Moved from track: " + str(clip.matched_clipData.track_num)
+    for i in moveEdit:
+        if not i.note:
+            i.note = "Moved from track: " + str(i.matched_clipData.track_num)
+        else:
+            i.note += " and moved from track " + str(i.matched_clipData.track_num)
+
+    return newAdd, moveEdit, moved, newDel
+
+
+def sortMoved(clipTable):
+    """Put ClipDatas that have moved between tracks into their own category and
+    remove them from their previous category"""
+    allAdd = []
+    allEdit = []
+    allSame = []
+    allDel = []
+
+    for track in clipTable.keys():
+        clipGroup = clipTable[track]
+        if "add" in clipGroup.keys():
+            allAdd.extend(clipGroup["add"])
+        if "delete" in clipGroup.keys():
+            allDel.extend(clipGroup["delete"])
+        if "same" in clipGroup.keys():
+            allSame.extend(clipGroup["same"])
+        if "edit" in clipGroup.keys():
+            allEdit.extend(clipGroup["edit"])
+
+        clipGroup["move"] = []
+
+    # currently only moved clips are re-filed in the table; moved-and-edited
+    # clips stay in their original categories
+    add, moveEdit, moved, delete = checkMoved(allDel, allAdd)
+
+    # currently moved clips are still marked as delete in timelineA
+    for cd in moved:
+        clipTable[cd.track_num]["add"].remove(cd)
+        clipTable[cd.track_num]["move"].append(cd)
+        # moved clips should be marked as moved in timelineA rather than deleted
+        # clipTable[cd.track_num]["delete"].remove(cd)
+        # clipTable[cd.pair.track_num]["moved"].append(cd.pair)
+
+    return clipTable
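+
+
+# For reference, makeNewOtio below consumes the clipTable structure documented
+# in categorizeClipsByTracks, e.g. for a two-track comparison:
+#   {1: {"add": [...], "edit": [...], "same": [...], "delete": [...], "move": [...]},
+#    2: {"add": [...], "edit": [...], "same": [...], "delete": [...], "move": [...]}}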
+def makeNewOtio(clipTable, trackType):
+    """Make a new annotated OTIO timeline showing the change from timeline A to
+    timeline B, with the tracks from timeline B stacked on top of
+    the tracks from timeline A
+
+    Ex. New timeline showing the differences between timelines A and B with
+        2 tracks each
+            Track 2B
+            Track 1B
+            ========
+            Track 2A
+            Track 1A
+    """
+    newTl = otio.schema.Timeline(name="diffed")
+    tracksInA = []
+    tracksInB = []
+
+    # make tracks A and B in output timeline
+    for trackNum in clipTable.keys():
+        # use a named tuple here since clip categories won't change anymore
+        SortedClipDatas = namedtuple(
+            'ClipGroup', ['add', 'edit', 'same', 'delete', 'move'])
+        clipGroup = SortedClipDatas(clipTable[trackNum]["add"],
+                                    clipTable[trackNum]["edit"],
+                                    clipTable[trackNum]["same"],
+                                    clipTable[trackNum]["delete"],
+                                    clipTable[trackNum]["move"])
+
+        newTrackA = makeOtio.makeTrackA(clipGroup, trackNum, trackType)
+        tracksInA.append(newTrackA)
+
+        newTrackB = makeOtio.makeTrackB(clipGroup, trackNum, trackType)
+        tracksInB.append(newTrackB)
+
+    # write order to output timeline so that timeline B is on top for both
+    # video and audio
+    if trackType == otio.schema.Track.Kind.Video:
+        newTl.tracks.extend(tracksInA)
+
+        newEmpty = makeOtio.makeSeparatorTrack(trackType)
+        newTl.tracks.append(newEmpty)
+
+        newTl.tracks.extend(tracksInB)
+    elif trackType == otio.schema.Track.Kind.Audio:
+        newTl.tracks.extend(tracksInB)
+
+        newEmpty = makeOtio.makeSeparatorTrack(trackType)
+        newTl.tracks.append(newEmpty)
+
+        newTl.tracks.extend(tracksInA)
+
+    return newTl
+
+
+def categorizeClipsByTracks(tracksA, tracksB):
+    """Compare the clips in each track in tracksB against the corresponding track
+    in tracksA and categorize based on how they have changed.
+    Return a dictionary table of ClipDatas categorized by
+    added, edited, unchanged, deleted, and moved and ordered by track.
+
+    Parameters:
+        tracksA (list of otio.schema.Track() elements): list of tracks from timeline A
+        tracksB (list of otio.schema.Track() elements): list of tracks from timeline B
+
+    Returns:
+        clipTable (dictionary): dictionary holding categorized ClipDatas, organized
+            by the track number of the ClipDatas
+            dictionary keys: track number (int)
+            dictionary values: dictionary holding categorized
+                ClipDatas of that track
+                nested dictionary keys: category name (string)
+                nested dictionary values: list of ClipDatas that fall
+                    into the category
+
+    Ex: clipTable when tracksA and tracksB contain 3 tracks
+        {1 : {"add": [], "edit": [], "same": [], "delete": [], "move": []}
+         2 : {"add": [], "edit": [], "same": [], "delete": [], "move": []}
+         3 : {"add": [], "edit": [], "same": [], "delete": [], "move": []}}
+    """
+
+    clipTable = {}
+
+    matchedTrackNum = min(len(tracksA), len(tracksB))
+    totalTrackNum = max(len(tracksA), len(tracksB))
+
+    trackNumDiff = totalTrackNum - matchedTrackNum
+    shorterTracks = tracksA if len(tracksA) < len(tracksB) else tracksB
+    longerTracks = tracksB if len(tracksA) < len(tracksB) else tracksA
+
+    for i in range(0, trackNumDiff):
+        # pad the shorter track list with empty tracks (take the track kind
+        # from the longer list so this also works when the shorter list is
+        # empty)
+        shorterTracks.append(makeOtio.makeEmptyTrack(longerTracks[0].kind))
+
+    for i in range(0, totalTrackNum):
+        currTrackA = tracksA[i]
+        currTrackB = tracksB[i]
+        trackNum = i + 1
+
+        add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum)
+
+        clipTable[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete}
+
+    # recategorize added/deleted into moved
+    clipTable = sortMoved(clipTable)
+
+    return clipTable
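+
+
+# Usage sketch (illustrative file names): the diff can also be driven directly
+# from Python rather than through `otiotool --diff`:
+#
+#     import opentimelineio as otio
+#     from opentimelineio.console.otiodiff import getDiff
+#
+#     tlA = otio.adapters.read_from_file("cutA.otio")
+#     tlB = otio.adapters.read_from_file("cutB.otio")
+#     diffed = getDiff.diffTimelines(tlA, tlB)
+#     otio.adapters.write_to_file(diffed, "diffed.otio")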
+
+
+def makeSummary(clipTable, trackType, mode):
+    """Summarize what clips got changed and how they changed and print to console."""
+
+    print(trackType.upper(), "CLIPS")
+    print("===================================")
+    print("          Overview Summary         ")
+    print("===================================")
+
+    allAdd = []
+    allEdit = []
+    allSame = []
+    allDel = []
+    allMove = []
+
+    if mode == "summary":
+        for track in clipTable.keys():
+            clipGroup = clipTable[track]
+
+            if "add" in clipGroup.keys():
+                allAdd.extend(clipGroup["add"])
+            else:
+                print("no add")
+            if "delete" in clipGroup.keys():
+                allDel.extend(clipGroup["delete"])
+            else:
+                print("no del")
+            if "same" in clipGroup.keys():
+                allSame.extend(clipGroup["same"])
+            else:
+                print("no same")
+            if "edit" in clipGroup.keys():
+                allEdit.extend(clipGroup["edit"])
+            else:
+                print("no edit")
+            if "move" in clipGroup.keys():
+                allMove.extend(clipGroup["move"])
+            else:
+                print("no move")
+
+        print("total added:", len(allAdd))
+        print("total edited:", len(allEdit))
+        print("total moved:", len(allMove))
+        print("total deleted:", len(allDel))
+
+    if mode == "perTrack":
+        # print by track
+        for track in clipTable.keys():
+            clipGroup = clipTable[track]
+            print("================== Track", track, "==================")
+            for cat in clipGroup.keys():
+                print("")
+                print(cat.upper(), ":", len(clipGroup[cat]))
+                if cat != "same":
+                    for i in clipGroup[cat]:
+                        if i.note is not None:
+                            print(i.name + ": " + i.note)
+                        else:
+                            print(i.name)
+            print("")
+
+
+def makeTimelineSummary(timelineA, timelineB):
+    """Summarize information about the two timelines compared and print to console."""
+    print("Comparing Timeline B:", timelineB.name, "vs")
+    print("          Timeline A:", timelineA.name)
+    print("")
+
+    if len(timelineA.video_tracks()) == 0:
+        print("No video tracks in A")
+    if len(timelineB.video_tracks()) == 0:
+        print("No video tracks in B")
+
+    if len(timelineA.audio_tracks()) == 0:
+        print("No audio tracks in A")
+    if len(timelineB.audio_tracks()) == 0:
+        print("No audio tracks in B")
+
+    # compare overall file duration
+    if (timelineB.duration() > timelineA.duration()):
+        delta = timelineB.duration().to_seconds() - timelineA.duration().to_seconds()
+        print(f"Timeline duration increased by {delta:.2f} seconds")
+    elif (timelineB.duration() < timelineA.duration()):
+        delta = timelineA.duration().to_seconds() - timelineB.duration().to_seconds()
+        print(f"Timeline duration decreased by {delta:.2f} seconds")
+    else:
+        print("Timeline duration did not change")
+    print("")
\ No newline at end of file
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
new file mode 100644
index 000000000..f783c256a
--- /dev/null
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
@@ -0,0 +1,191 @@
+import opentimelineio as otio
+import copy
+from .clipData import ClipData
+from opentimelineio.core import Color
+
+# color-coding for clips in output timeline
+addedClipsColor = Color.GREEN
+editedClipsColor = Color.ORANGE
+deletedClipsColor = Color.PINK
+movedClipsColor = Color.PURPLE
+
+
+def sortClips(trackClips):
+    """Sort ClipDatas based on start time on the timeline"""
+    return sorted(trackClips,
+                  key=lambda clipData: clipData.timeline_range.start_time.value)
+
+
+def addRavenColor(clip, color):
+    """Add the color of the clip to raven metadata so clips are correctly
+    color-coded in the raven viewer. Specific to raven only."""
+    # parse the name of the color from otio.core.Color and put it into a
+    # format that raven can read
+    color = color.name.upper()
+
+    if "raven" not in clip.metadata:
+        clip.metadata["raven"] = {"color": None}
+    clip.metadata["raven"]["color"] = color
+
+    return clip
+
+
+def addMarker(newClip, clipData, color=None):
+    """Add a marker of the specified color and name to the clip"""
+    newMarker = otio.schema.Marker()
+    newMarker.marked_range = clipData.source_range
+
+    # parse the name of the color from otio.core.Color and put it into a
+    # format that markers can read
+    if color is not None:
+        colorName = color.name.upper()
+        newMarker.color = colorName
+
+    if isinstance(clipData, ClipData) and clipData.note is not None:
+        newMarker.name = clipData.note
+
+    newClip.markers.append(newMarker)
+
+    return newClip
+
+
+def makeSeparatorTrack(trackType):
+    """Make an empty track that separates the timeline A tracks
+    from the timeline B tracks"""
+    return otio.schema.Track(name="=====================", kind=trackType)
+
+
+def makeEmptyTrack(trackType):
+    """Make an empty track"""
+    return otio.schema.Track(kind=trackType)
+
+
+def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False):
+    """Make an OTIO track from ClipDatas with the option to add markers
+    and color to all clips on the track"""
+    # make new blank track with name and kind from parameters
+    track = otio.schema.Track(name=trackName, kind=trackKind)
+
+    # sort clips by start time in timeline
+    sortedClips = sortClips(trackClips)
+
+    currentEnd = 0
+    # add clips to the track
+    for clipData in sortedClips:
+        if clipData is not None:
+            # add gap if necessary
+            tlStart = clipData.timeline_range.start_time.value
+            tlDuration = clipData.timeline_range.duration.value
+            tlRate = clipData.timeline_range.start_time.rate
+
+            delta = tlStart - currentEnd
+
+            if (delta > 0):
+                gapDur = otio.opentime.RationalTime(delta, tlRate)
+                gap = otio.schema.Gap(duration=gapDur)
+                track.append(gap)
+
+                currentEnd = tlStart + tlDuration
+            else:
+                currentEnd += tlDuration
+
+            # add clip to track
+            newClip = copy.deepcopy(clipData.source_clip)
+            if clipColor is not None:
+                newClip = addRavenColor(newClip, clipColor)
+                newClip.color = clipColor
+
+            if markersOn:
+                newClip = addMarker(newClip, clipData, clipColor)
+            track.append(newClip)
+
+    return track
+
+
+def makeTrackB(clipGroup, trackNum, trackKind):
+    """Make an annotated track from timeline B. Shows added and edited clips
+    as well as clips that have moved between tracks.
+
+    The algorithm makes an individual track for each clip category the track
+    contains, then flattens them to form the final track. Since blanks are
+    left in all of the individual tracks, flattening lets every clip simply
+    slot down into place on the flattened track.
+
+    Ex. track 1 has added and unchanged clips
+    Algorithm steps:
+        1) Make a track containing only the unchanged clips of track 1
+        2) Make another track containing only the added clips of track 1 and
+           color them green
+        3) Flatten the added clips track on top of the unchanged clips track
+           to create a track containing both
+    """
+
+    # for each category of clips, make an individual track and color code
+    # accordingly
+    tSame = makeTrack("same", trackKind, clipGroup.same)
+    tAdd = makeTrack("added", trackKind, clipGroup.add, addedClipsColor)
+    tEdited = makeTrack("edited", trackKind, clipGroup.edit,
+                        editedClipsColor, markersOn=True)
+    tMoved = makeTrack("moved", trackKind, clipGroup.move,
+                       movedClipsColor, markersOn=True)
+
+    # put all the tracks into a list and flatten them down to a single track
+    # that contains all the color-coded clips
+    flatB = otio.core.flatten_stack([tSame, tEdited, tAdd, tMoved])
+
+    # update track name and kind
+    flatB.name = trackKind + " B" + str(trackNum)
+    flatB.kind = trackKind
+
+    return flatB
+
+
+def getMatchedClips(clipGroup):
+    """Return the matched ClipData (the timeline A counterpart) for each
+    ClipData in the group, falling back to the ClipData itself when it has no
+    match"""
+    pairedClips = []
+    for clipData in clipGroup:
+        if clipData.matched_clipData is None:
+            pairedClips.append(clipData)
+        else:
+            pairedClips.append(clipData.matched_clipData)
+    return pairedClips
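+
+
+# Illustrative: for an "edit" group built by compareClips, the ClipDatas come
+# from timeline B and their matched_clipData fields point at the timeline A
+# originals, so getMatchedClips(clipGroup.edit) yields the A-side versions
+# that makeTrackA below lays out.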
+def makeTrackA(clipGroup, trackNum, trackKind):
+    """Make an annotated track from timeline A. Shows deleted clips and the
+    original clips corresponding to clips edited in timeline B.
+
+    The algorithm makes an individual track for each clip category the track
+    contains, then flattens them to form the final track. Since blanks are
+    left in all of the individual tracks, flattening lets every clip simply
+    slot down into place on the flattened track.
+
+    Ex. track 1 has deleted and unchanged clips
+    Algorithm steps:
+        1) Make a track containing only the unchanged clips of track 1
+        2) Make another track containing only the deleted clips of track 1 and
+           color them red
+        3) Flatten the deleted clips track on top of the unchanged clips track
+           to create a track containing both
+    """
+
+    # for each category of clips, make an individual track and color code
+    # accordingly
+    # grab the original pair from all the edit and same clipDatas since they
+    # only save the ones in timeline B
+    originalEdited = getMatchedClips(clipGroup.edit)
+    originalUnchanged = getMatchedClips(clipGroup.same)
+
+    tSame = makeTrack("same", trackKind, originalUnchanged)
+    tEdited = makeTrack("edited", trackKind, originalEdited, editedClipsColor)
+    tDel = makeTrack("deleted", trackKind, clipGroup.delete, deletedClipsColor)
+
+    # put all the tracks into a list and flatten them down to a single track
+    # that contains all the color-coded clips
+    flatA = otio.core.flatten_stack([tSame, tEdited, tDel])
+
+    # update track name and kind
+    flatA.name = trackKind + " A" + str(trackNum)
+    flatA.kind = trackKind
+
+    return flatA
diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py
index aca4bf1e5..78f7de8aa 100755
--- a/src/py-opentimelineio/opentimelineio/console/otiotool.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py
@@ -23,6 +23,8 @@
 
 import opentimelineio as otio
 
+from .otiodiff import getDiff
+
 
 def main():
     """otiotool main program.
@@ -115,6 +117,11 @@ def main():
     for timeline in timelines:
         copy_media_to_folder(timeline, args.copy_media_to_folder)
 
+    # Phase 5.5: Diff two timelines
+
+    if args.diff:
+        timelines = [diff_otio(timelines)]
+
     # Phase 6: Remove/Redaction
 
     if args.remove_metadata_key:
@@ -210,6 +217,13 @@ def parse_arguments():
     If specified, the --redact option, will remove ALL metadata and rename all
     objects in the OTIO with generic names (e.g. "Track 1", "Clip 17", etc.)
 
+5.5 Diff
+    The --diff option allows you to compare two OTIO files. It generates an
+    OTIO file annotated with the differences between their clips as well as a
+    text summary report in the console. The ordering of files given to --input
+    matters, as diff compares the second file against the first.
+    --diff can't be used concurrently with --stack or --concat
+
 6. Inspect
     Options such as --stats, --list-clips, --list-tracks, --list-media,
     --verify-media, --list-markers, --verify-ranges, and --inspect
@@ -234,7 +248,11 @@ def parse_arguments():
 otiotool -i playlist.otio --verify-media
 
 Inspect specific audio clips in detail:
-otiotool -i playlist.otio --only-audio --list-tracks --inspect "Interview"
+otiotool -i playlist.otio --audio-only --list-tracks --inspect "Interview"
+
+Diff fileB against fileA
+(ordering matters where fileA is the file fileB compares against):
+otiotool -i fileA.otio fileB.otio --diff -o display.otio
 """,
     formatter_class=argparse.RawDescriptionHelpFormatter
     )
@@ -451,6 +469,13 @@ def parse_arguments():
         are supported. Use '-' to write OTIO to standard output."""
     )
 
+    parser.add_argument(
+        "--diff",
+        action="store_true",
+        help="""Diff and compare two otio files. Input file type must be .otio
+        and input file order matters"""
+    )
+
     args = parser.parse_args()
 
     # At least one of these must be specified
@@ -489,6 +514,20 @@ def main():
     return timelines
 
 
+def diff_otio(timelines):
+    """Return an annotated timeline showing how clips changed from the first
+    to the second timeline"""
+    assert len(timelines) >= 2, (
+        "Less than 2 timelines given. 2 timelines are required"
+        " to perform a diff")
+
+    if len(timelines) > 2:
+        print("Warning: more than 2 timelines provided as input. Only the"
+              " first two timelines will be diffed.")
+
+    return getDiff.diffTimelines(timelines[0], timelines[1])
+
+
 def keep_only_video_tracks(timeline):
     """Remove all tracks except for video tracks from a timeline."""
     timeline.tracks[:] = timeline.video_tracks()
diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py
new file mode 100644
index 000000000..35151e4f9
--- /dev/null
+++ b/tests/test_otiodiff.py
@@ -0,0 +1,736 @@
+import unittest
+import opentimelineio as otio
+
+from opentimelineio.console.otiodiff.clipData import ClipData
+# import opentimelineio.console.otiodiff.makeOtio as makeOtio
+import opentimelineio.console.otiodiff.getDiff as getDiff
+
+
+class TestClipData(unittest.TestCase):
+    # check if the names of two ClipDatas are the same
+    def test_same_name(self):
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.sameName(clipDataA)
+
+    def test_different_name(self):
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName2 testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert not clipDataB.sameName(clipDataA)
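+
+    # Illustrative extra check (suggested addition, not part of the original
+    # suite): splitFullName should fall back to version=None when the clip
+    # name contains no space.
+    def test_split_full_name_no_version(self):
+        clip = otio.schema.Clip(
+            name="testName",
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        track = otio.schema.Track()
+        track.append(clip)
+
+        clipData = ClipData(clip, 1)
+
+        assert clipData.name == "testName"
+        assert clipData.version is None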
+
+    def test_same_duration(self):
+        # check that the length of the clip is the same
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.sameDuration(clipDataA)
+
+    def test_different_duration(self):
+        # check that the lengths of the clips are different
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(20, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert not clipDataB.sameDuration(clipDataA)
+
+    def test_check_same(self):
+        # check that two exact same clips are the same
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.checkSame(clipDataA)
+
+    def test_check_same_if_move(self):
+        # check that two exact same clips but moved in the timeline are the same
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(10, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(10, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        gapDur = otio.opentime.RationalTime(5, 24)
+        gap = otio.schema.Gap(duration=gapDur)
+
+        trackA.append(clipA)
+        trackB.extend([gap, clipB])
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.checkSame(clipDataA)
+        assert clipDataB.note == "shifted laterally in track"
+
+    def test_check_not_same(self):
+        # check that two clips with different names are not the same
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName2 testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert not clipDataB.checkSame(clipDataA)
+        assert clipDataB.note is None
+
+    def test_check_not_same2(self):
+        # check that two clips with different source range durations
+        # are not the same
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(20, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert not clipDataB.checkSame(clipDataA)
+        assert clipDataB.note is None
+
+    def test_check_not_same3(self):
+        # check that two clips with different source range start times are not the same
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(10, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert not clipDataB.checkSame(clipDataA)
+        assert clipDataB.note is None
+
+    def test_check_edited_trimmed_head(self):
+        # check that trimming the head of a clip is detected
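+        # scenario: clipA uses source frames 0-99; clipB uses frames 10-99,
+        # i.e. 10 frames removed from the head, so checkEdited should report
+        # "trimmed head by 10 frames"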
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(100, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(10, 24),
+                otio.opentime.RationalTime(90, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.checkEdited(clipDataA)
+        print("note is:", clipDataB.note)
+        assert clipDataB.note == "trimmed head by 10 frames"
+
+    def test_check_edited_trimmed_tail(self):
+        # check that trimming the tail of a clip is detected
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(100, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(90, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.checkEdited(clipDataA)
+        assert clipDataB.note == "trimmed tail by 10 frames"
+
+    def test_check_edited_lengthened_head(self):
+        # check that lengthening the head of a clip is detected
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(10, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(20, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.checkEdited(clipDataA)
+        print("note:", clipDataB.note)
+        assert clipDataB.note == "lengthened head by 10 frames"
+
+    def test_check_edited_lengthened_tail(self):
+        # check that lengthening the tail of a clip is detected
+        clipA = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(10, 24)),
+        )
+        clipB = otio.schema.Clip(
+            name="testName testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(100, 24))),
+            source_range=otio.opentime.TimeRange(
+                otio.opentime.RationalTime(0, 24),
+                otio.opentime.RationalTime(20, 24)),
+        )
+        trackA = otio.schema.Track()
+        trackB = otio.schema.Track()
+
+        trackA.append(clipA)
+        trackB.append(clipB)
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+
+        assert clipDataB.checkEdited(clipDataA)
+        assert clipDataB.note == "lengthened tail by 10 frames"
+
+    # TODO: make test where clip has null source range
+
+
+class TestGetDiff(unittest.TestCase):
+    # TODO: test case for timelines with unmatched track nums
+    # test case for timeline with matched track nums
+
+    def test_single_track(self):
+        pass
+
+    def test_multi_track_matched(self):
+        pass
+
+    def test_multi_track_unmatched_more_A(self):
+        pass
+
+    def test_multi_track_unmatched_more_B(self):
+        pass
+
+    def test_find_clones(self):
+        clipA = otio.schema.Clip(
+            name="clipA testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipB = otio.schema.Clip(
+            name="clipB testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(10, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipC = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(20, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipCClone = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(30, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipD = otio.schema.Clip(
+            name="clipD testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(40, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        trackA = otio.schema.Track()
+
+        trackA.extend([clipA, clipB, clipC, clipCClone, clipD])
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+        clipDataC = ClipData(clipC, 1)
+        clipDataCClone = ClipData(clipCClone, 1)
+        clipDataD = ClipData(clipD, 1)
+
+        testClips = [clipDataA, clipDataB, clipDataC, clipDataCClone, clipDataD]
+        clones, nonClones = getDiff.findClones(testClips)
+
+        correctClones = {clipDataC.name: [clipDataC, clipDataCClone]}
+        correctNonClones = [clipDataA, clipDataB, clipDataD]
+
+        assert (clones == correctClones
+                ), "Not all cloned clips correctly identified"
+        assert (nonClones == correctNonClones
+                ), "Not all unique clips correctly identified"
+
+    def test_sort_clones_clones_in_both(self):
+        # SETUP
+        clipA = otio.schema.Clip(
+            name="clipA testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipB = otio.schema.Clip(
+            name="clipB testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(10, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipC = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(20, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipCClone = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(30, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipD = otio.schema.Clip(
+            name="clipD testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(40, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        trackA = otio.schema.Track()
+
+        trackA.extend([clipA, clipB, clipC, clipCClone, clipD])
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+        clipDataC = ClipData(clipC, 1)
+        clipDataCClone = ClipData(clipCClone, 1)
+        clipDataD = ClipData(clipD, 1)
+
+        clipDatasA = [clipDataA, clipDataB, clipDataC, clipDataCClone]
+        clipDatasB = [clipDataB, clipDataC, clipDataCClone, clipDataD]
+
+        # EXERCISE
+        sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB)
+
+        # VERIFY
+        clonesA, nonClonesA = sortedClonesA
+        clonesB, nonClonesB = sortedClonesB
+
+        assert (len(clonesA) == 1
+                ), "Number of clones found in trackA doesn't match"
+        assert (len(nonClonesA) == 2
+                ), "Number of non-clones found in trackA doesn't match"
+        assert (len(clonesB) == 1
+                ), "Number of clones found in trackB doesn't match"
+        assert (len(nonClonesB) == 2
+                ), "Number of non-clones found in trackB doesn't match"
+
+    def test_sort_clones_clones_in_one(self):
+        # SETUP
+        clipA = otio.schema.Clip(
+            name="clipA testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipB = otio.schema.Clip(
+            name="clipB testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(10, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipC = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(20, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipCClone = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(30, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipD = otio.schema.Clip(
+            name="clipD testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(40, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        trackA = otio.schema.Track()
+
+        trackA.extend([clipA, clipB, clipC, clipCClone, clipD])
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+        clipDataC = ClipData(clipC, 1)
+        clipDataCClone = ClipData(clipCClone, 1)
+        clipDataD = ClipData(clipD, 1)
+
+        clipDatasA = [clipDataA, clipDataB, clipDataC, clipDataCClone]
+        clipDatasB = [clipDataA, clipDataB, clipDataD]
+
+        # EXERCISE
+        sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB)
+
+        # VERIFY
+        clonesA, nonClonesA = sortedClonesA
+        clonesB, nonClonesB = sortedClonesB
+
+        assert (len(clonesA) == 1
+                ), "Number of clones found in trackA doesn't match"
+        assert (len(nonClonesA) == 2
+                ), "Number of non-clones found in trackA doesn't match"
+        assert (len(clonesB) == 0
+                ), "Number of clones found in trackB doesn't match"
+        assert (len(nonClonesB) == 3
+                ), "Number of non-clones found in trackB doesn't match"
+
+    def test_sort_clones_clones_in_one_single_in_other(self):
+        # SETUP
+        clipA = otio.schema.Clip(
+            name="clipA testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(0, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipB = otio.schema.Clip(
+            name="clipB testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(10, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipC = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(20, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipCClone = otio.schema.Clip(
+            name="clipC testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(30, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        clipD = otio.schema.Clip(
+            name="clipD testTake",
+            media_reference=otio.core.MediaReference(
+                available_range=otio.opentime.TimeRange(
+                    otio.opentime.RationalTime(40, 24),
+                    otio.opentime.RationalTime(10, 24))),
+        )
+        trackA = otio.schema.Track()
+
+        trackA.extend([clipA, clipB, clipC, clipCClone, clipD])
+
+        clipDataA = ClipData(clipA, 1)
+        clipDataB = ClipData(clipB, 1)
+        clipDataC = ClipData(clipC, 1)
+        clipDataCClone = ClipData(clipCClone, 1)
+        clipDataD = ClipData(clipD, 1)
+
+        clipDatasA = [clipDataA, clipDataB, clipDataC, clipDataCClone]
+        clipDatasB = [clipDataB, clipDataC, clipDataD]
+
+        # EXERCISE
+        sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB)
+
+        # VERIFY
+        clonesA, nonClonesA = sortedClonesA
+        clonesB, nonClonesB = sortedClonesB
+
+        assert (len(clonesA) == 1
+                ), "Number of clones found in trackA doesn't match"
+        assert (len(nonClonesA) == 2
+                ), "Number of non-clones found in trackA doesn't match"
+        assert (len(clonesB) == 1
+                ), "Number of clones found in trackB doesn't match"
+        assert (len(nonClonesB) == 2
+                ), "Number of non-clones found in trackB doesn't match"
+
+
+class TestMakeOtio(unittest.TestCase):
+    # TODO: test sort clips
+
+    # TODO: test make track
+    pass
+
+
+if __name__ == '__main__':
+    unittest.main()
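+
+# Note: the suite can be run directly (python tests/test_otiodiff.py) or via
+# pytest, assuming opentimelineio is importable (e.g. from an editable install
+# of this repository).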