+package com.conveyal.datatools.manager.jobs.feedmerge;
+
+import com.conveyal.datatools.manager.jobs.MergeFeedsJob;
+import com.conveyal.gtfs.error.NewGTFSError;
+import com.conveyal.gtfs.loader.Table;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.zip.ZipOutputStream;
+
+import static com.conveyal.datatools.manager.jobs.feedmerge.MergeFeedsType.SERVICE_PERIOD;
+import static com.conveyal.datatools.manager.utils.MergeFeedUtils.getTableScopedValue;
+import static com.conveyal.datatools.manager.utils.MergeFeedUtils.hasDuplicateError;
+
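+/**
+ * Merge-line context for the GTFS+ calendar_attributes table: remaps duplicate service_ids found in the
+ * active feed and writes cloned, scope-renamed rows for service_ids that are cloned during a
+ * service-period merge.
+ */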
+public class CalendarAttributesMergeLineContext extends MergeLineContext {
+    private static final Logger LOG = LoggerFactory.getLogger(CalendarAttributesMergeLineContext.class);
+
+    public CalendarAttributesMergeLineContext(MergeFeedsJob job, Table table, ZipOutputStream out) throws IOException {
+        super(job, table, out);
+    }
+
+    @Override
+    public boolean checkFieldsForMergeConflicts(Set<NewGTFSError> idErrors, FieldContext fieldContext) {
+        return checkCalendarIds(idErrors, fieldContext);
+    }
+
+    @Override
+    public void afterRowWrite() throws IOException {
+        // If the current row is for a calendar service_id that is marked for cloning/renaming, clone the
+        // values, change the ID, extend the start/end dates to the feed's full range, and write the
+        // additional line to the file.
+        addClonedServiceId();
+    }
+
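+    /**
+     * Checks the service_id of the current row: remaps it when the active feed duplicates a future-feed id,
+     * and returns false when the corresponding entry was skipped in the merged calendar table (so this row
+     * should be skipped as well).
+     */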
+    private boolean checkCalendarIds(Set<NewGTFSError> idErrors, FieldContext fieldContext) {
+        boolean shouldSkipRecord = false;
+
+        // If a service_id in the active feed matches one in the future feed, it must be modified, and all
+        // trip records that reference it must be updated to use the modified service_id.
+        // TODO How can we check that calendar_dates entries are duplicates? We would likely need to treat
+        //  service_id:exception_type:date as the unique key and include all entries that are unique on that key.
+        if (isHandlingActiveFeed() && hasDuplicateError(idErrors)) {
+            // Modify service_id and ensure that referencing trips have their service_id updated.
+            updateAndRemapOutput(fieldContext);
+        }
+
+        // Skip record (based on remapped id if necessary) if it was skipped in the calendar table.
+        String keyInCalendarTable = getTableScopedValue(Table.CALENDAR, getIdScope(), keyValue);
+        if (mergeFeedsResult.skippedIds.contains(keyInCalendarTable)) {
+            LOG.warn(
+                "Skipping calendar entry {} because it was skipped in the merged calendar table.",
+                keyValue);
+            shouldSkipRecord = true;
+        }
+
+        return !shouldSkipRecord;
+    }
+
+    /**
+     * Adds a cloned service_id for trips with the same signature in both the active & future feeds.
+     * The cloned service_id spans from the start date in the active feed until the end date in the future feed.
+     * @throws IOException
+     */
+    public void addClonedServiceId() throws IOException {
+        if (isHandlingFutureFeed() && job.mergeType.equals(SERVICE_PERIOD)) {
+            String originalServiceId = keyValue;
+            if (job.serviceIdsToCloneRenameAndExtend.contains(originalServiceId)) {
+                String[] clonedValues = getOriginalRowValues().clone();
+                // Prefix the cloned service_id with the feed's id scope so it does not collide with the original id.
+                String newServiceId = clonedValues[keyFieldIndex] = String.join(":", getIdScope(), originalServiceId);
+
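+                // Record the cloned id with the reference tracker so later reference and duplicate checks
+                // account for the newly written row.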
+                referenceTracker.checkReferencesAndUniqueness(
+                    keyValue,
+                    getLineNumber(),
+                    table.fields[0],
+                    newServiceId,
+                    table,
+                    keyField,
+                    table.getOrderFieldName()
+                );
+                writeValuesToTable(clonedValues, true);
+            }
+        }
+    }
+}