136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
|
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
|
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
|
}
/*
** Text of boundary markers for merge conflicts.
**
** The strings deliberately carry no trailing newline; the code that
** emits them follows each marker with ensure_line_end() so that the
** correct line terminator (LF or CR/LF) is appended for the file.
*/
static const char *const mergeMarker[] = {
 /*123456789 123456789 123456789 123456789 123456789 123456789 123456789*/
  "<<<<<<< BEGIN MERGE CONFLICT: local copy shown first <<<<<<<<<<<<<<<",
  "||||||| COMMON ANCESTOR content follows ||||||||||||||||||||||||||||",
  "======= MERGED IN content follows ==================================",
  ">>>>>>> END MERGE CONFLICT >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
};
/*
** Return true if the input blob contains any CR/LF pairs on the first
** ten lines.  This should be enough to detect files that use mainly CR/LF
** line endings without causing a performance impact for LF only files.
*/
int contains_crlf(Blob *p){
  int i;                    /* Byte index into the blob content */
  int nLine = 0;            /* Number of complete lines examined so far */
  const int maxL = 10;      /* Max lines to check */
  const char *z = blob_buffer(p);
  int n = blob_size(p);     /* Stay within the content; no +1 past the end */
  for(i=1; i<n; i++){
    if( z[i-1]=='\r' && z[i]=='\n' ) return 1;
    /* z[i-1]=='\n' means position i starts a new line.  Give up after
    ** maxL lines so huge LF-only files are not fully scanned. */
    if( z[i-1]=='\n' && ++nLine>=maxL ) break;
  }
  return 0;
}
/*
** Ensure that the text in pBlob ends with a new line.
** If useCrLf is true adds "\r\n" otherwise '\n'.
*/
void ensure_line_end(Blob *pBlob, int useCrLf){
  if( pBlob->nUsed<=0 ) return;                     /* Empty: nothing to do */
  if( pBlob->aData[pBlob->nUsed-1]=='\n' ) return;  /* Already terminated */
  if( useCrLf ) blob_append_char(pBlob, '\r');
  blob_append_char(pBlob, '\n');
}
/*
** Do a three-way merge. Initialize pOut to contain the result.
**
** The merge is an edit against pV2. Both pV1 and pV2 have a
** common origin at pPivot. Apply the changes of pPivot ==> pV1
** to pV2.
|
164
165
166
167
168
169
170
171
172
173
174
175
176
177
|
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
|
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
|
int *aC2; /* Changes from pPivot to pV2 */
int i1, i2; /* Index into aC1[] and aC2[] */
int nCpy, nDel, nIns; /* Number of lines to copy, delete, or insert */
int limit1, limit2; /* Sizes of aC1[] and aC2[] */
int nConflict = 0; /* Number of merge conflicts seen so far */
blob_zero(pOut); /* Merge results stored in pOut */
/* If both pV1 and pV2 start with a UTF-8 byte-order-mark (BOM),
** keep it in the output. This should be secure enough not to cause
** unintended changes to the merged file and consistent with what
** users are using in their source files.
*/
if( starts_with_utf8_bom(pV1, 0) && starts_with_utf8_bom(pV2, 0) ){
blob_append(pOut, (char*)get_utf8_bom(0), -1);
}
/* Check once to see if both pV1 and pV2 contains CR/LF endings.
** If true, CR/LF pair will be used later to append the
** boundary markers for merge conflicts.
*/
int useCrLf = 0;
if( contains_crlf(pV1) && contains_crlf(pV2) ){
useCrLf = 1;
}
/* Compute the edits that occur from pPivot => pV1 (into aC1)
** and pPivot => pV2 (into aC2). Each of the aC1 and aC2 arrays is
** an array of integer triples. Within each triple, the first integer
** is the number of lines of text to copy directly from the pivot,
** the second integer is the number of lines of text to omit from the
** pivot, and the third integer is the number of lines of text that are
|
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
|
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
|
+
+
+
+
+
+
+
|
int sz = 1; /* Size of the conflict in lines */
nConflict++;
while( !ends_at_CPY(&aC1[i1], sz) || !ends_at_CPY(&aC2[i2], sz) ){
sz++;
}
DEBUG( printf("CONFLICT %d\n", sz); )
blob_append(pOut, mergeMarker[0], -1);
ensure_line_end(pOut, useCrLf);
i1 = output_one_side(pOut, pV1, aC1, i1, sz);
ensure_line_end(pOut, useCrLf);
blob_append(pOut, mergeMarker[1], -1);
ensure_line_end(pOut, useCrLf);
blob_copy_lines(pOut, pPivot, sz);
ensure_line_end(pOut, useCrLf);
blob_append(pOut, mergeMarker[2], -1);
ensure_line_end(pOut, useCrLf);
i2 = output_one_side(pOut, pV2, aC2, i2, sz);
ensure_line_end(pOut, useCrLf);
blob_append(pOut, mergeMarker[3], -1);
ensure_line_end(pOut, useCrLf);
}
/* If we are finished with an edit triple, advance to the next
** triple.
*/
if( i1<limit1 && aC1[i1]==0 && aC1[i1+1]==0 && aC1[i1+2]==0 ) i1+=3;
if( i2<limit2 && aC2[i2]==0 && aC2[i2+1]==0 && aC2[i2+2]==0 ) i2+=3;
|
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
|
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
|
-
+
+
-
+
|
int n = blob_size(p) - len + 1;
assert( len==(int)strlen(mergeMarker[1]) );
assert( len==(int)strlen(mergeMarker[2]) );
assert( len==(int)strlen(mergeMarker[3]) );
assert( count(mergeMarker)==4 );
for(i=0; i<n; ){
for(j=0; j<4; j++){
if( memcmp(&z[i], mergeMarker[j], len)==0 ) return 1;
if( (memcmp(&z[i], mergeMarker[j], len)==0)
&& (i+1==n || z[i+len]=='\n' || z[i+len]=='\r') ) return 1;
}
while( i<n && z[i]!='\n' ){ i++; }
while( i<n && z[i]=='\n' ){ i++; }
while( i<n && (z[i]=='\n' || z[i]=='\r') ){ i++; }
}
return 0;
}
/*
** Return true if the named file contains an unresolved merge marker line.
*/
|