Bug Summary

File: src/repo_write.c
Warning: line 1604, column 3
Value stored to 'keyused' is never read
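
The flagged statement is "keyused = solv_free(keyused);" at line 1604. In libsolv, solv_free() frees its argument and returns NULL, and the local 'keyused' is not read again before the end of the function, so the stored NULL is a dead value. A minimal sketch of a change that would silence the warning, assuming no later use of 'keyused' is introduced, is to drop the assignment:

  /* before: keyused = solv_free(keyused);  -- the stored NULL is never read */
  solv_free(keyused);  /* release the key-usage map; the pointer is not used again */

The "x = solv_free(x)" pattern is common defensive style elsewhere in this codebase, so this is a cosmetic change aimed only at the analyzer note.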

Annotated Source Code

1/*
2 * Copyright (c) 2007-2011, Novell Inc.
3 *
4 * This program is licensed under the BSD license, read LICENSE.BSD
5 * for further information
6 */
7
8/*
9 * repo_write.c
10 *
11 * Write Repo data out to a file in solv format
12 *
13 * See doc/README.format for a description
14 * of the binary file format
15 *
16 */
17
18#include <sys/types.h>
19#include <limits.h>
20#include <fcntl.h>
21#include <stdio.h>
22#include <stdlib.h>
23#include <string.h>
24#include <assert.h>
25#include <errno.h>
26
27#include "pool.h"
28#include "util.h"
29#include "repo_write.h"
30#include "repopage.h"
31
32/*------------------------------------------------------------------*/
33/* Id map optimizations */
34
35typedef struct needid {
36 Id need;
37 Id map;
38} NeedId;
39
40
41#define RELOFF(id) (needid[0].map + GETRELID(id))
42
43/*
44 * increment need Id
45 * idarray: array of Ids, ID_NULL terminated
46 * needid: array of Id->NeedId
47 *
48 * return size of array (including trailing zero)
49 *
50 */
51
52static void
53incneedid(Pool *pool, Id id, NeedId *needid)
54{
55 while (ISRELDEP(id))
56 {
57 Reldep *rd = GETRELDEP(pool, id);
58 needid[RELOFF(id)].need++;
59 if (ISRELDEP(rd->evr))
60 incneedid(pool, rd->evr, needid);
61 else
62 needid[rd->evr].need++;
63 id = rd->name;
64 }
65 needid[id].need++;
66}
67
68static int
69incneedidarray(Pool *pool, Id *idarray, NeedId *needid)
70{
71 Id id;
72 int n = 0;
73
74 if (!idarray)
75 return 0;
76 while ((id = *idarray++) != 0)
77 {
78 n++;
79 while (ISRELDEP(id))
80 {
81 Reldep *rd = GETRELDEP(pool, id);
82 needid[RELOFF(id)].need++;
83 if (ISRELDEP(rd->evr))
84 incneedid(pool, rd->evr, needid);
85 else
86 needid[rd->evr].need++;
87 id = rd->name;
88 }
89 needid[id].need++;
90 }
91 return n + 1;
92}
93
94
95/*
96 *
97 */
98
99static int
100needid_cmp_need(const void *ap, const void *bp, void *dp)
101{
102 const NeedId *a = ap;
103 const NeedId *b = bp;
104 int r;
105 r = b->need - a->need;
106 if (r)
107 return r;
108 return a->map - b->map;
109}
110
111static int
112needid_cmp_need_s(const void *ap, const void *bp, void *dp)
113{
114 const NeedId *a = ap;
115 const NeedId *b = bp;
116 Stringpool *spool = dp;
117 const char *as;
118 const char *bs;
119
120 int r;
121 r = b->need - a->need;
122 if (r)
123 return r;
124 as = spool->stringspace + spool->strings[a->map];
125 bs = spool->stringspace + spool->strings[b->map];
126 return strcmp(as, bs);
127}
128
129
130/*------------------------------------------------------------------*/
131/* output helper routines, used for writing the header */
132/* (the data itself is accumulated in memory and written with
133 * write_blob) */
134
135/*
136 * unsigned 32-bit
137 */
138
139static void
140write_u32(Repodata *data, unsigned int x)
141{
142 FILE *fp = data->fp;
143 if (data->error)
144 return;
145 if (putc(x >> 24, fp) == EOF ||
146 putc(x >> 16, fp) == EOF ||
147 putc(x >> 8, fp) == EOF ||
148 putc(x, fp) == EOF)
149 {
150 data->error = pool_error(data->repo->pool, -1, "write error u32: %s", strerror(errno));
151 }
152}
153
154
155/*
156 * unsigned 8-bit
157 */
158
159static void
160write_u8(Repodata *data, unsigned int x)
161{
162 if (data->error)
163 return;
164 if (putc(x, data->fp) == EOF)
165 {
166 data->error = pool_error(data->repo->pool, -1, "write error u8: %s", strerror(errno));
167 }
168}
169
170/*
171 * data blob
172 */
173
174static void
175write_blob(Repodata *data, void *blob, int len)
176{
177 if (data->error)
178 return;
179 if (len && fwrite(blob, len, 1, data->fp) != 1)
180 {
181 data->error = pool_error(data->repo->pool, -1, "write error blob: %s", strerror(errno));
182 }
183}
184
185/*
186 * Id
187 */
188
189static void
190write_id(Repodata *data, Id x)
191{
192 FILE *fp = data->fp;
193 if (data->error)
194 return;
195 if (x >= (1 << 14))
196 {
197 if (x >= (1 << 28))
198 putc((x >> 28) | 128, fp);
199 if (x >= (1 << 21))
200 putc((x >> 21) | 128, fp);
201 putc((x >> 14) | 128, fp);
202 }
203 if (x >= (1 << 7))
204 putc((x >> 7) | 128, fp);
205 if (putc(x & 127, fp) == EOF)
206 {
207 data->error = pool_error(data->repo->pool, -1, "write error id: %s", strerror(errno));
208 }
209}
210
211static inline void
212write_id_eof(Repodata *data, Id x, int eof)
213{
214 if (x >= 64)
215 x = (x & 63) | ((x & ~63) << 1);
216 write_id(data, x | (eof ? 0 : 64));
217}
218
219
220
221static inline void
222write_str(Repodata *data, const char *str)
223{
224 if (data->error)
225 return;
226 if (fputs(str, data->fp) == EOF || putc(0, data->fp) == EOF)
227 {
228 data->error = pool_error(data->repo->pool, -1, "write error str: %s", strerror(errno));
229 }
230}
231
232/*
233 * Array of Ids
234 */
235
236static void
237write_idarray(Repodata *data, Pool *pool, NeedId *needid, Id *ids)
238{
239 Id id;
240 if (!ids)
241 return;
242 if (!*ids)
243 {
244 write_u8(data, 0);
245 return;
246 }
247 for (;;)
248 {
249 id = *ids++;
250 if (needid)
251 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
252 if (id >= 64)
253 id = (id & 63) | ((id & ~63) << 1);
254 if (!*ids)
255 {
256 write_id(data, id);
257 return;
258 }
259 write_id(data, id | 64);
260 }
261}
262
263static int
264cmp_ids(const void *pa, const void *pb, void *dp)
265{
266 Id a = *(Id *)pa;
267 Id b = *(Id *)pb;
268 return a - b;
269}
270
271#if 0
272static void
273write_idarray_sort(Repodata *data, Pool *pool, NeedId *needid, Id *ids, Id marker)
274{
275 int len, i;
276 Id lids[64], *sids;
277
278 if (!ids)
279 return;
280 if (!*ids)
281 {
282 write_u8(data, 0);
283 return;
284 }
285 for (len = 0; len < 64 && ids[len]; len++)
286 {
287 Id id = ids[len];
288 if (needid)
289 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
290 lids[len] = id;
291 }
292 if (ids[len])
293 {
294 for (i = len + 1; ids[i]; i++)
295 ;
296 sids = solv_malloc2(i, sizeof(Id));
297 memcpy(sids, lids, 64 * sizeof(Id));
298 for (; ids[len]; len++)
299 {
300 Id id = ids[len];
301 if (needid)
302 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
303 sids[len] = id;
304 }
305 }
306 else
307 sids = lids;
308
309 /* That bloody solvable:prereqmarker needs to stay in position :-( */
310 if (needid)
311 marker = needid[marker].need;
312 for (i = 0; i < len; i++)
313 if (sids[i] == marker)
314 break;
315 if (i > 1)
316 solv_sort(sids, i, sizeof(Id), cmp_ids, 0);
317 if ((len - i) > 2)
318 solv_sort(sids + i + 1, len - i - 1, sizeof(Id), cmp_ids, 0);
319
320 Id id, old = 0;
321
322 /* The differencing above produces many runs of ones and twos. I tried
323 fairly elaborate schemes to RLE those, but they give only very mediocre
324 improvements in compression, as coding the escapes costs quite some
325 space. Even if they are coded only as bits in IDs. The best improvement
326 was about 2.7% for the whole .solv file. It's probably better to
327 invest some complexity into sharing idarrays, than RLEing. */
328 for (i = 0; i < len - 1; i++)
329 {
330 id = sids[i];
331 /* Ugly PREREQ handling. A "difference" of 0 is the prereq marker,
332 hence all real differences are offset by 1. Otherwise we would
333 have to handle negative differences, which would cost code space for
334 the encoding of the sign. We lose the exact mapping of prereq here,
335 but we know the result, so we can recover from that in the reader. */
336 if (id == marker)
337 id = old = 0;
338 else
339 {
340 id = id - old + 1;
341 old = sids[i];
342 }
343 /* XXX If difference is zero we have multiple equal elements,
344 we might want to skip writing them out. */
345 if (id >= 64)
346 id = (id & 63) | ((id & ~63) << 1);
347 write_id(data, id | 64);
348 }
349 id = sids[i];
350 if (id == marker)
351 id = 0;
352 else
353 id = id - old + 1;
354 if (id >= 64)
355 id = (id & 63) | ((id & ~63) << 1);
356 write_id(data, id);
357 if (sids != lids)
358 solv_free(sids);
359}
360#endif
361
362
363struct extdata {
364 unsigned char *buf;
365 int len;
366};
367
368struct cbdata {
369 Repo *repo;
370 Repodata *target;
371
372 Stringpool *ownspool;
373 Dirpool *owndirpool;
374
375 Id *keymap;
376 int nkeymap;
377 Id *keymapstart;
378
379 NeedId *needid;
380
381 Id *schema; /* schema construction space */
382 Id *sp; /* pointer in above */
383 Id *oldschema, *oldsp;
384
385 Id *solvschemata;
386 Id *subschemata;
387 int nsubschemata;
388 int current_sub;
389
390 struct extdata *extdata;
391
392 Id *dirused;
393
394 Id vstart;
395
396 Id maxdata;
397 Id lastlen;
398
399 int doingsolvables; /* working on solvables data */
400 int filelistmode;
401};
402
403#define NEEDED_BLOCK 1023
404#define SCHEMATA_BLOCK 31
405#define SCHEMATADATA_BLOCK 255
406#define EXTDATA_BLOCK 4095
407
408static inline void
409data_addid(struct extdata *xd, Id sx)
410{
411 unsigned int x = (unsigned int)sx;
412 unsigned char *dp;
413
414 xd->buf = solv_extend(xd->buf, xd->len, 5, 1, EXTDATA_BLOCK);
415 dp = xd->buf + xd->len;
416
417 if (x >= (1 << 14))
418 {
419 if (x >= (1 << 28))
420 *dp++ = (x >> 28) | 128;
421 if (x >= (1 << 21))
422 *dp++ = (x >> 21) | 128;
423 *dp++ = (x >> 14) | 128;
424 }
425 if (x >= (1 << 7))
426 *dp++ = (x >> 7) | 128;
427 *dp++ = x & 127;
428 xd->len = dp - xd->buf;
429}
430
431static inline void
432data_addideof(struct extdata *xd, Id sx, int eof)
433{
434 unsigned int x = (unsigned int)sx;
435 unsigned char *dp;
436
437 xd->buf = solv_extend(xd->buf, xd->len, 5, 1, EXTDATA_BLOCK);
438 dp = xd->buf + xd->len;
439
440 if (x >= (1 << 13))
441 {
442 if (x >= (1 << 27))
443 *dp++ = (x >> 27) | 128;
444 if (x >= (1 << 20))
445 *dp++ = (x >> 20) | 128;
446 *dp++ = (x >> 13) | 128;
447 }
448 if (x >= (1 << 6))
449 *dp++ = (x >> 6) | 128;
450 *dp++ = eof ? (x & 63) : (x & 63) | 64;
451 xd->len = dp - xd->buf;
452}
453
454static inline int
455data_addideof_len(Id sx)
456{
457 unsigned int x = (unsigned int)sx;
458 if (x >= (1 << 13))
459 {
460 if (x >= (1 << 27))
461 return 5;
462 return x >= (1 << 20) ? 4 : 3;
463 }
464 return x >= (1 << 6) ? 2 : 1;
465}
466
467static void
468data_addid64(struct extdata *xd, unsigned int x, unsigned int hx)
469{
470 if (hx)
471 {
472 if (hx > 7)
473 {
474 data_addid(xd, (Id)(hx >> 3));
475 xd->buf[xd->len - 1] |= 128;
476 hx &= 7;
477 }
478 data_addid(xd, (Id)(x | 0x80000000));
479 xd->buf[xd->len - 5] = (x >> 28) | (hx << 4) | 128;
480 }
481 else
482 data_addid(xd, (Id)x);
483}
484
485static void
486data_addidarray_sort(struct extdata *xd, Pool *pool, NeedId *needid, Id *ids, Id marker)
487{
488 int len, i;
489 Id lids[64], *sids;
490 Id id, old;
491
492 if (!ids)
493 return;
494 if (!*ids)
495 {
496 data_addid(xd, 0);
497 return;
498 }
499 for (len = 0; len < 64 && ids[len]; len++)
500 {
501 Id id = ids[len];
502 if (needid)
503 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
504 lids[len] = id;
505 }
506 if (ids[len])
507 {
508 for (i = len + 1; ids[i]; i++)
509 ;
510 sids = solv_malloc2(i, sizeof(Id));
511 memcpy(sids, lids, 64 * sizeof(Id));
512 for (; ids[len]; len++)
513 {
514 Id id = ids[len];
515 if (needid)
516 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
517 sids[len] = id;
518 }
519 }
520 else
521 sids = lids;
522
523 /* That bloody solvable:prereqmarker needs to stay in position :-( */
524 if (needid)
525 marker = needid[marker].need;
526 for (i = 0; i < len; i++)
527 if (sids[i] == marker)
528 break;
529 if (i > 1)
530 solv_sort(sids, i, sizeof(Id), cmp_ids, 0);
531 if ((len - i) > 2)
532 solv_sort(sids + i + 1, len - i - 1, sizeof(Id), cmp_ids, 0);
533
534 old = 0;
535
536 /* The differencing above produces many runs of ones and twos. I tried
537 fairly elaborate schemes to RLE those, but they give only very mediocre
538 improvements in compression, as coding the escapes costs quite some
539 space. Even if they are coded only as bits in IDs. The best improvement
540 was about 2.7% for the whole .solv file. It's probably better to
541 invest some complexity into sharing idarrays, than RLEing. */
542 for (i = 0; i < len - 1; i++)
543 {
544 id = sids[i];
545 /* Ugly PREREQ handling. A "difference" of 0 is the prereq marker,
546 hence all real differences are offset by 1. Otherwise we would
547 have to handle negative differences, which would cost code space for
548 the encoding of the sign. We lose the exact mapping of prereq here,
549 but we know the result, so we can recover from that in the reader. */
550 if (id == marker)
551 id = old = 0;
552 else
553 {
554 id = id - old + 1;
555 old = sids[i];
556 }
557 /* XXX If difference is zero we have multiple equal elements,
558 we might want to skip writing them out. */
559 data_addideof(xd, id, 0);
560 }
561 id = sids[i];
562 if (id == marker)
563 id = 0;
564 else
565 id = id - old + 1;
566 data_addideof(xd, id, 1);
567 if (sids != lids)
568 solv_free(sids);
569}
570
571static inline void
572data_addblob(struct extdata *xd, unsigned char *blob, int len)
573{
574 xd->buf = solv_extend(xd->buf, xd->len, len, 1, EXTDATA_BLOCK);
575 memcpy(xd->buf + xd->len, blob, len);
576 xd->len += len;
577}
578
579static inline void
580data_addu32(struct extdata *xd, unsigned int num)
581{
582 unsigned char d[4];
583 d[0] = num >> 24;
584 d[1] = num >> 16;
585 d[2] = num >> 8;
586 d[3] = num;
587 data_addblob(xd, d, 4);
588}
589
590static Id
591putinownpool(struct cbdata *cbdata, Stringpool *ss, Id id)
592{
593 const char *str = stringpool_id2str(ss, id);
594 id = stringpool_str2id(cbdata->ownspool, str, 1);
595 if (id >= cbdata->needid[0].map)
596 {
597 int oldoff = cbdata->needid[0].map;
598 int newoff = (id + 1 + NEEDED_BLOCK) & ~NEEDED_BLOCK;
599 int nrels = cbdata->repo->pool->nrels;
600 cbdata->needid = solv_realloc2(cbdata->needid, newoff + nrels, sizeof(NeedId));
601 if (nrels)
602 memmove(cbdata->needid + newoff, cbdata->needid + oldoff, nrels * sizeof(NeedId));
603 memset(cbdata->needid + oldoff, 0, (newoff - oldoff) * sizeof(NeedId));
604 cbdata->needid[0].map = newoff;
605 }
606 return id;
607}
608
609static Id
610putinowndirpool(struct cbdata *cbdata, Repodata *data, Dirpool *dp, Id dir)
611{
612 Id compid, parent;
613
614 parent = dirpool_parent(dp, dir);
615 if (parent)
616 parent = putinowndirpool(cbdata, data, dp, parent);
617 compid = dp->dirs[dir];
618 if (cbdata->ownspool && compid > 1)
619 compid = putinownpool(cbdata, data->localpool ? &data->spool : &data->repo->pool->ss, compid);
620 return dirpool_add_dir(cbdata->owndirpool, parent, compid, 1);
621}
622
623/*
624 * collect usage information about the dirs
625 * 1: dir used, no child of dir used
626 * 2: dir used as parent of another used dir
627 */
628static inline void
629setdirused(struct cbdata *cbdata, Dirpool *dp, Id dir)
630{
631 if (cbdata->dirused[dir])
632 return;
633 cbdata->dirused[dir] = 1;
634 while ((dir = dirpool_parent(dp, dir)) != 0)
635 {
636 if (cbdata->dirused[dir] == 2)
637 return;
638 if (cbdata->dirused[dir])
639 {
640 cbdata->dirused[dir] = 2;
641 return;
642 }
643 cbdata->dirused[dir] = 2;
644 }
645 cbdata->dirused[0] = 2;
646}
647
648/*
649 * pass 1 callback:
650 * collect key/id/dirid usage information, create needed schemas
651 */
652static int
653repo_write_collect_needed(struct cbdata *cbdata, Repo *repo, Repodata *data, Repokey *key, KeyValue *kv)
654{
655 Id id;
656 int rm;
657
658 if (key->name == REPOSITORY_SOLVABLES)
659 return SEARCH_NEXT_KEY; /* we do not want this one */
660
661 /* hack: ignore some keys, see BUGS */
662 if (data->repodataid != data->repo->nrepodata - 1)
663 if (key->name == REPOSITORY_ADDEDFILEPROVIDES || key->name == REPOSITORY_EXTERNAL || key->name == REPOSITORY_LOCATION || key->name == REPOSITORY_KEYS || key->name == REPOSITORY_TOOLVERSION)
664 return SEARCH_NEXT_KEY;
665
666 rm = cbdata->keymap[cbdata->keymapstart[data->repodataid] + (key - data->keys)];
667 if (!rm)
668 return SEARCH_NEXT_KEY; /* we do not want this one */
669
670 /* record key in schema */
671 if ((key->type != REPOKEY_TYPE_FIXARRAY || kv->eof == 0)
672 && (cbdata->sp == cbdata->schema || cbdata->sp[-1] != rm))
673 *cbdata->sp++ = rm;
674
675 switch(key->type)
676 {
677 case REPOKEY_TYPE_ID:
678 case REPOKEY_TYPE_IDARRAY:
679 id = kv->id;
680 if (!ISRELDEP(id) && cbdata->ownspool && id > 1)
681 id = putinownpool(cbdata, data->localpool ? &data->spool : &repo->pool->ss, id);
682 incneedid(repo->pool, id, cbdata->needid);
683 break;
684 case REPOKEY_TYPE_DIR:
685 case REPOKEY_TYPE_DIRNUMNUMARRAY:
686 case REPOKEY_TYPE_DIRSTRARRAY:
687 id = kv->id;
688 if (cbdata->owndirpool)
689 putinowndirpool(cbdata, data, &data->dirpool, id);
690 else
691 setdirused(cbdata, &data->dirpool, id);
692 break;
693 case REPOKEY_TYPE_FIXARRAY:
694 if (kv->eof == 0)
695 {
696 if (cbdata->oldschema)
697 {
698 cbdata->target->error = pool_error(cbdata->repo->pool, -1, "nested fixarray structs not yet implemented");
699 return SEARCH_NEXT_KEY;
700 }
701 cbdata->oldschema = cbdata->schema;
702 cbdata->oldsp = cbdata->sp;
703 cbdata->schema = solv_calloc(cbdata->target->nkeys, sizeof(Id));
704 cbdata->sp = cbdata->schema;
705 }
706 else if (kv->eof == 1)
707 {
708 cbdata->current_sub++;
709 *cbdata->sp = 0;
710 cbdata->subschemata = solv_extend(cbdata->subschemata, cbdata->nsubschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
711 cbdata->subschemata[cbdata->nsubschemata++] = repodata_schema2id(cbdata->target, cbdata->schema, 1);
712#if 0
713 fprintf(stderr, "Have schema %d\n", cbdata->subschemata[cbdata->nsubschemata-1]);
714#endif
715 cbdata->sp = cbdata->schema;
716 }
717 else
718 {
719 solv_free(cbdata->schema);
720 cbdata->schema = cbdata->oldschema;
721 cbdata->sp = cbdata->oldsp;
722 cbdata->oldsp = cbdata->oldschema = 0;
723 }
724 break;
725 case REPOKEY_TYPE_FLEXARRAY:
726 if (kv->entry == 0)
727 {
728 if (kv->eof != 2)
729 *cbdata->sp++ = 0; /* mark start */
730 }
731 else
732 {
733 /* just finished a schema, rewind */
734 Id *sp = cbdata->sp - 1;
735 *sp = 0;
736 while (sp[-1])
737 sp--;
738 cbdata->subschemata = solv_extend(cbdata->subschemata, cbdata->nsubschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
739 cbdata->subschemata[cbdata->nsubschemata++] = repodata_schema2id(cbdata->target, sp, 1);
740 cbdata->sp = kv->eof == 2 ? sp - 1: sp;
741 }
742 break;
743 default:
744 break;
745 }
746 return 0;
747}
748
749static int
750repo_write_cb_needed(void *vcbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
751{
752 struct cbdata *cbdata = vcbdata;
753 Repo *repo = data->repo;
754
755#if 0
756 if (s)
757 fprintf(stderr, "solvable %d (%s): key (%d)%s %d\n", s ? s - repo->pool->solvables : 0, s ? pool_id2str(repo->pool, s->name) : "", key->name, pool_id2str(repo->pool, key->name), key->type);
758#endif
759 return repo_write_collect_needed(cbdata, repo, data, key, kv);
760}
761
762
763/*
764 * pass 2 callback:
765 * encode all of the data into the correct buffers
766 */
767
768static int
769repo_write_adddata(struct cbdata *cbdata, Repodata *data, Repokey *key, KeyValue *kv)
770{
771 int rm;
772 Id id;
773 unsigned int u32;
774 unsigned char v[4];
775 struct extdata *xd;
776 NeedId *needid;
777
778 if (key->name == REPOSITORY_SOLVABLES)
779 return SEARCH_NEXT_KEY;
780
781 /* hack: ignore some keys, see BUGS */
782 if (data->repodataid != data->repo->nrepodata - 1)
783 if (key->name == REPOSITORY_ADDEDFILEPROVIDES || key->name == REPOSITORY_EXTERNAL || key->name == REPOSITORY_LOCATION || key->name == REPOSITORY_KEYS || key->name == REPOSITORY_TOOLVERSION)
784 return SEARCH_NEXT_KEY;
785
786 rm = cbdata->keymap[cbdata->keymapstart[data->repodataid] + (key - data->keys)];
787 if (!rm)
788 return SEARCH_NEXT_KEY; /* we do not want this one */
789
790 if (cbdata->target->keys[rm].storage == KEY_STORAGE_VERTICAL_OFFSET)
791 {
792 xd = cbdata->extdata + rm; /* vertical buffer */
793 if (cbdata->vstart == -1)
794 cbdata->vstart = xd->len;
795 }
796 else
797 xd = cbdata->extdata + 0; /* incore buffer */
798 switch(key->type)
799 {
800 case REPOKEY_TYPE_VOID:
801 case REPOKEY_TYPE_CONSTANT:
802 case REPOKEY_TYPE_CONSTANTID:
803 break;
804 case REPOKEY_TYPE_ID:
805 id = kv->id;
806 if (!ISRELDEP(id) && cbdata->ownspool && id > 1)
807 id = putinownpool(cbdata, data->localpool ? &data->spool : &data->repo->pool->ss, id);
808 needid = cbdata->needid;
809 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
810 data_addid(xd, id);
811 break;
812 case REPOKEY_TYPE_IDARRAY:
813 id = kv->id;
814 if (!ISRELDEP(id) && cbdata->ownspool && id > 1)
815 id = putinownpool(cbdata, data->localpool ? &data->spool : &data->repo->pool->ss, id);
816 needid = cbdata->needid;
817 id = needid[ISRELDEP(id) ? RELOFF(id) : id].need;
818 data_addideof(xd, id, kv->eof);
819 break;
820 case REPOKEY_TYPE_STR:
821 data_addblob(xd, (unsigned char *)kv->str, strlen(kv->str) + 1);
822 break;
823 case REPOKEY_TYPE_MD5:
824 data_addblob(xd, (unsigned char *)kv->str, SIZEOF_MD5);
825 break;
826 case REPOKEY_TYPE_SHA1:
827 data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA1);
828 break;
829 case REPOKEY_TYPE_SHA224:
830 data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA224);
831 break;
832 case REPOKEY_TYPE_SHA256:
833 data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA256);
834 break;
835 case REPOKEY_TYPE_SHA384:
836 data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA384);
837 break;
838 case REPOKEY_TYPE_SHA512:
839 data_addblob(xd, (unsigned char *)kv->str, SIZEOF_SHA512);
840 break;
841 case REPOKEY_TYPE_U32:
842 u32 = kv->num;
843 v[0] = u32 >> 24;
844 v[1] = u32 >> 16;
845 v[2] = u32 >> 8;
846 v[3] = u32;
847 data_addblob(xd, v, 4);
848 break;
849 case REPOKEY_TYPE_NUM:
850 data_addid64(xd, kv->num, kv->num2);
851 break;
852 case REPOKEY_TYPE_DIR:
853 id = kv->id;
854 if (cbdata->owndirpool)
855 id = putinowndirpool(cbdata, data, &data->dirpool, id);
856 id = cbdata->dirused[id];
857 data_addid(xd, id);
858 break;
859 case REPOKEY_TYPE_BINARY:
860 data_addid(xd, kv->num);
861 if (kv->num)
862 data_addblob(xd, (unsigned char *)kv->str, kv->num);
863 break;
864 case REPOKEY_TYPE_DIRNUMNUMARRAY:
865 id = kv->id;
866 if (cbdata->owndirpool)
867 id = putinowndirpool(cbdata, data, &data->dirpool, id);
868 id = cbdata->dirused[id];
869 data_addid(xd, id);
870 data_addid(xd, kv->num);
871 data_addideof(xd, kv->num2, kv->eof);
872 break;
873 case REPOKEY_TYPE_DIRSTRARRAY:
874 id = kv->id;
875 if (cbdata->owndirpool)
876 id = putinowndirpool(cbdata, data, &data->dirpool, id);
877 id = cbdata->dirused[id];
878 if (cbdata->filelistmode > 0)
879 {
880 xd->len += data_addideof_len(id) + strlen(kv->str) + 1;
881 break;
882 }
883 data_addideof(xd, id, kv->eof);
884 data_addblob(xd, (unsigned char *)kv->str, strlen(kv->str) + 1);
885 if (cbdata->filelistmode < 0)
886 return 0;
887 break;
888 case REPOKEY_TYPE_FIXARRAY:
889 if (kv->eof == 0)
890 {
891 if (kv->num)
892 {
893 data_addid(xd, kv->num);
894 data_addid(xd, cbdata->subschemata[cbdata->current_sub]);
895#if 0
896 fprintf(stderr, "writing %d %d\n", kv->num, cbdata->subschemata[cbdata->current_sub]);
897#endif
898 }
899 }
900 else if (kv->eof == 1)
901 {
902 cbdata->current_sub++;
903 }
904 break;
905 case REPOKEY_TYPE_FLEXARRAY:
906 if (!kv->entry)
907 data_addid(xd, kv->num);
908 if (kv->eof != 2)
909 data_addid(xd, cbdata->subschemata[cbdata->current_sub++]);
910 if (xd == cbdata->extdata + 0 && !kv->parent && !cbdata->doingsolvables)
911 {
912 if (xd->len - cbdata->lastlen > cbdata->maxdata)
913 cbdata->maxdata = xd->len - cbdata->lastlen;
914 cbdata->lastlen = xd->len;
915 }
916 break;
917 default:
918 cbdata->target->error = pool_error(cbdata->repo->pool, -1, "unknown type for %d: %d\n", key->name, key->type);
919 break;
920 }
921 if (cbdata->target->keys[rm].storage == KEY_STORAGE_VERTICAL_OFFSET && kv->eof)
922 {
923 /* we can re-use old data in the blob here! */
924 data_addid(cbdata->extdata + 0, cbdata->vstart); /* add offset into incore data */
925 data_addid(cbdata->extdata + 0, xd->len - cbdata->vstart); /* add length into incore data */
926 cbdata->vstart = -1;
927 }
928 return 0;
929}
930
931static int
932repo_write_cb_adddata(void *vcbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
933{
934 struct cbdata *cbdata = vcbdata;
935 return repo_write_adddata(cbdata, data, key, kv);
936}
937
938/* traverse through directory with first child "dir" */
939static int
940traverse_dirs(Dirpool *dp, Id *dirmap, Id n, Id dir, Id *used)
941{
942 Id sib, child;
943 Id parent, lastn;
944
945 parent = n;
946 /* special case for '/', which has to come first */
947 if (parent == 1)
948 dirmap[n++] = 1;
949 for (sib = dir; sib; sib = dirpool_sibling(dp, sib))
950 {
951 if (used && !used[sib])
952 continue;
953 if (sib == 1 && parent == 1)
954 continue; /* already did that one above */
955 dirmap[n++] = sib;
956 }
957
958 /* check if our block has some content */
959 if (parent == n)
960 return n - 1; /* nope, drop parent id again */
961
962 /* now go through all the siblings we just added and
963 * do recursive calls on them */
964 lastn = n;
965 for (; parent < lastn; parent++)
966 {
967 sib = dirmap[parent];
968 if (used && used[sib] != 2) /* 2: used as parent */
969 continue;
970 child = dirpool_child(dp, sib);
971 if (child)
972 {
973 dirmap[n++] = -parent; /* start new block */
974 n = traverse_dirs(dp, dirmap, n, child, used);
975 }
976 }
977 return n;
978}
979
980static void
981write_compressed_page(Repodata *data, unsigned char *page, int len)
982{
983 int clen;
984 unsigned char cpage[REPOPAGE_BLOBSIZE];
985
986 clen = repopagestore_compress_page(page, len, cpage, len - 1);
987 if (!clen)
988 {
989 write_u32(data, len * 2);
990 write_blob(data, page, len);
991 }
992 else
993 {
994 write_u32(data, clen * 2 + 1);
995 write_blob(data, cpage, clen);
996 }
997}
998
999static Id verticals[] = {
1000 SOLVABLE_AUTHORS,
1001 SOLVABLE_DESCRIPTION,
1002 SOLVABLE_MESSAGEDEL,
1003 SOLVABLE_MESSAGEINS,
1004 SOLVABLE_EULA,
1005 SOLVABLE_DISKUSAGE,
1006 SOLVABLE_FILELIST,
1007 SOLVABLE_CHECKSUM,
1008 DELTA_CHECKSUM,
1009 DELTA_SEQ_NUM,
1010 SOLVABLE_PKGID,
1011 SOLVABLE_HDRID,
1012 SOLVABLE_LEADSIGID,
1013 SOLVABLE_CHANGELOG_AUTHOR,
1014 SOLVABLE_CHANGELOG_TEXT,
1015 0
1016};
1017
1018static char *languagetags[] = {
1019 "solvable:summary:",
1020 "solvable:description:",
1021 "solvable:messageins:",
1022 "solvable:messagedel:",
1023 "solvable:eula:",
1024 0
1025};
1026
1027int
1028repo_write_stdkeyfilter(Repo *repo, Repokey *key, void *kfdata)
1029{
1030 const char *keyname;
1031 int i;
1032
1033 for (i = 0; verticals[i]; i++)
1034 if (key->name == verticals[i])
1035 return KEY_STORAGE_VERTICAL_OFFSET;
1036 keyname = pool_id2str(repo->pool, key->name);
1037 for (i = 0; languagetags[i] != 0; i++)
1038 if (!strncmp(keyname, languagetags[i], strlen(languagetags[i])))
1039 return KEY_STORAGE_VERTICAL_OFFSET;
1040 return KEY_STORAGE_INCORE;
1041}
1042
1043/*
1044 * return true if the repodata contains the filelist (and just
1045 * the filelist). The same code is used in the dataiterator. The way
1046 * it is used is completely wrong, of course, as having the filelist
1047 * key does not mean it is used for a specific solvable. Nevertheless
1048 * it is better to have it than to write broken solv files.
1049 */
1050static inline int
1051is_filelist_extension(Repodata *data)
1052{
1053 int j;
1054 for (j = 1; j < data->nkeys; j++)
1055 if (data->keys[j].name != REPOSITORY_SOLVABLES && data->keys[j].name != SOLVABLE_FILELIST)
1056 return 0;
1057 return 1;
1058}
1059
1060
1061static int
1062write_compressed_extdata(Repodata *target, struct extdata *xd, unsigned char *vpage, int lpage)
1063{
1064 unsigned char *dp = xd->buf;
1065 int l = xd->len;
1066 while (l)
1067 {
1068 int ll = REPOPAGE_BLOBSIZE - lpage;
1069 if (l < ll)
1070 ll = l;
1071 memcpy(vpage + lpage, dp, ll);
1072 dp += ll;
1073 lpage += ll;
1074 l -= ll;
1075 if (lpage == REPOPAGE_BLOBSIZE)
1076 {
1077 write_compressed_page(target, vpage, lpage);
1078 lpage = 0;
1079 }
1080 }
1081 return lpage;
1082}
1083
1084/*
1085 * Repo
1086 */
1087
1088/*
1089 * the code works the following way:
1090 *
1091 * 1) find which keys should be written
1092 * 2) collect usage information for keys/ids/dirids, create schema
1093 * data
1094 * 3) use usage information to create mapping tables, so that often
1095 * used ids get a lower number
1096 * 4) encode data into buffers using the mapping tables
1097 * 5) write everything to disk
1098 */
1099int
1100repo_write_filtered(Repo *repo, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Queue *keyq)
1101{
1102 Pool *pool = repo->pool;
1103 int i, j, n, lastfilelistn;
1104 Solvable *s;
1105 NeedId *needid;
1106 int nstrings, nrels;
1107 unsigned int sizeid;
1108 unsigned int solv_flags;
1109 Reldep *ran;
1110 Id *idarraydata;
1111
1112 Id id, *sp;
1113
1114 Id *dirmap;
1115 int ndirmap;
1116 Id *keyused;
1117 unsigned char *repodataused;
1118 int anyrepodataused = 0;
1119 int anysolvableused = 0;
1120
1121 struct cbdata cbdata;
1122 int clonepool;
1123 Repokey *key;
1124 int poolusage, dirpoolusage, idused, dirused;
1125 int reloff;
1126
1127 Repodata *data, *dirpooldata;
1128
1129 Repodata target;
1130
1131 Stringpool *spool;
1132 Dirpool *dirpool;
1133
1134 Id mainschema;
1135
1136 struct extdata *xd;
1137
1138 Id type_constantid = REPOKEY_TYPE_CONSTANTID;
1139
1140
1141 memset(&cbdata, 0, sizeof(cbdata));
1142 cbdata.repo = repo;
1143 cbdata.target = &target;
1144
1145 repodata_initdata(&target, repo, 1);
1146
1147 /* go through all repodata and find the keys we need */
1148 /* also unify keys */
1149 /* keymapstart - maps repo number to keymap offset */
1150 /* keymap - maps repo key to my key, 0 -> not used */
1151
1152 /* start with all KEY_STORAGE_SOLVABLE ids */
1153
1154 n = ID_NUM_INTERNAL;
1155 FOR_REPODATAS(repo, i, data)
1156 n += data->nkeys;
1157 cbdata.keymap = solv_calloc(n, sizeof(Id));
1158 cbdata.keymapstart = solv_calloc(repo->nrepodata, sizeof(Id));
1159 repodataused = solv_calloc(repo->nrepodata, 1);
1160
1161 clonepool = 0;
1162 poolusage = 0;
1163
1164 /* add keys for STORAGE_SOLVABLE */
1165 for (i = SOLVABLE_NAME; i <= RPM_RPMDBID; i++)
1166 {
1167 Repokey keyd;
1168 keyd.name = i;
1169 if (i < SOLVABLE_PROVIDES)
1170 keyd.type = REPOKEY_TYPE_ID;
1171 else if (i < RPM_RPMDBID)
1172 keyd.type = REPOKEY_TYPE_REL_IDARRAY;
1173 else
1174 keyd.type = REPOKEY_TYPE_NUM;
1175 keyd.size = 0;
1176 keyd.storage = KEY_STORAGE_SOLVABLE;
1177 if (keyfilter)
1178 {
1179 keyd.storage = keyfilter(repo, &keyd, kfdata);
1180 if (keyd.storage == KEY_STORAGE_DROPPED)
1181 continue;
1182 keyd.storage = KEY_STORAGE_SOLVABLE;
1183 }
1184 poolusage = 1;
1185 clonepool = 1;
1186 cbdata.keymap[keyd.name] = repodata_key2id(&target, &keyd, 1);
1187 }
1188
1189 if (repo->nsolvables)
1190 {
1191 Repokey keyd;
1192 keyd.name = REPOSITORY_SOLVABLES;
1193 keyd.type = REPOKEY_TYPE_FLEXARRAY;
1194 keyd.size = 0;
1195 keyd.storage = KEY_STORAGE_INCORE;
1196 cbdata.keymap[keyd.name] = repodata_key2id(&target, &keyd, 1);
1197 }
1198
1199 dirpoolusage = 0;
1200
1201 spool = 0;
1202 dirpool = 0;
1203 dirpooldata = 0;
1204 n = ID_NUM_INTERNAL;
1205 lastfilelistn = 0;
1206 FOR_REPODATAS(repo, i, data)
1207 {
1208 cbdata.keymapstart[i] = n;
1209 cbdata.keymap[n++] = 0; /* key 0 */
1210 idused = 0;
1211 dirused = 0;
1212 if (keyfilter)
1213 {
1214 Repokey keyd;
1215 /* check if we want this repodata */
1216 memset(&keyd, 0, sizeof(keyd));
1217 keyd.name = 1;
1218 keyd.type = 1;
1219 keyd.size = i;
1220 if (keyfilter(repo, &keyd, kfdata) == -1)
1221 continue;
1222 }
1223 for (j = 1; j < data->nkeys; j++, n++)
1224 {
1225 key = data->keys + j;
1226 if (key->name == REPOSITORY_SOLVABLES && key->type == REPOKEY_TYPE_FLEXARRAY)
1227 {
1228 cbdata.keymap[n] = cbdata.keymap[key->name];
1229 continue;
1230 }
1231 if (key->type == REPOKEY_TYPE_DELETED)
1232 {
1233 cbdata.keymap[n] = 0;
1234 continue;
1235 }
1236 if (key->type == REPOKEY_TYPE_CONSTANTID && data->localpool)
1237 {
1238 Repokey keyd = *key;
1239 keyd.size = repodata_globalize_id(data, key->size, 1);
1240 id = repodata_key2id(&target, &keyd, 0);
1241 }
1242 else
1243 id = repodata_key2id(&target, key, 0);
1244 if (!id)
1245 {
1246 Repokey keyd = *key;
1247 keyd.storage = KEY_STORAGE_INCORE;
1248 if (keyd.type == REPOKEY_TYPE_CONSTANTID)
1249 keyd.size = repodata_globalize_id(data, key->size, 1);
1250 else if (keyd.type != REPOKEY_TYPE_CONSTANT)
1251 keyd.size = 0;
1252 if (keyfilter)
1253 {
1254 keyd.storage = keyfilter(repo, &keyd, kfdata);
1255 if (keyd.storage == KEY_STORAGE_DROPPED)
1256 {
1257 cbdata.keymap[n] = 0;
1258 continue;
1259 }
1260 }
1261 id = repodata_key2id(&target, &keyd, 1);
1262 }
1263 cbdata.keymap[n] = id;
1264 /* load repodata if not already loaded */
1265 if (data->state == REPODATA_STUB)
1266 {
1267 if (data->loadcallback)
1268 data->loadcallback(data);
1269 else
1270 data->state = REPODATA_ERROR;
1271 if (data->state != REPODATA_ERROR)
1272 {
1273 /* redo this repodata! */
1274 j = 0;
1275 n = cbdata.keymapstart[i];
1276 continue;
1277 }
1278 }
1279 if (data->state == REPODATA_ERROR)
1280 {
1281 /* too bad! */
1282 cbdata.keymap[n] = 0;
1283 continue;
1284 }
1285
1286 repodataused[i] = 1;
1287 anyrepodataused = 1;
1288 if (key->type == REPOKEY_TYPE_CONSTANTID || key->type == REPOKEY_TYPE_ID ||
1289 key->type == REPOKEY_TYPE_IDARRAY || key->type == REPOKEY_TYPE_REL_IDARRAY)
1290 idused = 1;
1291 else if (key->type == REPOKEY_TYPE_DIR || key->type == REPOKEY_TYPE_DIRNUMNUMARRAY || key->type == REPOKEY_TYPE_DIRSTRARRAY)
1292 {
1293 idused = 1; /* dirs also use ids */
1294 dirused = 1;
1295 }
1296 if (key->type == REPOKEY_TYPE_DIRSTRARRAY && key->name == SOLVABLE_FILELIST)
1297 {
1298 /* is this a file list extension */
1299 if (is_filelist_extension(data))
1300 {
1301 /* hmm, we have a file list extension. Kill filelist of other repodata.
1302 * XXX: this is wrong, as the extension does not need to cover all
1303 * solvables of the other repodata */
1304 if (lastfilelistn)
1305 cbdata.keymap[lastfilelistn] = 0;
1306 }
1307 else
1308 lastfilelistn = n;
1309 }
1310 }
1311 if (idused)
1312 {
1313 if (data->localpool)
1314 {
1315 if (poolusage)
1316 poolusage = 3; /* need own pool */
1317 else
1318 {
1319 poolusage = 2;
1320 spool = &data->spool;
1321 }
1322 }
1323 else
1324 {
1325 if (poolusage == 0)
1326 poolusage = 1;
1327 else if (poolusage != 1)
1328 poolusage = 3; /* need own pool */
1329 }
1330 }
1331 if (dirused)
1332 {
1333 if (dirpoolusage)
1334 dirpoolusage = 3; /* need own dirpool */
1335 else
1336 {
1337 dirpoolusage = 2;
1338 dirpool = &data->dirpool;
1339 dirpooldata = data;
1340 }
1341 }
1342 }
1343 cbdata.nkeymap = n;
1344
1345 /* 0: no pool needed at all */
1346 /* 1: use global pool */
1347 /* 2: use repodata local pool */
1348 /* 3: need own pool */
1349 if (poolusage == 3)
1350 {
1351 spool = &target.spool;
1352 /* hack: reuse global pool data so we don't have to map pool ids */
1353 if (clonepool)
1354 {
1355 stringpool_free(spool);
1356 stringpool_clone(spool, &pool->ss);
1357 }
1358 cbdata.ownspool = spool;
1359 }
1360 else if (poolusage == 0 || poolusage == 1)
1361 {
1362 poolusage = 1;
1363 spool = &pool->ss;
1364 }
1365
1366 if (dirpoolusage == 3)
1367 {
1368 dirpool = &target.dirpool;
1369 dirpooldata = 0;
1370 cbdata.owndirpool = dirpool;
1371 }
1372 else if (dirpool)
1373 cbdata.dirused = solv_calloc(dirpool->ndirs, sizeof(Id));
1374
1375
1376/********************************************************************/
1377#if 0
1378fprintf(stderr, "poolusage: %d\n", poolusage);
1379fprintf(stderr, "dirpoolusage: %d\n", dirpoolusage);
1380fprintf(stderr, "nkeys: %d\n", target.nkeys);
1381for (i = 1; i < target.nkeys; i++)
1382 fprintf(stderr, " %2d: %s[%d] %d %d %d\n", i, pool_id2str(pool, target.keys[i].name), target.keys[i].name, target.keys[i].type, target.keys[i].size, target.keys[i].storage);
1383#endif
1384
1385 /* copy keys if requested */
1386 if (keyq)
1387 {
1388 queue_empty(keyq);
1389 for (i = 1; i < target.nkeys; i++)
1390 queue_push2(keyq, target.keys[i].name, target.keys[i].type);
1391 }
1392
1393 if (poolusage > 1)
1394 {
1395 /* put all the keys we need in our string pool */
1396 /* put mapped ids right into target.keys */
1397 for (i = 1, key = target.keys + i; i < target.nkeys; i++, key++)
1398 {
1399 key->name = stringpool_str2id(spool, pool_id2str(pool, key->name), 1);
1400 if (key->type == REPOKEY_TYPE_CONSTANTID)
1401 {
1402 key->type = stringpool_str2id(spool, pool_id2str(pool, key->type), 1);
1403 type_constantid = key->type;
1404 key->size = stringpool_str2id(spool, pool_id2str(pool, key->size), 1);
1405 }
1406 else
1407 key->type = stringpool_str2id(spool, pool_id2str(pool, key->type), 1);
1408 }
1409 if (poolusage == 2)
1410 stringpool_freehash(spool); /* free some mem */
1411 }
1412
1413
1414/********************************************************************/
1415
1416 /* set needed count of all strings and rels,
1417 * find which keys are used in the solvables
1418 * put all strings in own spool
1419 */
1420
1421 reloff = spool->nstrings;
1422 if (poolusage == 3)
1423 reloff = (reloff + NEEDED_BLOCK) & ~NEEDED_BLOCK;
1424
1425 needid = calloc(reloff + pool->nrels, sizeof(*needid));
1426 needid[0].map = reloff;
1427
1428 cbdata.needid = needid;
1429 cbdata.schema = solv_calloc(target.nkeys, sizeof(Id));
1430 cbdata.sp = cbdata.schema;
1431 cbdata.solvschemata = solv_calloc(repo->nsolvables, sizeof(Id));
1432
1433 /* create main schema */
1434 cbdata.sp = cbdata.schema;
1435 /* collect all other data from all repodatas */
1436 /* XXX: merge arrays of equal keys? */
1437 FOR_REPODATAS(repo, j, data)
1438 {
1439 if (!repodataused[j])
1440 continue;
1441 repodata_search(data, SOLVID_META, 0, SEARCH_SUB|SEARCH_ARRAYSENTINEL, repo_write_cb_needed, &cbdata);
1442 }
1443 sp = cbdata.sp;
1444 /* add solvables if needed (may revert later) */
1445 if (repo->nsolvables)
1446 {
1447 *sp++ = cbdata.keymap[REPOSITORY_SOLVABLES];
1448 target.keys[cbdata.keymap[REPOSITORY_SOLVABLES]].size++;
1449 }
1450 *sp = 0;
1451 mainschema = repodata_schema2id(cbdata.target, cbdata.schema, 1);
1452
1453 idarraydata = repo->idarraydata;
1454
1455 anysolvableused = 0;
1456 cbdata.doingsolvables = 1;
1457 for (i = repo->start, s = pool->solvables + i, n = 0; i < repo->end; i++, s++)
1458 {
1459 if (s->repo != repo)
1460 continue;
1461
1462 /* set schema info, keep in sync with further down */
1463 sp = cbdata.schema;
1464 if (cbdata.keymap[SOLVABLE_NAME])
1465 {
1466 *sp++ = cbdata.keymap[SOLVABLE_NAME];
1467 needid[s->name].need++;
1468 }
1469 if (cbdata.keymap[SOLVABLE_ARCH])
1470 {
1471 *sp++ = cbdata.keymap[SOLVABLE_ARCH];
1472 needid[s->arch].need++;
1473 }
1474 if (cbdata.keymap[SOLVABLE_EVR])
1475 {
1476 *sp++ = cbdata.keymap[SOLVABLE_EVR];
1477 needid[s->evr].need++;
1478 }
1479 if (s->vendor && cbdata.keymap[SOLVABLE_VENDOR])
1480 {
1481 *sp++ = cbdata.keymap[SOLVABLE_VENDOR];
1482 needid[s->vendor].need++;
1483 }
1484 if (s->provides && cbdata.keymap[SOLVABLE_PROVIDES])
1485 {
1486 *sp++ = cbdata.keymap[SOLVABLE_PROVIDES];
1487 target.keys[cbdata.keymap[SOLVABLE_PROVIDES]].size += incneedidarray(pool, idarraydata + s->provides, needid);
1488 }
1489 if (s->obsoletes && cbdata.keymap[SOLVABLE_OBSOLETES])
1490 {
1491 *sp++ = cbdata.keymap[SOLVABLE_OBSOLETES];
1492 target.keys[cbdata.keymap[SOLVABLE_OBSOLETES]].size += incneedidarray(pool, idarraydata + s->obsoletes, needid);
1493 }
1494 if (s->conflicts && cbdata.keymap[SOLVABLE_CONFLICTS])
1495 {
1496 *sp++ = cbdata.keymap[SOLVABLE_CONFLICTS];
1497 target.keys[cbdata.keymap[SOLVABLE_CONFLICTS]].size += incneedidarray(pool, idarraydata + s->conflicts, needid);
1498 }
1499 if (s->requires && cbdata.keymap[SOLVABLE_REQUIRES])
1500 {
1501 *sp++ = cbdata.keymap[SOLVABLE_REQUIRES];
1502 target.keys[cbdata.keymap[SOLVABLE_REQUIRES]].size += incneedidarray(pool, idarraydata + s->requires, needid);
1503 }
1504 if (s->recommends && cbdata.keymap[SOLVABLE_RECOMMENDS])
1505 {
1506 *sp++ = cbdata.keymap[SOLVABLE_RECOMMENDS];
1507 target.keys[cbdata.keymap[SOLVABLE_RECOMMENDS]].size += incneedidarray(pool, idarraydata + s->recommends, needid);
1508 }
1509 if (s->suggests && cbdata.keymap[SOLVABLE_SUGGESTS])
1510 {
1511 *sp++ = cbdata.keymap[SOLVABLE_SUGGESTS];
1512 target.keys[cbdata.keymap[SOLVABLE_SUGGESTS]].size += incneedidarray(pool, idarraydata + s->suggests, needid);
1513 }
1514 if (s->supplements && cbdata.keymap[SOLVABLE_SUPPLEMENTS])
1515 {
1516 *sp++ = cbdata.keymap[SOLVABLE_SUPPLEMENTS];
1517 target.keys[cbdata.keymap[SOLVABLE_SUPPLEMENTS]].size += incneedidarray(pool, idarraydata + s->supplements, needid);
1518 }
1519 if (s->enhances && cbdata.keymap[SOLVABLE_ENHANCES])
1520 {
1521 *sp++ = cbdata.keymap[SOLVABLE_ENHANCES];
1522 target.keys[cbdata.keymap[SOLVABLE_ENHANCES]].size += incneedidarray(pool, idarraydata + s->enhances, needid);
1523 }
1524 if (repo->rpmdbid && cbdata.keymap[RPM_RPMDBID])
1525 {
1526 *sp++ = cbdata.keymap[RPM_RPMDBID];
1527 target.keys[cbdata.keymap[RPM_RPMDBID]].size++;
1528 }
1529 cbdata.sp = sp;
1530
1531 if (anyrepodataused)
1532 {
1533 FOR_REPODATAS(repo, j, data)
1534 {
1535 if (!repodataused[j])
1536 continue;
1537 if (i < data->start || i >= data->end)
1538 continue;
1539 repodata_search(data, i, 0, SEARCH_SUB|SEARCH_ARRAYSENTINEL, repo_write_cb_needed, &cbdata);
1540 needid = cbdata.needid;
1541 }
1542 }
1543 *cbdata.sp = 0;
1544 cbdata.solvschemata[n] = repodata_schema2id(cbdata.target, cbdata.schema, 1);
1545 if (cbdata.solvschemata[n])
1546 anysolvableused = 1;
1547 n++;
1548 }
1549 cbdata.doingsolvables = 0;
1550 assert(n == repo->nsolvables);
1551
1552 if (repo->nsolvables && !anysolvableused)
1553 {
1554 /* strip off solvable from the main schema */
1555 target.keys[cbdata.keymap[REPOSITORY_SOLVABLES]].size = 0;
1556 sp = cbdata.schema;
1557 for (i = 0; target.schemadata[target.schemata[mainschema] + i]; i++)
1558 {
1559 *sp = target.schemadata[target.schemata[mainschema] + i];
1560 if (*sp != cbdata.keymap[REPOSITORY_SOLVABLES])
1561 sp++;
1562 }
1563 assert(target.schemadatalen == target.schemata[mainschema] + i + 1);
1564 *sp = 0;
1565 target.schemadatalen = target.schemata[mainschema];
1566 target.nschemata--;
1567 repodata_free_schemahash(&target);
1568 mainschema = repodata_schema2id(cbdata.target, cbdata.schema, 1);
1569 }
1570
1571/********************************************************************/
1572
1573 /* remove unused keys */
1574 keyused = solv_calloc(target.nkeys, sizeof(Id));
1575 for (i = 1; i < (int)target.schemadatalen; i++)
1576 keyused[target.schemadata[i]] = 1;
1577 keyused[0] = 0;
1578 for (n = i = 1; i < target.nkeys; i++)
1579 {
1580 if (!keyused[i])
1581 continue;
1582 keyused[i] = n;
1583 if (i != n)
1584 {
1585 target.keys[n] = target.keys[i];
1586 if (keyq)
1587 {
1588 keyq->elements[2 * n - 2] = keyq->elements[2 * i - 2];
1589 keyq->elements[2 * n - 1] = keyq->elements[2 * i - 1];
1590 }
1591 }
1592 n++;
1593 }
1594 target.nkeys = n;
1595 if (keyq)
1596 queue_truncate(keyq, 2 * n - 2);
1597
1598 /* update schema data to the new key ids */
1599 for (i = 1; i < (int)target.schemadatalen; i++)
1600 target.schemadata[i] = keyused[target.schemadata[i]];
1601 /* update keymap to the new key ids */
1602 for (i = 0; i < cbdata.nkeymap; i++)
1603 cbdata.keymap[i] = keyused[cbdata.keymap[i]];
1604 keyused = solv_free(keyused);
Value stored to 'keyused' is never read
1605
1606 /* increment needid of the used keys, they are already mapped to
1607 * the correct string pool */
1608 for (i = 1; i < target.nkeys; i++)
1609 {
1610 if (target.keys[i].type == type_constantid)
1611 needid[target.keys[i].size].need++;
1612 needid[target.keys[i].name].need++;
1613 needid[target.keys[i].type].need++;
1614 }
1615
1616/********************************************************************/
1617
1618 if (dirpool && cbdata.dirused && !cbdata.dirused[0])
1619 {
1620 /* no dirs used at all */
1621 cbdata.dirused = solv_free(cbdata.dirused);
1622 dirpool = 0;
1623 }
1624
1625 /* increment need id for used dir components */
1626 if (dirpool)
1627 {
1628 /* if we have own dirpool, all entries in it are used.
1629 also, all comp ids are already mapped by putinowndirpool(),
1630 so we can simply increment needid.
1631 (owndirpool != 0, dirused == 0, dirpooldata == 0) */
1632 /* else we re-use a dirpool of repodata "dirpooldata".
1633 dirused tells us which of the ids are used.
1634 we need to map comp ids if we generate a new pool.
1635 (owndirpool == 0, dirused != 0, dirpooldata != 0) */
1636 for (i = 1; i < dirpool->ndirs; i++)
1637 {
1638#if 0
1639fprintf(stderr, "dir %d used %d\n", i, cbdata.dirused ? cbdata.dirused[i] : 1);
1640#endif
1641 if (cbdata.dirused && !cbdata.dirused[i])
1642 continue;
1643 id = dirpool->dirs[i];
1644 if (id <= 0)
1645 continue;
1646 if (dirpooldata && cbdata.ownspool && id > 1)
1647 {
1648 id = putinownpool(&cbdata, dirpooldata->localpool ? &dirpooldata->spool : &pool->ss, id);
1649 needid = cbdata.needid;
1650 }
1651 needid[id].need++;
1652 }
1653 }
1654
1655
1656/********************************************************************/
1657
1658 /*
1659 * create mapping table, new keys are sorted by needid[].need
1660 *
1661 * needid[key].need : old key -> new key
1662 * needid[key].map : new key -> old key
1663 */
1664
1665 /* zero out id 0 and rel 0 just in case */
1666 reloff = needid[0].map;
1667 needid[0].need = 0;
1668 needid[reloff].need = 0;
1669
1670 for (i = 1; i < reloff + pool->nrels; i++)
1671 needid[i].map = i;
1672
1673#if 0
1674 solv_sort(needid + 1, spool->nstrings - 1, sizeof(*needid), needid_cmp_need_s, spool);
1675#else
1676 /* make first entry '' */
1677 needid[1].need = 1;
1678 solv_sort(needid + 2, spool->nstrings - 2, sizeof(*needid), needid_cmp_need_s, spool);
1679#endif
1680 solv_sort(needid + reloff, pool->nrels, sizeof(*needid), needid_cmp_need, 0);
1681 /* now needid is in new order, needid[newid].map -> oldid */
1682
1683 /* calculate string space size, also zero out needid[].need */
1684 sizeid = 0;
1685 for (i = 1; i < reloff; i++)
1686 {
1687 if (!needid[i].need)
1688 break; /* as we have sorted, every entry after this also has need == 0 */
1689 needid[i].need = 0;
1690 sizeid += strlen(spool->stringspace + spool->strings[needid[i].map]) + 1;
1691 }
1692 nstrings = i; /* our new string id end */
1693
1694 /* make needid[oldid].need point to newid */
1695 for (i = 1; i < nstrings; i++)
1696 needid[needid[i].map].need = i;
1697
1698 /* same as above for relations */
1699 for (i = 0; i < pool->nrels; i++)
1700 {
1701 if (!needid[reloff + i].need)
1702 break;
1703 needid[reloff + i].need = 0;
1704 }
1705 nrels = i; /* our new rel id end */
1706
1707 for (i = 0; i < nrels; i++)
1708 needid[needid[reloff + i].map].need = nstrings + i;
1709
1710 /* now we have: needid[oldid].need -> newid
1711 needid[newid].map -> oldid
1712 both for strings and relations */
1713
1714
1715/********************************************************************/
1716
1717 ndirmap = 0;
1718 dirmap = 0;
1719 if (dirpool)
1720 {
1721 /* create our new target directory structure by traversing through all
1722 * used dirs. This will concatenate blocks with the same parent
1723 * directory into single blocks.
1724 * Instead of components, traverse_dirs stores the old dirids,
1725 * we will change this in the second step below */
1726 /* (dirpooldata and dirused are 0 if we have our own dirpool) */
1727 if (cbdata.dirused && !cbdata.dirused[1])
1728 cbdata.dirused[1] = 1; /* always want / entry */
1729 dirmap = solv_calloc(dirpool->ndirs, sizeof(Id));
1730 dirmap[0] = 0;
1731 ndirmap = traverse_dirs(dirpool, dirmap, 1, dirpool_child(dirpool, 0), cbdata.dirused);
1732
1733 /* (re)create dirused, so that it maps from "old dirid" to "new dirid" */
1734 /* change dirmap so that it maps from "new dirid" to "new compid" */
1735 if (!cbdata.dirused)
1736 cbdata.dirused = solv_malloc2(dirpool->ndirs, sizeof(Id));
1737 memset(cbdata.dirused, 0, dirpool->ndirs * sizeof(Id));
1738 for (i = 1; i < ndirmap; i++)
1739 {
1740 if (dirmap[i] <= 0)
1741 continue;
1742 cbdata.dirused[dirmap[i]] = i;
1743 id = dirpool->dirs[dirmap[i]];
1744 if (dirpooldata && cbdata.ownspool && id > 1)
1745 id = putinownpool(&cbdata, dirpooldata->localpool ? &dirpooldata->spool : &pool->ss, id);
1746 dirmap[i] = needid[id].need;
1747 }
1748 /* now the new target directory structure is complete (dirmap), and we have
1749 * dirused[olddirid] -> newdirid */
1750 }
1751
1752/********************************************************************/
1753
1754 /* collect all data
1755 * we use extdata[0] for incore data and extdata[keyid] for vertical data
1756 */
1757
1758 cbdata.extdata = solv_calloc(target.nkeys, sizeof(struct extdata));
1759
1760 xd = cbdata.extdata;
1761 cbdata.current_sub = 0;
1762 /* add main schema */
1763 cbdata.lastlen = 0;
1764 data_addid(xd, mainschema);
1765
1766#if 1
1767 FOR_REPODATAS(repo, j, data)
1768 {
1769 if (!repodataused[j])
1770 continue;
1771 repodata_search(data, SOLVID_META, 0, SEARCH_SUB|SEARCH_ARRAYSENTINEL, repo_write_cb_adddata, &cbdata);
1772 }
1773#endif
1774
1775 if (xd->len - cbdata.lastlen > cbdata.maxdata)
1776 cbdata.maxdata = xd->len - cbdata.lastlen;
1777 cbdata.lastlen = xd->len;
1778
1779 if (anysolvableused)
1780 {
1781 data_addid(xd, repo->nsolvables); /* FLEXARRAY nentries */
1782 cbdata.doingsolvables = 1;
1783
1784 /* check if we can do the special filelist memory optimization */
1785 if (anyrepodataused)
1786 {
1787 for (i = 1; i < target.nkeys; i++)
1788 if (target.keys[i].storage == KEY_STORAGE_VERTICAL_OFFSET)
1789 cbdata.filelistmode |= cbdata.filelistmode == 0 && target.keys[i].type == REPOKEY_TYPE_DIRSTRARRAY ? 1 : 2;
1790 else if (target.keys[i].type == REPOKEY_TYPE_DIRSTRARRAY)
1791 cbdata.filelistmode = 2;
1792 if (cbdata.filelistmode != 1)
1793 cbdata.filelistmode = 0;
1794 }
1795
1796 for (i = repo->start, s = pool->solvables + i, n = 0; i < repo->end; i++, s++)
1797 {
1798 if (s->repo != repo)
1799 continue;
1800 data_addid(xd, cbdata.solvschemata[n]);
1801 if (cbdata.keymap[SOLVABLE_NAME])
1802 data_addid(xd, needid[s->name].need);
1803 if (cbdata.keymap[SOLVABLE_ARCH])
1804 data_addid(xd, needid[s->arch].need);
1805 if (cbdata.keymap[SOLVABLE_EVR])
1806 data_addid(xd, needid[s->evr].need);
1807 if (s->vendor && cbdata.keymap[SOLVABLE_VENDOR])
1808 data_addid(xd, needid[s->vendor].need);
1809 if (s->provides && cbdata.keymap[SOLVABLE_PROVIDES])
1810 data_addidarray_sort(xd, pool, needid, idarraydata + s->provides, SOLVABLE_FILEMARKER);
1811 if (s->obsoletes && cbdata.keymap[SOLVABLE_OBSOLETES])
1812 data_addidarray_sort(xd, pool, needid, idarraydata + s->obsoletes, 0);
1813 if (s->conflicts && cbdata.keymap[SOLVABLE_CONFLICTS])
1814 data_addidarray_sort(xd, pool, needid, idarraydata + s->conflicts, 0);
1815 if (s->requires && cbdata.keymap[SOLVABLE_REQUIRES])
1816 data_addidarray_sort(xd, pool, needid, idarraydata + s->requires, SOLVABLE_PREREQMARKER);
1817 if (s->recommends && cbdata.keymap[SOLVABLE_RECOMMENDS])
1818 data_addidarray_sort(xd, pool, needid, idarraydata + s->recommends, 0);
1819 if (s->suggests && cbdata.keymap[SOLVABLE_SUGGESTS])
1820 data_addidarray_sort(xd, pool, needid, idarraydata + s->suggests, 0);
1821 if (s->supplements && cbdata.keymap[SOLVABLE_SUPPLEMENTS])
1822 data_addidarray_sort(xd, pool, needid, idarraydata + s->supplements, 0);
1823 if (s->enhances && cbdata.keymap[SOLVABLE_ENHANCES])
1824 data_addidarray_sort(xd, pool, needid, idarraydata + s->enhances, 0);
1825 if (repo->rpmdbid && cbdata.keymap[RPM_RPMDBID])
1826 data_addid(xd, repo->rpmdbid[i - repo->start]);
1827 if (anyrepodataused)
1828 {
1829 cbdata.vstart = -1;
1830 FOR_REPODATAS(repo, j, data)
1831 {
1832 if (!repodataused[j])
1833 continue;
1834 if (i < data->start || i >= data->end)
1835 continue;
1836 repodata_search(data, i, 0, SEARCH_SUB|SEARCH_ARRAYSENTINEL, repo_write_cb_adddata, &cbdata);
1837 }
1838 }
1839 if (xd->len - cbdata.lastlen > cbdata.maxdata)
1840 cbdata.maxdata = xd->len - cbdata.lastlen;
1841 cbdata.lastlen = xd->len;
1842 n++;
1843 }
1844 cbdata.doingsolvables = 0;
1845 }
1846
1847 assert(cbdata.current_sub == cbdata.nsubschemata);
1848 if (cbdata.subschemata)
1849 {
1850 cbdata.subschemata = solv_free(cbdata.subschemata);
1851 cbdata.nsubschemata = 0;
1852 }
1853
1854/********************************************************************/
1855
1856 target.fp = fp;
1857
1858 /* write header */
1859
1860 /* write file header */
1861 write_u32(&target, 'S' << 24 | 'O' << 16 | 'L' << 8 | 'V');
1862 write_u32(&target, SOLV_VERSION_8);
1863
1864
1865 /* write counts */
1866 write_u32(&target, nstrings);
1867 write_u32(&target, nrels);
1868 write_u32(&target, ndirmap);
1869 write_u32(&target, anysolvableused ? repo->nsolvables : 0);
1870 write_u32(&target, target.nkeys);
1871 write_u32(&target, target.nschemata);
1872 solv_flags = 0;
1873 solv_flags |= SOLV_FLAG_PREFIX_POOL;
1874 solv_flags |= SOLV_FLAG_SIZE_BYTES;
1875 write_u32(&target, solv_flags);
1876
1877 if (nstrings)
1878 {
1879 /*
1880 * calculate prefix encoding of the strings
1881 */
1882 unsigned char *prefixcomp = solv_malloc(nstrings);
1883 unsigned int compsum = 0;
1884 char *old_str = "";
1885
1886 prefixcomp[0] = 0;
1887 for (i = 1; i < nstrings; i++)
1888 {
1889 char *str = spool->stringspace + spool->strings[needid[i].map];
1890 int same;
1891 for (same = 0; same < 255; same++)
1892 if (!old_str[same] || old_str[same] != str[same])
1893 break;
1894 prefixcomp[i] = same;
1895 compsum += same;
1896 old_str = str;
1897 }
1898
1899 /*
1900 * write strings
1901 */
1902 write_u32(&target, sizeid);
1903 /* we save compsum bytes but need 1 extra byte for every string */
1904 write_u32(&target, sizeid + nstrings - 1 - compsum);
1905 for (i = 1; i < nstrings; i++)
1906 {
1907 char *str = spool->stringspace + spool->strings[needid[i].map];
1908 write_u8(&target, prefixcomp[i]);
1909 write_str(&target, str + prefixcomp[i]);
1910 }
1911 solv_free(prefixcomp);
1912 }
1913 else
1914 {
1915 write_u32(&target, 0);
1916 write_u32(&target, 0);
1917 }
1918
1919 /*
1920 * write RelDeps
1921 */
1922 for (i = 0; i < nrels; i++)
1923 {
1924 ran = pool->rels + (needid[reloff + i].map - reloff);
1925 write_id(&target, needid[ISRELDEP(ran->name) ? RELOFF(ran->name) : ran->name].need);
1926 write_id(&target, needid[ISRELDEP(ran->evr) ? RELOFF(ran->evr) : ran->evr].need);
1927 write_u8(&target, ran->flags);
1928 }
1929
1930 /*
1931 * write dirs (skip both root and / entry)
1932 */
1933 for (i = 2; i < ndirmap; i++)
1934 {
1935 if (dirmap[i] > 0)
1936 write_id(&target, dirmap[i]);
1937 else
1938 write_id(&target, nstrings - dirmap[i]);
1939 }
1940 solv_free(dirmap);
1941
1942 /*
1943 * write keys
1944 */
1945 for (i = 1; i < target.nkeys; i++)
1946 {
1947 write_id(&target, needid[target.keys[i].name].need);
1948 write_id(&target, needid[target.keys[i].type].need);
1949 if (target.keys[i].storage != KEY_STORAGE_VERTICAL_OFFSET)
1950 {
1951 if (target.keys[i].type == type_constantid)
1952 write_id(&target, needid[target.keys[i].size].need);
1953 else
1954 write_id(&target, target.keys[i].size);
1955 }
1956 else
1957 write_id(&target, cbdata.extdata[i].len);
1958 write_id(&target, target.keys[i].storage);
1959 }
1960
1961 /*
1962 * write schemata
1963 */
1964 write_id(&target, target.schemadatalen); /* XXX -1? */
1965 for (i = 1; i < target.nschemata; i++)
1966 write_idarray(&target, pool, 0, repodata_id2schema(&target, i));
1967
1968/********************************************************************/
1969
1970 write_id(&target, cbdata.maxdata);
1971 write_id(&target, cbdata.extdata[0].len);
1972 if (cbdata.extdata[0].len)
1973 write_blob(&target, cbdata.extdata[0].buf, cbdata.extdata[0].len);
1974 solv_free(cbdata.extdata[0].buf);
1975
1976 /* do we have vertical data? */
1977 for (i = 1; i < target.nkeys; i++)
1978 if (cbdata.extdata[i].len)
1979 break;
1980 if (i < target.nkeys)
1981 {
1982 /* yes, write it in pages */
1983 unsigned char vpage[REPOPAGE_BLOBSIZE];
1984 int lpage = 0;
1985
1986 write_u32(&target, REPOPAGE_BLOBSIZE);
1987 for (i = 1; i < target.nkeys; i++)
1988 if (cbdata.extdata[i].len)
1989 {
1990 if (cbdata.filelistmode)
1991 break;
1992 lpage = write_compressed_extdata(&target, cbdata.extdata + i, vpage, lpage);
1993 }
1994 if (cbdata.filelistmode && i < target.nkeys)
1995 {
1996 /* ok, just this single extdata, which is a filelist */
1997 xd = cbdata.extdata + i;
1998 xd->len = 0;
1999 cbdata.filelistmode = -1;
2000 for (j = 0; j < cbdata.nkeymap; j++)
2001 if (cbdata.keymap[j] != i)
2002 cbdata.keymap[j] = 0;
2003 for (i = repo->start, s = pool->solvables + i; i < repo->end; i++, s++)
2004 {
2005 if (s->repo != repo)
2006 continue;
2007 FOR_REPODATAS(repo, j, data)
2008 {
2009 if (!repodataused[j])
2010 continue;
2011 if (i < data->start || i >= data->end)
2012 continue;
2013 repodata_search(data, i, 0, SEARCH_SUB|SEARCH_ARRAYSENTINEL, repo_write_cb_adddata, &cbdata);
2014 }
2015 if (xd->len > 1024 * 1024)
2016 {
2017 lpage = write_compressed_extdata(&target, xd, vpage, lpage);
2018 xd->len = 0;
2019 }
2020 }
2021 if (xd->len)
2022 lpage = write_compressed_extdata(&target, xd, vpage, lpage);
2023 }
2024 if (lpage)
2025 write_compressed_page(&target, vpage, lpage);
2026 }
2027
2028 for (i = 1; i < target.nkeys; i++)
2029 solv_free(cbdata.extdata[i].buf);
2030 solv_free(cbdata.extdata);
2031
2032 target.fp = 0;
2033 repodata_freedata(&target);
2034
2035 solv_free(needid);
2036 solv_free(cbdata.solvschemata);
2037 solv_free(cbdata.schema);
2038
2039 solv_free(cbdata.keymap);
2040 solv_free(cbdata.keymapstart);
2041 solv_free(cbdata.dirused);
2042 solv_free(repodataused);
2043 return target.error;
2044}
2045
2046struct repodata_write_data {
2047 int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata);
2048 void *kfdata;
2049 int repodataid;
2050};
2051
2052static int
2053repodata_write_keyfilter(Repo *repo, Repokey *key, void *kfdata)
2054{
2055 struct repodata_write_data *wd = kfdata;
2056
2057 /* XXX: special repodata selection hack */
2058 if (key->name == 1 && key->size != wd->repodataid)
2059 return -1;
2060 if (key->storage == KEY_STORAGE_SOLVABLE)
2061 return KEY_STORAGE_DROPPED; /* not part of this repodata */
2062 if (wd->keyfilter)
2063 return (*wd->keyfilter)(repo, key, wd->kfdata);
2064 return key->storage;
2065}
2066
2067int
2068repodata_write_filtered(Repodata *data, FILE *fp, int (*keyfilter)(Repo *repo, Repokey *key, void *kfdata), void *kfdata, Queue *keyq)
2069{
2070 struct repodata_write_data wd;
2071
2072 wd.keyfilter = keyfilter;
2073 wd.kfdata = kfdata;
2074 wd.repodataid = data->repodataid;
2075 return repo_write_filtered(data->repo, fp, repodata_write_keyfilter, &wd, keyq);
2076}
2077
2078int
2079repodata_write(Repodata *data, FILE *fp)
2080{
2081 return repodata_write_filtered(data, fp, repo_write_stdkeyfilter, 0, 0);
2082}
2083
2084int
2085repo_write(Repo *repo, FILE *fp)
2086{
2087 return repo_write_filtered(repo, fp, repo_write_stdkeyfilter, 0, 0);
2088}