centrifuge / commit 7416efb
Use 2to3 to port to Python3 (Andreas Tille)
3 changed files, 450 additions, 0 deletions
debian/changelog
centrifuge (1.0.3-3) UNRELEASED; urgency=medium

  * Use 2to3 to port to Python3
    Closes: #936281

 -- Andreas Tille <tille@debian.org>  Tue, 10 Sep 2019 08:02:24 +0200

centrifuge (1.0.3-2) unstable; urgency=medium

  [ Steffen Moeller ]
debian/patches/2to3.patch
Description: Use 2to3 to port to Python3
Bug-Debian: https://bugs.debian.org/936281
Author: Andreas Tille <tille@debian.org>
Last-Update: Tue, 10 Sep 2019 08:02:24 +0200

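Most of the patch body below is the output of the stock 2to3 fixers: the Python-2-only statement form "print >> stream, ..." becomes a Python 3 print() call with a file= keyword, and dictionary view methods are wrapped in list() where needed. As a minimal, illustrative sketch of the print conversion only (the report() helper and its values are hypothetical, not taken from the Centrifuge sources):

import sys

def report(tax_id, truth, calc):
    # Same formatting pattern as the patched evaluation scripts, written
    # as a Python 3 print() call; the numbers here are made up.
    print("\t{:<10}: {:.6} vs. {:.6} (truth vs. centrifuge)"
          .format(tax_id, truth, calc), file=sys.stderr)

report("9606", 0.25, 0.249)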
--- a/Makefile
+++ b/Makefile
@@ -364,11 +364,11 @@ centrifuge.bat:

centrifuge-build.bat:
echo "@echo off" > centrifuge-build.bat
- echo "python %~dp0/centrifuge-build %*" >> centrifuge-build.bat
+ echo "python3 %~dp0/centrifuge-build %*" >> centrifuge-build.bat

centrifuge-inspect.bat:
echo "@echo off" > centrifuge-inspect.bat
- echo "python %~dp0/centrifuge-inspect %*" >> centrifuge-inspect.bat
+ echo "python3 %~dp0/centrifuge-inspect %*" >> centrifuge-inspect.bat


.PHONY: centrifuge-src
--- a/centrifuge-build
+++ b/centrifuge-build
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3

"""
Copyright 2014, Daehwan Kim <infphilo@gmail.com>
--- a/centrifuge-inspect
+++ b/centrifuge-inspect
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3

"""
Copyright 2014, Daehwan Kim <infphilo@gmail.com>
--- a/evaluation/centrifuge_evaluate.py
+++ b/evaluation/centrifuge_evaluate.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3

import sys, os, subprocess, inspect
import platform, multiprocessing
@@ -25,7 +25,7 @@ def read_taxonomy_tree(tax_file):
"""
def compare_scm(centrifuge_out, true_out, taxonomy_tree, rank):
ancestors = set()
- for tax_id in taxonomy_tree.keys():
+ for tax_id in list(taxonomy_tree.keys()):
if tax_id in ancestors:
continue
while True:
@@ -106,7 +106,7 @@ def compare_scm(centrifuge_out, true_out
unclassified += 1

raw_unique_classified = 0
- for value in db_dic.values():
+ for value in list(db_dic.values()):
if len(value) == 1:
raw_unique_classified += 1
return classified, unique_classified, unclassified, len(db_dic), raw_unique_classified
@@ -152,7 +152,7 @@ def compare_abundance(centrifuge_out, tr
if tax_id in db_dic:
SSR += (abundance - db_dic[tax_id]) ** 2;
if debug:
- print >> sys.stderr, "\t\t\t\t{:<10}: {:.6} vs. {:.6} (truth vs. centrifuge)".format(tax_id, abundance, db_dic[tax_id])
+ print("\t\t\t\t{:<10}: {:.6} vs. {:.6} (truth vs. centrifuge)".format(tax_id, abundance, db_dic[tax_id]), file=sys.stderr)
else:
SSR += (abundance) ** 2

@@ -179,7 +179,7 @@ def sql_execute(sql_db, sql_query):
"""
def create_sql_db(sql_db):
if os.path.exists(sql_db):
- print >> sys.stderr, sql_db, "already exists!"
+ print(sql_db, "already exists!", file=sys.stderr)
return

columns = [
@@ -316,7 +316,7 @@ def evaluate(index_base,
os.mkdir(index_path)
index_fnames = ["%s/%s.%d.cf" % (index_path, index_base, i+1) for i in range(3)]
if not check_files(index_fnames):
- print >> sys.stderr, "Downloading indexes: %s" % ("index")
+ print("Downloading indexes: %s" % ("index"), file=sys.stderr)
os.system("cd %s; wget ftp://ftp.ccb.jhu.edu/pub/infphilo/centrifuge/data/%s.tar.gz; tar xvzf %s.tar.gz; rm %s.tar.gz; ln -s %s/%s* .; cd -" % \
(index_path, index_base, index_base, index_base, index_base, index_base))
assert check_files(index_fnames)
@@ -356,7 +356,7 @@ def evaluate(index_base,
scm_fname = "%s/%s.scm" % (read_path, read_base)
read_fnames = [read1_fname, read2_fname, truth_fname, scm_fname]
if not check_files(read_fnames):
- print >> sys.stderr, "Simulating reads %s_1.fq %s_2.fq ..." % (read_base, read_base)
+ print("Simulating reads %s_1.fq %s_2.fq ..." % (read_base, read_base), file=sys.stderr)
centrifuge_simulate = os.path.join(path_base, "centrifuge_simulate_reads.py")
simulate_cmd = [centrifuge_simulate,
"--num-fragment", str(num_fragment)]
@@ -377,11 +377,11 @@ def evaluate(index_base,
else:
base_fname = read_base + "_single"

- print >> sys.stderr, "Database: %s" % (index_base)
+ print("Database: %s" % (index_base), file=sys.stderr)
if paired:
- print >> sys.stderr, "\t%d million pairs" % (num_fragment / 1000000)
+ print("\t%d million pairs" % (num_fragment / 1000000), file=sys.stderr)
else:
- print >> sys.stderr, "\t%d million reads" % (num_fragment / 1000000)
+ print("\t%d million reads" % (num_fragment / 1000000), file=sys.stderr)

program_bin_base = "%s/.." % path_base
def get_program_version(program, version):
@@ -428,7 +428,7 @@ def evaluate(index_base,
if version:
program_name += ("_%s" % version)

- print >> sys.stderr, "\t%s\t%s" % (program_name, str(datetime.now()))
+ print("\t%s\t%s" % (program_name, str(datetime.now())), file=sys.stderr)
if paired:
program_dir = program_name + "_paired"
else:
@@ -449,7 +449,7 @@ def evaluate(index_base,
program_cmd = get_program_cmd(program, version, read1_fname, read2_fname, out_fname)
start_time = datetime.now()
if verbose:
- print >> sys.stderr, "\t", start_time, " ".join(program_cmd)
+ print("\t", start_time, " ".join(program_cmd), file=sys.stderr)
if program in ["centrifuge"]:
proc = subprocess.Popen(program_cmd, stdout=open(out_fname, "w"), stderr=subprocess.PIPE)
else:
@@ -462,7 +462,7 @@ def evaluate(index_base,
if duration < 0.1:
duration = 0.1
if verbose:
- print >> sys.stderr, "\t", finish_time, "finished:", duration
+ print("\t", finish_time, "finished:", duration, file=sys.stderr)

results = {"strain" : [0, 0, 0],
"species" : [0, 0, 0],
@@ -484,21 +484,21 @@ def evaluate(index_base,
# if rank == "strain":
# assert num_cases == num_fragment

- print >> sys.stderr, "\t\t%s" % rank
- print >> sys.stderr, "\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases)
- print >> sys.stderr, "\t\t\tprecision : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified)
- print >> sys.stderr, "\n\t\t\tfor uniquely classified ",
+ print("\t\t%s" % rank, file=sys.stderr)
+ print("\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases), file=sys.stderr)
+ print("\t\t\tprecision : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified), file=sys.stderr)
+ print("\n\t\t\tfor uniquely classified ", end=' ', file=sys.stderr)
if paired:
- print >> sys.stderr, "pairs"
+ print("pairs", file=sys.stderr)
else:
- print >> sys.stderr, "reads"
- print >> sys.stderr, "\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases)
- print >> sys.stderr, "\t\t\t\t\tprecision : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified)
+ print("reads", file=sys.stderr)
+ print("\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases), file=sys.stderr)
+ print("\t\t\t\t\tprecision : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified), file=sys.stderr)

# Calculate sum of squared residuals in abundance
if rank == "strain":
abundance_SSR = compare_abundance("centrifuge_report.tsv", truth_fname, taxonomy_tree, debug)
- print >> sys.stderr, "\t\t\tsum of squared residuals in abundance: {}".format(abundance_SSR)
+ print("\t\t\tsum of squared residuals in abundance: {}".format(abundance_SSR), file=sys.stderr)

if runtime_only:
os.chdir("..")
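One thing 2to3 does not rewrite in the hunks above is the division operator, so under Python 3 the expression num_fragment / 1000000 now yields a float rather than an int. The printed output stays the same only because the %d conversion truncates floats; a small check, assuming num_fragment is 10000000:

# Python 2: 10000000 / 1000000 == 10 (int); Python 3: 10.0 (float).
# The %d conversion truncates the float, so the message text is identical.
num_fragment = 10000000  # assumed value for illustration
print("\t%d million pairs" % (num_fragment / 1000000))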
--- a/evaluation/centrifuge_simulate_reads.py
+++ b/evaluation/centrifuge_simulate_reads.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3

#
# Copyright 2015, Daehwan Kim <infphilo@gmail.com>
@@ -156,7 +156,7 @@ def read_transcript(genomes_seq, gtf_fil
transcripts[transcript_id][2].append([left, right])

# Sort exons and merge where separating introns are <=5 bps
- for tran, [chr, strand, exons] in transcripts.items():
+ for tran, [chr, strand, exons] in list(transcripts.items()):
exons.sort()
tmp_exons = [exons[0]]
for i in range(1, len(exons)):
@@ -167,7 +167,7 @@ def read_transcript(genomes_seq, gtf_fil
transcripts[tran] = [chr, strand, tmp_exons]

tmp_transcripts = {}
- for tran, [chr, strand, exons] in transcripts.items():
+ for tran, [chr, strand, exons] in list(transcripts.items()):
exon_lens = [e[1] - e[0] + 1 for e in exons]
transcript_len = sum(exon_lens)
if transcript_len >= frag_len:
@@ -444,8 +444,8 @@ def getSamAlignment(dna, exons, genome_s
MD += ("{}".format(MD_match_len))

if len(read_seq) != read_len:
- print >> sys.stderr, "read length differs:", len(read_seq), "vs.", read_len
- print >> sys.stderr, pos, "".join(cigars), cigar_descs, MD, XM, NM, Zs
+ print("read length differs:", len(read_seq), "vs.", read_len, file=sys.stderr)
+ print(pos, "".join(cigars), cigar_descs, MD, XM, NM, Zs, file=sys.stderr)
assert False

return pos, cigars, cigar_descs, MD, XM, NM, Zs, read_seq
@@ -575,8 +575,8 @@ def samRepOk(genome_seq, read_seq, chr,
tMD += ("{}".format(match_len))

if tMD != MD or tXM != XM or tNM != NM or XM > max_mismatch or XM != NM:
- print >> sys.stderr, chr, pos, cigar, MD, XM, NM, Zs
- print >> sys.stderr, tMD, tXM, tNM
+ print(chr, pos, cigar, MD, XM, NM, Zs, file=sys.stderr)
+ print(tMD, tXM, tNM, file=sys.stderr)
assert False


@@ -631,7 +631,7 @@ def simulate_reads(index_fname, base_fna
# Read genome sequences into memory
genomes_fname = index_fname + ".fa"
if not os.path.exists(genomes_fname):
- print >> sys.stderr, "Extracting genomes from Centrifuge index to %s, which may take a few hours ..." % (genomes_fname)
+ print("Extracting genomes from Centrifuge index to %s, which may take a few hours ..." % (genomes_fname), file=sys.stderr)
extract_cmd = [centrifuge_inspect,
index_fname]
extract_proc = subprocess.Popen(extract_cmd, stdout=open(genomes_fname, 'w'))
@@ -660,15 +660,15 @@ def simulate_reads(index_fname, base_fna
assert num_frag == sum(expr_profile)

if dna:
- genome_ids = genome_seqs.keys()
+ genome_ids = list(genome_seqs.keys())
else:
- transcript_ids = transcripts.keys()
+ transcript_ids = list(transcripts.keys())
random.shuffle(transcript_ids)
assert len(transcript_ids) >= len(expr_profile)

# Truth table
truth_file = open(base_fname + ".truth", "w")
- print >> truth_file, "taxID\tgenomeLen\tnumReads\tabundance\tname"
+ print("taxID\tgenomeLen\tnumReads\tabundance\tname", file=truth_file)
truth_list = []
normalized_sum = 0.0
debug_num_frag = 0
@@ -695,19 +695,19 @@ def simulate_reads(index_fname, base_fna
if can_tax_id in names:
name = names[can_tax_id]
abundance = raw_abundance / genome_len / normalized_sum
- print >> truth_file, "{}\t{}\t{}\t{:.6}\t{}".format(tax_id, genome_len, t_num_frags, abundance, name)
+ print("{}\t{}\t{}\t{:.6}\t{}".format(tax_id, genome_len, t_num_frags, abundance, name), file=truth_file)
truth_file.close()

# Sequence Classification Map (SCM) - something I made up ;-)
scm_file = open(base_fname + ".scm", "w")

# Write SCM header
- print >> scm_file, "@HD\tVN:1.0\tSO:unsorted"
- for tax_id in genome_seqs.keys():
+ print("@HD\tVN:1.0\tSO:unsorted", file=scm_file)
+ for tax_id in list(genome_seqs.keys()):
name = ""
if tax_id in names:
name = names[tax_id]
- print >> scm_file, "@SQ\tTID:%s\tSN:%s\tLN:%d" % (tax_id, name, len(genome_seqs[tax_id]))
+ print("@SQ\tTID:%s\tSN:%s\tLN:%d" % (tax_id, name, len(genome_seqs[tax_id])), file=scm_file)

read_file = open(base_fname + "_1.fa", "w")
if paired_end:
@@ -718,11 +718,11 @@ def simulate_reads(index_fname, base_fna
t_num_frags = expr_profile[t]
if dna:
tax_id = genome_ids[t]
- print >> sys.stderr, "TaxID: %s, num fragments: %d" % (tax_id, t_num_frags)
+ print("TaxID: %s, num fragments: %d" % (tax_id, t_num_frags), file=sys.stderr)
else:
transcript_id = transcript_ids[t]
chr, strand, transcript_len, exons = transcripts[transcript_id]
- print >> sys.stderr, transcript_id, t_num_frags
+ print(transcript_id, t_num_frags, file=sys.stderr)

genome_seq = genome_seqs[tax_id]
genome_len = len(genome_seq)
@@ -763,14 +763,14 @@ def simulate_reads(index_fname, base_fna
XS = "\tXS:A:{}".format(strand)
TI = "\tTI:Z:{}".format(transcript_id)

- print >> read_file, ">{}".format(cur_read_id)
- print >> read_file, read_seq
+ print(">{}".format(cur_read_id), file=read_file)
+ print(read_seq, file=read_file)
output = "{}\t{}\t{}\t{}\tNM:i:{}\tMD:Z:{}".format(cur_read_id, tax_id, pos + 1, cigar_str, NM, MD)
if paired_end:
- print >> read2_file, ">{}".format(cur_read_id)
- print >> read2_file, reverse_complement(read2_seq)
+ print(">{}".format(cur_read_id), file=read2_file)
+ print(reverse_complement(read2_seq), file=read2_file)
output += "\t{}\t{}\tNM2:i:{}\tMD2:Z:{}".format(pos2 + 1, cigar2_str, NM2, MD2)
- print >> scm_file, output
+ print(output, file=scm_file)

cur_read_id += 1

@@ -865,7 +865,7 @@ if __name__ == '__main__':
parser.print_help()
exit(1)
if not args.dna:
- print >> sys.stderr, "Error: --rna is not implemented."
+ print("Error: --rna is not implemented.", file=sys.stderr)
exit(1)
# if args.dna:
# args.expr_profile = "constant"
--- a/evaluation/test/centrifuge_evaluate_mason.py
+++ b/evaluation/test/centrifuge_evaluate_mason.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3

import sys, os, subprocess, inspect
import platform, multiprocessing
@@ -27,7 +27,7 @@ def compare_scm(centrifuge_out, true_out
higher_ranked = {}

ancestors = set()
- for tax_id in taxonomy_tree.keys():
+ for tax_id in list(taxonomy_tree.keys()):
if tax_id in ancestors:
continue
while True:
@@ -82,7 +82,7 @@ def compare_scm(centrifuge_out, true_out

fields = line.strip().split('\t')
if len(fields) != 3:
- print >> sys.stderr, "Warning: %s missing" % (line.strip())
+ print("Warning: %s missing" % (line.strip()), file=sys.stderr)
continue
read_name, tax_id = fields[1:3]
# Traverse up taxonomy tree to match the given rank parameter
@@ -117,7 +117,7 @@ def compare_scm(centrifuge_out, true_out
# print read_name

raw_unique_classified = 0
- for read_name, maps in db_dic.items():
+ for read_name, maps in list(db_dic.items()):
if len(maps) == 1 and read_name not in higher_ranked:
raw_unique_classified += 1
return classified, unique_classified, unclassified, len(db_dic), raw_unique_classified
@@ -184,7 +184,7 @@ def evaluate(index_base,
read_fname]

if verbose:
- print >> sys.stderr, ' '.join(centrifuge_cmd)
+ print(' '.join(centrifuge_cmd), file=sys.stderr)

out_fname = "centrifuge.output"
proc = subprocess.Popen(centrifuge_cmd, stdout=open(out_fname, "w"), stderr=subprocess.PIPE)
@@ -208,12 +208,12 @@ def evaluate(index_base,
# if rank == "strain":
# assert num_cases == num_fragment

- print >> sys.stderr, "\t\t%s" % rank
- print >> sys.stderr, "\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases)
- print >> sys.stderr, "\t\t\tprecision : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified)
- print >> sys.stderr, "\n\t\t\tfor uniquely classified "
- print >> sys.stderr, "\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases)
- print >> sys.stderr, "\t\t\t\t\tprecision : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified)
+ print("\t\t%s" % rank, file=sys.stderr)
+ print("\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases), file=sys.stderr)
+ print("\t\t\tprecision : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified), file=sys.stderr)
+ print("\n\t\t\tfor uniquely classified ", file=sys.stderr)
+ print("\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases), file=sys.stderr)
+ print("\t\t\t\t\tprecision : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified), file=sys.stderr)

# Calculate sum of squared residuals in abundance
"""
@@ -252,12 +252,12 @@ def evaluate(index_base,
if rank_taxID not in true_abundance:
true_abundance[rank_taxID] = 0.0
true_abundance[rank_taxID] += (reads / float(genomeSize))
- for taxID, reads in true_abundance.items():
+ for taxID, reads in list(true_abundance.items()):
true_abundance[taxID] /= total_sum

- print >> sys.stderr, "number of genomes:", num_genomes
- print >> sys.stderr, "number of species:", num_species
- print >> sys.stderr, "number of uniq species:", len(true_abundance)
+ print("number of genomes:", num_genomes, file=sys.stderr)
+ print("number of species:", num_species, file=sys.stderr)
+ print("number of uniq species:", len(true_abundance), file=sys.stderr)

read_fname = "centrifuge_data/bacteria_sim10M/bacteria_sim10M.fa"
summary_fname = "centrifuge.summary"
@@ -271,14 +271,14 @@ def evaluate(index_base,
read_fname]

if verbose:
- print >> sys.stderr, ' '.join(centrifuge_cmd)
+ print(' '.join(centrifuge_cmd), file=sys.stderr)

out_fname = "centrifuge.output"
proc = subprocess.Popen(centrifuge_cmd, stdout=open(out_fname, "w"), stderr=subprocess.PIPE)
proc.communicate()

calc_abundance = {}
- for taxID in true_abundance.keys():
+ for taxID in list(true_abundance.keys()):
calc_abundance[taxID] = 0.0
first = True
for line in open(summary_fname):
@@ -296,12 +296,12 @@ def evaluate(index_base,
"""

abundance_file = open("abundance.cmp", 'w')
- print >> abundance_file, "taxID\ttrue\tcalc\trank"
+ print("taxID\ttrue\tcalc\trank", file=abundance_file)
for rank in ranks:
if rank == "strain":
continue
true_abundance_rank, calc_abundance_rank = {}, {}
- for taxID in true_abundance.keys():
+ for taxID in list(true_abundance.keys()):
assert taxID in calc_abundance
rank_taxID = taxID
while True:
@@ -322,11 +322,11 @@ def evaluate(index_base,
calc_abundance_rank[rank_taxID] += calc_abundance[taxID]

ssr = 0.0 # Sum of Squared Residuals
- for taxID in true_abundance_rank.keys():
+ for taxID in list(true_abundance_rank.keys()):
assert taxID in calc_abundance_rank
ssr += (true_abundance_rank[taxID] - calc_abundance_rank[taxID]) ** 2
- print >> abundance_file, "%s\t%.6f\t%.6f\t%s" % (taxID, true_abundance_rank[taxID], calc_abundance_rank[taxID], rank)
- print >> sys.stderr, "%s) Sum of squared residuals: %.6f" % (rank, ssr)
+ print("%s\t%.6f\t%.6f\t%s" % (taxID, true_abundance_rank[taxID], calc_abundance_rank[taxID], rank), file=abundance_file)
+ print("%s) Sum of squared residuals: %.6f" % (rank, ssr), file=sys.stderr)
abundance_file.close()

debian/patches/series
0003-Fix-make-install-DESTDIR.patch
hardening.patch
no_msse2.patch
2to3.patch
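Beyond the print() conversion, the patch makes two other mechanical changes: each script's shebang moves from #!/usr/bin/env python to #!/usr/bin/python3, as Debian packaging prefers, and calls to dict.keys(), dict.values() and dict.items() are wrapped in list(). In Python 3 these methods return view objects, which cannot be indexed or passed to random.shuffle() the way the simulator uses genome_ids and transcript_ids. A minimal sketch with hypothetical data standing in for a real Centrifuge index:

#!/usr/bin/python3
import random

# Hypothetical transcript lengths; in centrifuge_simulate_reads.py these
# come from a Centrifuge index and its annotation.
transcripts = {"tx1": 1500, "tx2": 800, "tx3": 2300}

# A Python 3 keys() view supports neither indexing nor random.shuffle(),
# so 2to3 wraps it in list(), as in the patch above.
transcript_ids = list(transcripts.keys())
random.shuffle(transcript_ids)
print(transcript_ids[0], "of", len(transcript_ids), "transcripts")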