upreckon/problem.py @ 146:d5b6708c1955

Distutils support, reorganization and cleaning up:
* Removed command-line options -t and -u.
* Reorganized code:
  o all modules are now in package upreckon;
  o TestCaseNotPassed and its descendants now live in a separate module exceptions;
  o load_problem now lives in module problem.
* Commented out mentions of command-line option -c in --help.
* Added a distutils-based setup.py.
author Oleg Oshmyan <chortos@inbox.lv>
date Sat, 28 May 2011 14:24:25 +0100
parents problem.py@d2c266c8d820
children a1286da36d29
# Copyright (c) 2010-2011 Chortos-2 <chortos@inbox.lv>

from __future__ import division, with_statement

from .compat import *
from .exceptions import *
from . import config, testcases
from __main__ import options

import os, re, sys
try:
    from collections import deque
except ImportError:
    deque = list

try:
    import signal
except ImportError:
    signalnames = ()
else:
    # Construct a cache of all signal names available on the current
    # platform. Prefer names from the UNIX standards over other versions.
    unixnames = frozenset(('HUP', 'INT', 'QUIT', 'ILL', 'ABRT', 'FPE', 'KILL', 'SEGV', 'PIPE', 'ALRM', 'TERM', 'USR1', 'USR2', 'CHLD', 'CONT', 'STOP', 'TSTP', 'TTIN', 'TTOU', 'BUS', 'POLL', 'PROF', 'SYS', 'TRAP', 'URG', 'VTALRM', 'XCPU', 'XFSZ'))
    signalnames = {}
    for name in dir(signal):
        if re.match('SIG[A-Z]+$', name):
            value = signal.__dict__[name]
            if isinstance(value, int) and (value not in signalnames or name[3:] in unixnames):
                signalnames[value] = name
    del unixnames

__all__ = 'Problem', 'TestContext', 'test_context_end', 'TestGroup'


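# Render an exception as a parenthesized, lower-cased remark that can be
# appended directly to a verdict message, or '' if there is nothing to say.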
def strerror(e):
    s = getattr(e, 'strerror', None)
    if not s: s = str(e)
    return ' (%s%s)' % (s[0].lower(), s[1:]) if s else ''


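# Expose a plain dict through attribute access (e.g. prob.cache.padoutput).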
class Cache(object):
    def __init__(self, mydict):
        self.__dict__ = mydict


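# Markers for the test-case stream produced by load_problem(): a TestContext
# instance opens a (possibly scored) group of test cases, and the
# test_context_end sentinel closes the innermost open group.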
class TestContext(object):
    __slots__ = ()

test_context_end = object()

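# A TestGroup accumulates per-case verdicts and points. A failed case zeroes
# the whole group's score, and a group with an explicit points value has its
# raw score rescaled ("weighted") to that value when the group ends.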
class TestGroup(TestContext):
    __slots__ = 'points', 'case', 'log', 'correct', 'allcorrect', 'real', 'max', 'ntotal', 'nvalued', 'ncorrect', 'ncorrectvalued'

    def __init__(self, points=None):
        self.points = points
        self.real = self.max = self.ntotal = self.nvalued = self.ncorrect = self.ncorrectvalued = 0
        self.allcorrect = True
        self.log = []

    def case_start(self, case):
        self.case = case
        self.correct = False
        self.ntotal += 1
        if case.points:
            self.nvalued += 1

    def case_correct(self):
        self.correct = True
        self.ncorrect += 1
        if self.case.points:
            self.ncorrectvalued += 1

    def case_end(self):
        self.log.append((self.case, self.correct))
        del self.case
        if not self.correct:
            self.allcorrect = False

    def score(self, real, max):
        self.real += real
        self.max += max

    def end(self):
        if not self.allcorrect:
            self.real = 0
        if self.points is not None and self.points != self.max:
            max, weighted = self.points, self.real * self.points / self.max if self.max else 0
            before_weighting = ' (%g/%g before weighting)' % (self.real, self.max)
        else:
            max, weighted = self.max, self.real
            before_weighting = ''
        say('Group total: %d/%d tests, %g/%g points%s' % (self.ncorrect, self.ntotal, weighted, max, before_weighting))
        # No real need to flush stdout, as it will anyway be flushed in a moment,
        # when either the problem total or the next test case's ID is printed
        return weighted, max, self.log

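# Sample ("dummy") tests are reported but never contribute to the score.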
class DummyTestGroup(TestGroup):
    __slots__ = ()
    def end(self):
        say('Sample total: %d/%d tests' % (self.ncorrect, self.ntotal))
        return 0, 0, self.log


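# A Problem bundles the loaded per-problem configuration, a small cache of
# presentation data and the generator of test cases to run.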
class Problem(object):
    __slots__ = 'name', 'config', 'cache', 'testcases'

    def __init__(prob, name):
        if not isinstance(name, basestring):
            # This shouldn't happen, of course
            raise TypeError('Problem() argument 1 must be string, not ' + type(name).__name__)
        prob.name = name
        prob.config = config.load_problem(name)
        prob.cache = Cache({'padoutput': 0})
        prob.testcases = load_problem(prob)

    # TODO
    def build(prob):
        raise NotImplementedError
    def test(prob):
        case = None
        try:
            contexts = deque((TestGroup(),))
            for case in prob.testcases:
                if case is test_context_end:
                    # A group has ended: replay its per-case log into the
                    # enclosing context so the test counters stay correct,
                    # then fold in its (possibly weighted) score.
                    real, max, log = contexts.pop().end()
                    for case, correct in log:
                        contexts[-1].case_start(case)
                        if correct:
                            contexts[-1].case_correct()
                        contexts[-1].case_end()
                    contexts[-1].score(real, max)
                    continue
                elif isinstance(case, TestContext):
                    contexts.append(case)
                    continue
                contexts[-1].case_start(case)
                granted = 0
                id = str(case.id)
                if case.isdummy:
                    id = 'sample ' + id
                say('%*s: ' % (prob.cache.padoutput, id), end='')
                sys.stdout.flush()
                try:
                    if prob.config.kind != 'outonly':
                        granted = case(lambda: (say('%7.3f%s s, ' % (case.time_stopped - case.time_started, case.time_limit_string), end=''), sys.stdout.flush()))
                    else:
                        granted = case(lambda: None)
                except TestCaseSkipped:
                    verdict = 'skipped due to skimming mode'
                except CanceledByUser:
                    verdict = 'canceled by the user'
                except WallTimeLimitExceeded:
                    verdict = 'wall-clock time limit exceeded'
                except CPUTimeLimitExceeded:
                    verdict = 'CPU time limit exceeded'
                except MemoryLimitExceeded:
                    verdict = 'memory limit exceeded'
                except WrongAnswer:
                    e = sys.exc_info()[1]
                    if e.comment:
                        verdict = 'wrong answer (%s)' % e.comment
                    else:
                        verdict = 'wrong answer'
                except NonZeroExitCode:
                    e = sys.exc_info()[1]
                    if e.exitcode < 0:
                        if sys.platform == 'win32':
                            verdict = 'terminated with error 0x%X' % (e.exitcode + 0x100000000)
                        elif -e.exitcode in signalnames:
                            verdict = 'terminated by signal %d (%s)' % (-e.exitcode, signalnames[-e.exitcode])
                        else:
                            verdict = 'terminated by signal %d' % -e.exitcode
                    else:
                        verdict = 'non-zero return code %d' % e.exitcode
                except CannotStartTestee:
                    verdict = 'cannot launch the program to test%s' % strerror(sys.exc_info()[1].upstream)
                except CannotStartValidator:
                    verdict = 'cannot launch the validator%s' % strerror(sys.exc_info()[1].upstream)
                except CannotReadOutputFile:
                    verdict = 'cannot read the output file%s' % strerror(sys.exc_info()[1].upstream)
                except CannotReadInputFile:
                    verdict = 'cannot read the input file%s' % strerror(sys.exc_info()[1].upstream)
                except CannotReadAnswerFile:
                    verdict = 'cannot read the reference output file%s' % strerror(sys.exc_info()[1].upstream)
                except ExceptionWrapper:
                    verdict = 'unspecified reason [this may be a bug in test.py]%s' % strerror(sys.exc_info()[1].upstream)
                except TestCaseNotPassed:
                    verdict = 'unspecified reason [this may be a bug in test.py]%s' % strerror(sys.exc_info()[1])
                #except Exception:
                #    verdict = 'unknown error [this may be a bug in test.py]%s' % strerror(sys.exc_info()[1])
                else:
                    # A case may return a bare score or a (score, comment) pair
                    try:
                        granted, comment = granted
                    except TypeError:
                        comment = ''
                    else:
                        if comment:
                            comment = ' (%s)' % comment
                    if granted >= 1:
                        contexts[-1].case_correct()
                        # Answer the generator so it knows this case passed
                        prob.testcases.send(True)
                        verdict = 'OK' + comment
                    elif not granted:
                        verdict = 'wrong answer' + comment
                    else:
                        verdict = 'partly correct' + comment
                    granted *= case.points
                say('%g/%g, %s' % (granted, case.points, verdict))
                contexts[-1].case_end()
                contexts[-1].score(granted, case.points)
            weighted = contexts[0].real * prob.config.taskweight / contexts[0].max if contexts[0].max else 0
            before_weighting = valued = ''
            if prob.config.taskweight != contexts[0].max:
                before_weighting = ' (%g/%g before weighting)' % (contexts[0].real, contexts[0].max)
            if contexts[0].nvalued != contexts[0].ntotal:
                valued = ' (%d/%d valued)' % (contexts[0].ncorrectvalued, contexts[0].nvalued)
            say('Problem total: %d/%d tests%s, %g/%g points%s' % (contexts[0].ncorrect, contexts[0].ntotal, valued, weighted, prob.config.taskweight, before_weighting))
            sys.stdout.flush()
            return weighted, prob.config.taskweight
        finally:
            # Clean up generated input/output/answer files if so requested
            if options.erase and case and case.has_iofiles:
                for var in 'in', 'out':
                    name = getattr(prob.config, var + 'name')
                    if name:
                        try:
                            os.remove(name)
                        except Exception:
                            pass
                if case.has_ansfile:
                    if prob.config.ansname:
                        try:
                            os.remove(prob.config.ansname)
                        except Exception:
                            pass

238
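# Generate the stream of test cases for prob. Note the _types default
# argument: it is evaluated once, when the function is defined, and serves
# as a constant mapping from the configured problem kind to a test-case
# class. This is a generator; see the protocol notes on Problem.test().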
def load_problem(prob, _types={'batch' : testcases.BatchTestCase,
                               'outonly': testcases.OutputOnlyTestCase}):
    # We will need to iterate over these configuration variables twice
    try:
        len(prob.config.dummies)
    except Exception:
        prob.config.dummies = tuple(prob.config.dummies)
    try:
        len(prob.config.tests)
    except Exception:
        prob.config.tests = tuple(prob.config.tests)

    if options.legacy:
        prob.config.usegroups = False
        newtests = []
        for i, name in enumerate(prob.config.tests):
            # Same here; we'll need to iterate over them twice
            try:
                l = len(name)
            except Exception:
                try:
                    name = tuple(name)
                except TypeError:
                    name = (name,)
                l = len(name)
            if l > 1:
                prob.config.usegroups = True
            newtests.append(name)
        if prob.config.usegroups:
            prob.config.tests = newtests
        del newtests

    # Even if they have duplicate test identifiers, we must honour sequence pointmaps
    if isinstance(prob.config.pointmap, dict):
        def getpoints(i, j, k=None):
            try:
                return prob.config.pointmap[i]
            except KeyError:
                try:
                    return prob.config.pointmap[None]
                except KeyError:
                    return prob.config.maxexitcode or 1
    elif prob.config.usegroups:
        def getpoints(i, j, k):
            try:
                return prob.config.pointmap[k][j]
            except LookupError:
                return prob.config.maxexitcode or 1
    else:
        def getpoints(i, j):
            try:
                return prob.config.pointmap[j]
            except LookupError:
                return prob.config.maxexitcode or 1

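    # getpoints(i, j[, k]) resolves the value of test i at position j in
    # group k: a dict pointmap is keyed by test identifier (with None as a
    # fallback), while a sequence pointmap is indexed by position, per
    # group when groups are in use.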
    # First get prob.cache.padoutput right,
    # then yield the actual test cases
    for i in prob.config.dummies:
        s = 'sample ' + str(i).zfill(prob.config.paddummies)
        prob.cache.padoutput = max(prob.cache.padoutput, len(s))
    if prob.config.usegroups:
        if not isinstance(prob.config.groupweight, dict):
            prob.config.groupweight = dict(enumerate(prob.config.groupweight))
        for group in prob.config.tests:
            for i in group:
                s = str(i).zfill(prob.config.padtests)
                prob.cache.padoutput = max(prob.cache.padoutput, len(s))
        if prob.config.dummies:
            yield DummyTestGroup()
            for i in prob.config.dummies:
                s = str(i).zfill(prob.config.paddummies)
                if (yield _types[prob.config.kind](prob, s, True, 0)):
                    # The driver sent True; yield once more to answer the send
                    yield
            yield test_context_end
        for k, group in enumerate(prob.config.tests):
            if not group:
                continue
            yield TestGroup(prob.config.groupweight.get(k, prob.config.groupweight.get(None)))
            case_type = _types[prob.config.kind]
            for j, i in enumerate(group):
                s = str(i).zfill(prob.config.padtests)
                if not (yield case_type(prob, s, False, getpoints(i, j, k))):
                    # The case failed; when skimming, skip the rest of the group
                    if options.skim:
                        case_type = testcases.SkippedTestCase
                else:
                    yield
            yield test_context_end
    else:
        for i in prob.config.tests:
            s = str(i).zfill(prob.config.padtests)
            prob.cache.padoutput = max(prob.cache.padoutput, len(s))
        for i in prob.config.dummies:
            s = str(i).zfill(prob.config.paddummies)
            if (yield _types[prob.config.kind](prob, s, True, 0)):
                yield
        for j, i in enumerate(prob.config.tests):
            s = str(i).zfill(prob.config.padtests)
            if (yield _types[prob.config.kind](prob, s, False, getpoints(i, j))):
                yield
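
# A minimal sketch of the intended driving loop (hypothetical problem name;
# note that this module reads options from __main__, so it must be driven
# from upreckon's own entry point rather than imported standalone):
#
#     from upreckon.problem import Problem
#     prob = Problem('aplusb')        # hypothetical problem name
#     weighted, out_of = prob.test()  # runs all cases, prints verdicts
#     print('Scored %g out of %g points' % (weighted, out_of))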