REVEN-Axion 2016v1.3.1
launcher_low_level.py

This example shows how to connect to the REVEN launcher from Python, using the REVEN low-level API.

You will find some helpers to:

- open and close a project (open_project, close_project)
- list the available virtual machines, users and projects (list_vms, list_users, list_projects_for_user)
- generate a scenario and wait for the end of its generation (generate_scenario, wait_end_of_scenario)

And a sample use case using them.
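
As a quick preview before the full script, here is a minimal sketch of the typical flow, condensed from the example below; the hostname 'localhost' and the 'reven'/'_sample' project name are placeholders taken from the sample configuration:

import reven
import reven_api

lc = reven_api.launcher_connection('localhost')     # placeholder hostname
project = reven_api.project_id('reven', '_sample')  # placeholder user / project name

lc.project_create(project)
port = lc.server_launch(project)                    # 0 means the server did not start
if port:
    client = reven.Project('localhost', port)       # high-level handle on the running server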

#!/usr/bin/env python2

import reven_api  # to connect to the reven launcher
import reven      # to get a high-level handle on a reven server after opening a project

from os.path import basename
from time import sleep

def open_project(hostname, project_id):
    """
    Open a project and return a reven.Project handle to it.
    @param hostname: hostname to join reven at
    @param project_id: reven_api.project_id
    @return a connected reven.Project or None
    """

    # connect to the reven launcher
    lc = reven_api.launcher_connection(hostname)

    # check whether the project is already open
    port = lc.project_details(project_id).reven_server.port
    if port != 0:
        print "Project already running on port %s" % port
        return reven.Project(hostname, port)

    # actually open the project
    port = lc.server_launch(project_id)
    if port == 0:
        print "Server not started"
        return None

    print "Server started on port %s" % port

    # the reven server cannot accept connections immediately after being launched,
    # so retry if the first attempts fail
    for i in range(10):
        try:
            client = reven.Project(hostname, port)
            print "Connected!"
            return client
        except RuntimeError as e:
            print e
            sleep(1)

    return None

def close_project(hostname, project_id):
    """
    Close a project (kill its running server).
    @param hostname: hostname to join reven at
    @param project_id: reven_api.project_id
    """

    # connect to reven launcher
    lc = reven_api.launcher_connection(hostname)

    # close project
    port = lc.project_details(project_id).reven_server.port
    lc.server_kill(port)

    # check project is properly closed
    if lc.project_details(project_id).reven_server.port:
        print "Server not killed"
    else:
        print "Project closed"


def list_vms(lc):
    """
    Print configuration of all available virtual machines.
    @param lc: reven_api.launcher_connection
    """

    print "Available virtual machines:"

    for vm in lc.list_vms():
        # here vm is a reven_api.vm_info
        print "[%s]" % vm.name
        print "vbox_name = %s" % vm.vbox_name
        print "display = %s" % vm.display

        print "dynamic_launch = %s" % vm.dynamic_launch
        print "static_launch = %s" % vm.static_launch
        print "stopper = %s" % vm.stopper

        print "os = %s" % vm.os
        print "segment = %s" % vm.segment
        print "pdb_path = %s" % vm.pdb_path

        print "vnc_password = %s" % vm.vnc_password
        print "vnc_port = %s" % vm.vnc_port
        print ""

def list_users(lc):
    """
    List all reven users.
    @param lc: reven_api.launcher_connection
    """

    print "Reven users:"

    for user in lc.list_users():
        # here user is a string
        print user

def list_projects_for_user(lc, user):
    """
    List all projects for a given user.
    @param lc: reven_api.launcher_connection
    @param user: username as a string
    """

    print "Projects for user %s:" % user

    for project in lc.list_projects(user):
        # here project is a reven_api.project_id, to be forwarded to launcher_connection service calls
        print "%s/%s" % (project.user, project.project)

def wait_end_of_scenario(lc, project_id):
    """
    Block until the end of scenario generation.
    @param lc: reven_api.launcher_connection
    @param project_id: reven_api.project_id
    @return True if generation was successful
    """

    while True:
        progress = lc.project_scenario(project_id)
        print progress.log_chunk
        if not progress.is_generating:
            break
        # to avoid waiting forever, scenario generation could be aborted with:
        # lc.project_stop_scenario_generation(project_id)
        sleep(1)

    return progress.is_successful

def generate_scenario(lc, project_id, vm, binary, args='', input_files=None, dump_at=''):
    """
    Generate a scenario.
    @param lc: reven_api.launcher_connection
    @param project_id: reven_api.project_id
    @param vm: name of the virtual machine configuration to use
    @param binary: binary filepath
    @param args: argument string for the binary launch
    @param input_files: list of additional input files
    @param dump_at: symbol name or address in the binary; scenario generation starts when it is reached
    """

    # upload all input files, including the binary
    input_files = list(input_files) if input_files else []
    binary_name = basename(binary)
    if binary not in input_files:
        input_files.append(binary)

    for filepath in input_files:
        lc.project_upload_file(project_id, filepath)

    # configure scenario
    sc = reven_api.scenario_config()
    sc.vm_config_name = vm
    sc.binary_name = binary_name
    sc.binary_arguments = args or ''
    sc.binary_dump_hint = dump_at or ''
    sc.binary_dump_address = dump_at or ''
    sc.system_pdb_path = ''

    # configure generation
    sglc = reven_api.scenario_generation_launch_config()
    sglc.enable_instruction_tracing = False
    sglc.is_interactive = False
    sglc.vnc_password = ''
    sglc.vnc_port = ''

    scenario_config = reven_api.scenario_generation_config()
    scenario_config.generation = sglc
    scenario_config.scenario = sc

    print "Start scenario generation"
    lc.project_generate_scenario(project_id, scenario_config)

    if wait_end_of_scenario(lc, project_id):
        print "Generation ok"
    else:
        print "Generation failed"


if __name__ == '__main__':
    # example config:
    launcher_hostname = 'localhost'
    sample_user = 'reven'
    sample_project = reven_api.project_id(sample_user, '_sample')
    sample_binary = '/tmp/lynx32'
    sample_binary_arguments = ' -dump crash.html'
    sample_input_files = ['/tmp/crash.html']
    sample_vm = 'vm_debian_auto'

    lc = reven_api.launcher_connection(launcher_hostname)

    # list info from launcher
    list_vms(lc)
    list_users(lc)
    print ''
    list_projects_for_user(lc, lc.list_users()[0])

    # create project
    lc.project_create(sample_project)

    # if previous execution of this script was interrupted:
    # - close project if running
    close_project(launcher_hostname, sample_project)
    # - stop previous scenario generation, wait for it to end
    lc.project_stop_scenario_generation(sample_project)
    wait_end_of_scenario(lc, sample_project)

    # generate scenario
    generate_scenario(lc, sample_project, sample_vm, sample_binary,
                      args=sample_binary_arguments, input_files=sample_input_files)

    # open project
    client = open_project(launcher_hostname, sample_project)

    # start and monitor execution
    client.start_execution()
    while True:
        progress = client.execution_status()

        print "%s/%s - %s/%s - %s" % (
            progress.tsc, progress.last_tsc,
            progress.point_index, progress.last_point_index,
            progress.status)

        if not progress.is_busy:
            break

        sleep(1)

    # use reven
    print client.traces()

    if list(client.traces()[0].search_symbol('my_awesome_symbol')):
        print "Symbol found, could keep project"
        client.save_execution('good')
    else:
        print "Symbol not found, could discard project"

    # close project
    close_project(launcher_hostname, sample_project)

    # delete sample project
    lc.project_delete(sample_project)