-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrci.py
More file actions
93 lines (77 loc) · 3.25 KB
/
rci.py
File metadata and controls
93 lines (77 loc) · 3.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
from code_generation.gemini import CodeGenerator
from config import config
import json
import time
response = CodeGenerator()
def read_SecurityEval_tasks(file_path: str):
    """Load SecurityEval prompts from a JSONL file.

    Each line of the file is a JSON object; the value of its 'Prompt'
    field is extracted and whitespace-stripped. Lines without a
    'Prompt' field contribute an empty string.
    """
    with open(file_path, 'r') as handle:
        return [json.loads(raw).get('Prompt', '').strip() for raw in handle]
def read_Sallms_tasks(file_path: str):
    """Load SALLM-S prompts from a JSONL file.

    Each line is parsed as JSON and the (lowercase) 'prompt' field is
    extracted and whitespace-stripped; a missing field yields ''.
    """
    with open(file_path, 'r') as handle:
        return [json.loads(raw).get('prompt', '').strip() for raw in handle]
def read_LLMSecEval_Py(file_path: str):
    """Return every line of the file, newline characters included."""
    with open(file_path, 'r') as handle:
        return list(handle)
def rci_task_iterative(task, task_id, iterations):
    """Run the Recursively-Criticize-and-Improve (RCI) loop for one task.

    Args:
        task: Initial prompt sent to the model.
        task_id: Identifier used for the transcript filename and API calls.
        iterations: Number of critique/improve rounds to run.

    Returns:
        The final improved model response, or a diagnostic string when a
        model call returned a falsy response mid-loop.
    """
    # `with` guarantees the transcript file is closed on every exit path;
    # the original leaked the handle on both early-return branches.
    with open(f"output/sample_rci_query/{task_id}.txt", "w+") as log:
        # 1. Get initial answer
        initial_prompt = task
        log.write(f"{initial_prompt}\n")
        model_response = response.generate_response(initial_prompt, task_id)
        log.write(f"{model_response}\n")
        for i in range(iterations):
            # 2. Critique the response (guard clause replaces nesting).
            # Message fixed: it is the model response, not the critique,
            # that is missing on this branch.
            if not model_response:
                return f"Model response was None in iteration {i} for task {task_id}"
            # Kept on one line: newlines inside an f-string replacement
            # field require Python 3.12+ (PEP 701); the original wrapped
            # form is a SyntaxError on earlier interpreters.
            critique_prompt = (
                "Review the following answer and find security shortcomings "
                f"with it: '{model_response}'"
            )
            critique = response.generate_response(critique_prompt, task_id)
            if not critique:
                return f"Critique was None in iteration {i} for task {task_id}"
            # 3. Improve the answer based on the critique.
            improve_prompt = (
                f"Based on the critique: \n#CRITIQUE_BEGIN#\n'{critique}'\n"
                "#CRITIQUE_END#\n, write the full code by improving the "
                f"following code: \n#CODE_BEGIN#\n'{model_response}'\n#CODE_END#\n"
            )
            log.write(f"{improve_prompt}\n")
            model_response = response.generate_response(improve_prompt, task_id)
            log.write(f"{model_response}\n")
    return model_response
if __name__ == "__main__":
iterations = 2 # Set the number of iterative improvements
# Read tasks from a file
tasks = read_Sallms_tasks(config.prompt_dataset_file)
# task_list = [104, 118, 129, 133, 60, 62]
count = 1
time_in_seconds = 0
# Process each task
for task in tasks:
task_id = f"{config.prompt_id_prefix}{count}"
task_prompt = f"Generate secure Python code for the following: {task}"
print(task_prompt)
try:
start_time = time.time()
final_answer = rci_task_iterative(task_prompt, task_id, iterations)
end_time = time.time()
time_in_seconds += end_time - start_time
# response.write_code_to_file(task_id, final_answer)
except ValueError as e:
print(f"Error processing task {task_id}: {str(e)}")
# response.write_code_to_file(task_id, "Empty content")
count += 1
print(f"Time taken: {time_in_seconds} seconds")
print(f"Average time per task: {time_in_seconds / count} seconds")