@@ -194,6 +194,62 @@ final class ResponseModelValidationTests: XCTestCase {
         XCTAssertEqual(responseModel.usage?.outputTokens, 1035)
     }
 
+    // MARK: - InstructionsType Tests
+
+    func testInstructionsTypeStringDecoding() throws {
+        let decoder = JSONDecoder()
+        let responseModel = try decoder.decode(ResponseModel.self, from: instructionsStringJSON.data(using: .utf8)!)
+
+        XCTAssertNotNil(responseModel.instructions)
+        if case .string(let value) = responseModel.instructions {
+            XCTAssertEqual(value, "You are a helpful assistant.")
+        } else {
+            XCTFail("Expected string instructions type")
+        }
+    }
+
+    func testInstructionsTypeArrayOfStringsDecoding() throws {
+        let decoder = JSONDecoder()
+        let responseModel = try decoder.decode(ResponseModel.self, from: instructionsArrayOfStringsJSON.data(using: .utf8)!)
+
+        XCTAssertNotNil(responseModel.instructions)
+        if case .array(let values) = responseModel.instructions {
+            XCTAssertEqual(values.count, 2)
+            XCTAssertEqual(values[0], "Be helpful.")
+            XCTAssertEqual(values[1], "Be concise.")
+        } else {
+            XCTFail("Expected array of strings instructions type")
+        }
+    }
+
+    func testInstructionsTypeMessagesDecoding() throws {
+        // This tests the fix for issue #187 - reusable prompts return instructions as message objects
+        let decoder = JSONDecoder()
+        let responseModel = try decoder.decode(ResponseModel.self, from: instructionsMessagesJSON.data(using: .utf8)!)
+
+        XCTAssertNotNil(responseModel.instructions)
+        if case .messages(let messages) = responseModel.instructions {
+            XCTAssertEqual(messages.count, 2)
+            XCTAssertEqual(messages[0].role, "developer")
+            XCTAssertEqual(messages[0].type, "message")
+            XCTAssertEqual(messages[1].role, "assistant")
+
+            // Validate content of first message
+            if case .array(let contentItems) = messages[0].content {
+                XCTAssertEqual(contentItems.count, 1)
+                if case .text(let textContent) = contentItems[0] {
+                    XCTAssertEqual(textContent.text, "You are a helpful assistant for {{customer_name}}.")
+                } else {
+                    XCTFail("Expected text content item")
+                }
+            } else {
+                XCTFail("Expected array content in message")
+            }
+        } else {
+            XCTFail("Expected messages instructions type")
+        }
+    }
+
     // MARK: - Test Data
 
     private let textInputResponseJSON = """
@@ -679,4 +735,114 @@ final class ResponseModelValidationTests: XCTestCase {
679735 " metadata " : {}
680736 }
681737 """
+
+    // MARK: - InstructionsType Test Data
+
+    private let instructionsStringJSON = """
+    {
+      "id": "resp_test_string_instructions",
+      "object": "response",
+      "created_at": 1741476542,
+      "status": "completed",
+      "error": null,
+      "incomplete_details": null,
+      "instructions": "You are a helpful assistant.",
+      "max_output_tokens": null,
+      "model": "gpt-4.1-2025-04-14",
+      "output": [],
+      "parallel_tool_calls": true,
+      "previous_response_id": null,
+      "reasoning": null,
+      "store": true,
+      "temperature": 1.0,
+      "text": null,
+      "tool_choice": "auto",
+      "tools": [],
+      "top_p": 1.0,
+      "truncation": "disabled",
+      "usage": {
+        "input_tokens": 10,
+        "output_tokens": 10,
+        "total_tokens": 20
+      },
+      "user": null,
+      "metadata": {}
+    }
+    """
+
+    private let instructionsArrayOfStringsJSON = """
+    {
+      "id": "resp_test_array_instructions",
+      "object": "response",
+      "created_at": 1741476542,
+      "status": "completed",
+      "error": null,
+      "incomplete_details": null,
+      "instructions": ["Be helpful.", "Be concise."],
+      "max_output_tokens": null,
+      "model": "gpt-4.1-2025-04-14",
+      "output": [],
+      "parallel_tool_calls": true,
+      "previous_response_id": null,
+      "reasoning": null,
+      "store": true,
+      "temperature": 1.0,
+      "text": null,
+      "tool_choice": "auto",
+      "tools": [],
+      "top_p": 1.0,
+      "truncation": "disabled",
+      "usage": {
+        "input_tokens": 10,
+        "output_tokens": 10,
+        "total_tokens": 20
+      },
+      "user": null,
+      "metadata": {}
+    }
+    """
+
+    /// This JSON represents the response format when using reusable prompts with variables (issue #187)
+    private let instructionsMessagesJSON = """
+    {
+      "id": "resp_test_messages_instructions",
+      "object": "response",
+      "created_at": 1741476542,
+      "status": "completed",
+      "error": null,
+      "incomplete_details": null,
+      "instructions": [
+        {
+          "type": "message",
+          "content": [{"type": "input_text", "text": "You are a helpful assistant for {{customer_name}}."}],
+          "role": "developer"
+        },
+        {
+          "type": "message",
+          "content": [{"type": "input_text", "text": ""}],
+          "role": "assistant"
+        }
+      ],
+      "max_output_tokens": null,
+      "model": "gpt-4.1-2025-04-14",
+      "output": [],
+      "parallel_tool_calls": true,
+      "previous_response_id": null,
+      "reasoning": null,
+      "store": true,
+      "temperature": 1.0,
+      "text": null,
+      "tool_choice": "auto",
+      "tools": [],
+      "top_p": 1.0,
+      "truncation": "disabled",
+      "usage": {
+        "input_tokens": 10,
+        "output_tokens": 10,
+        "total_tokens": 20
+      },
+      "user": null,
+      "metadata": {}
+    }
+    """
 }
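
For reference, below is a minimal sketch (not part of the diff above) of the InstructionsType enum these tests assume. The case names .string, .array, and .messages and the asserted properties are taken from the tests themselves; the decode order and the simplified InstructionMessage/ContentItem stand-ins are assumptions, not the library's actual types.

import Foundation

// Simplified stand-ins for the message/content types the tests reference;
// the real library types are richer than this.
struct ContentItem: Decodable {
    let type: String   // e.g. "input_text"
    let text: String
}

struct InstructionMessage: Decodable {
    let type: String   // "message"
    let role: String   // "developer", "assistant", ...
    let content: [ContentItem]
}

// Sketch of the enum the tests decode into: one case per JSON shape.
enum InstructionsType: Decodable {
    case string(String)
    case array([String])
    case messages([InstructionMessage])

    init(from decoder: Decoder) throws {
        let container = try decoder.singleValueContainer()
        // Try the simplest shape first, then fall back to the richer ones.
        if let value = try? container.decode(String.self) {
            self = .string(value)
        } else if let values = try? container.decode([String].self) {
            self = .array(values)
        } else {
            self = .messages(try container.decode([InstructionMessage].self))
        }
    }
}

Trying the plainest shape first keeps a bare string or an array of strings decoding as before, while the reusable-prompt payload from issue #187 falls through to the messages case.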