@@ -1,10 +1,6 @@
 from typing_extensions import Generic, TypeVar

 import pydantic_core
-import ast
-import io
-import tokenize
-import inspect

 from typechat._internal.model import PromptSection, TypeChatLanguageModel
 from typechat._internal.result import Failure, Result, Success
@@ -126,99 +122,4 @@ def _create_repair_prompt(self, validation_error: str) -> str:
 '''
 The following is a revised JSON object:
 """
-        return prompt
-
-def _convert_pythonic_comments_to_annotated_docs(schema_class, debug=False):
-
-    def _extract_tokens_between_line_numbers(gen, start_lineno, end_lineno):
-        # Extract tokens between start_lineno and end_lineno obtained from the tokenize generator
-        tokens = []
-        for tok in gen:
-            if tok.start[0] < start_lineno:  # Skip tokens before start_lineno
-                continue
-            if tok.start[0] >= start_lineno and tok.end[0] <= end_lineno:
-                # Add token if it is within the range
-                tokens.append((tok.type, tok.string))
-            elif tok.start[0] > end_lineno:  # Stop if token is beyond end_lineno
-                break
-
-        return tokens
-
-    schema_path = inspect.getfile(schema_class)
-
-    with open(schema_path, 'r') as f:
-        schema_class_source = f.read()
-        gen = tokenize.tokenize(io.BytesIO(
-            schema_class_source.encode('utf-8')).readline)
-
-    tree = ast.parse(schema_class_source)
-
-    if debug:
-        print("Source code before transformation:")
-        print("--" * 50)
-        print(schema_class_source)
-        print("--" * 50)
-
-    has_comments = False  # Flag later used to perform imports of Annotated and Doc if needed
-
-    for node in tree.body:
-        if isinstance(node, ast.ClassDef):
-            for n in node.body:
-                if isinstance(n, ast.AnnAssign):  # Check if the node is an annotated assignment
-                    assgn_comment = None
-                    tokens = _extract_tokens_between_line_numbers(
-                        # Extract tokens between the line numbers of the annotated assignment
-                        gen, n.lineno, n.end_lineno
-                    )
-                    for toknum, tokval in tokens:
-                        if toknum == tokenize.COMMENT:
-                            # Extract the comment
-                            assgn_comment = tokval
-                            break
-
-                    if assgn_comment:
-                        # If a comment is found, transform the annotation to include the comment
-                        assgn_subscript = n.annotation
-                        has_comments = True
-                        n.annotation = ast.Subscript(
-                            value=ast.Name(id="Annotated", ctx=ast.Load()),
-                            slice=ast.Tuple(
-                                elts=[
-                                    assgn_subscript,
-                                    ast.Call(
-                                        func=ast.Name(
-                                            id="Doc", ctx=ast.Load()
-                                        ),
-                                        args=[
-                                            ast.Constant(
-                                                value=assgn_comment.strip("#").strip()
-                                            )
-                                        ],
-                                        keywords=[]
-                                    )
-                                ],
-                                ctx=ast.Load()
-                            ),
-                            ctx=ast.Load()
-                        )
-
-    if has_comments:
-        for node in tree.body:
-            if isinstance(node, ast.ImportFrom):
-                if node.module == "typing_extensions":
-                    if ast.alias(name="Annotated") not in node.names:
-                        node.names.append(ast.alias(name="Annotated"))
-                    if ast.alias(name="Doc") not in node.names:
-                        node.names.append(ast.alias(name="Doc"))
-
-    transformed_schema_source = ast.unparse(tree)
-
-    if debug:
-        print("Source code after transformation:")
-        print("--" * 50)
-        print(transformed_schema_source)
-        print("--" * 50)
-
-    namespace = {}
-    exec(transformed_schema_source, namespace)
-    return namespace[schema_class.__name__]
+        return prompt
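
For context, the removed `_convert_pythonic_comments_to_annotated_docs` helper re-parsed the schema module, folded a trailing `#` comment on each annotated field into `Annotated[..., Doc(...)]` metadata, and re-executed the transformed source to return an equivalent class. A minimal sketch of the effect, assuming a hypothetical `Sentiment` schema that is not part of this diff:

```python
from typing_extensions import Annotated, Doc, TypedDict

# A user-authored schema with a trailing "pythonic" field comment:
#
#     class Sentiment(TypedDict):
#         sentiment: str  # "negative", "neutral", or "positive"
#
# The removed helper rewrote the field annotation so that the class it
# returned was roughly equivalent to:
class Sentiment(TypedDict):
    sentiment: Annotated[str, Doc('"negative", "neutral", or "positive"')]
```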