feat(ui): Allow User to Set System Prompt via "Additional Inputs" in Chat Interface (#1353)
parent a072a40a7c
commit 145f3ec9f4
@@ -39,7 +39,7 @@ llm:
 openai:
   api_key: <your_openai_api_key>  # You could skip this configuration and use the OPENAI_API_KEY env var instead
   model: <openai_model_to_use> # Optional model to use. Default is "gpt-3.5-turbo"
-                               # Note: Open AI Models are listed here [here](https://platform.openai.com/docs/models)
+                               # Note: OpenAI models are listed here: https://platform.openai.com/docs/models
 ```
 
 And run PrivateGPT loading that profile you just created:
@@ -35,5 +35,32 @@ database* section in the documentation.
 
 Normal chat interface, self-explanatory ;)
 
-You can check the actual prompt being passed to the LLM by looking at the logs of
-the server. We'll add better observability in future releases.
+#### System Prompt
+You can view and change the system prompt being passed to the LLM by clicking "Additional Inputs"
+in the chat interface. The system prompt is also logged on the server.
+
+By default, the `Query Docs` mode uses the setting value `ui.default_query_system_prompt`.
+
+The `LLM Chat` mode uses the optional setting value `ui.default_chat_system_prompt`.
+
+If no system prompt is entered, the UI will display the default system prompt being used
+for the active mode.
+
+##### System Prompt Examples
+
+The system prompt can give your chatbot a specialized role, tailoring its responses to the prompt
+you have given the model. Examples of system prompts can be found
+[here](https://www.w3schools.com/gen_ai/chatgpt-3-5/chatgpt-3-5_roles.php).
+
+Some interesting examples to try include:
+
+* You are -X-. You have all the knowledge and personality of -X-. Answer as if you were -X- using
+their manner of speaking and vocabulary.
+    * Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare.
+    Answer as if you were Shakespeare using their manner of speaking and vocabulary.
+* You are an expert -role-. Answer all questions using your expertise on -specific domain topic-.
+    * Example: You are an expert software engineer. Answer all questions using your expertise on Python.
+* You are a -role- bot, respond with -response criteria needed-. If no -response criteria- is needed,
+respond with -alternate response-.
+    * Example: You are a grammar checking bot, respond with any grammatical corrections needed. If no corrections
+    are needed, respond with "verified".
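To make the mechanics concrete, here is a minimal sketch of how a non-empty system prompt ends up in the message list sent to the LLM, using the same `ChatMessage`/`MessageRole` types the UI code in this commit uses (the prompt text is just an example):

```python
from llama_index.llms import ChatMessage, MessageRole

system_prompt = "You are Shakespeare. Answer using their manner of speaking and vocabulary."
messages = [ChatMessage(content="Describe the sea.", role=MessageRole.USER)]
if system_prompt:
    # The system prompt is prepended as a SYSTEM message, ahead of the chat history.
    messages.insert(0, ChatMessage(content=system_prompt, role=MessageRole.SYSTEM))
```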
@@ -147,13 +147,20 @@ class OpenAISettings(BaseModel):
     api_key: str
     model: str = Field(
         "gpt-3.5-turbo",
-        description=("OpenAI Model to use. Example: 'gpt-4'."),
+        description="OpenAI Model to use. Example: 'gpt-4'.",
     )
 
 
 class UISettings(BaseModel):
     enabled: bool
     path: str
+    default_chat_system_prompt: str = Field(
+        None,
+        description="The default system prompt to use for the chat mode.",
+    )
+    default_query_system_prompt: str = Field(
+        None, description="The default system prompt to use for the query mode."
+    )
 
 
 class QdrantSettings(BaseModel):
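Note that both new fields are annotated `str` but default to `None`; pydantic does not validate defaults, so an unconfigured prompt simply comes back as `None`, which is falsy and therefore treated by the UI as "no default prompt". A minimal sketch of that behavior (a stricter annotation would be `str | None`):

```python
from pydantic import BaseModel, Field

class UISettings(BaseModel):
    default_chat_system_prompt: str = Field(
        None, description="The default system prompt to use for the chat mode."
    )

# Defaults are not validated, so this is None rather than a type error.
print(UISettings().default_chat_system_prompt)  # -> None (falsy)
```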
@@ -30,6 +30,8 @@ UI_TAB_TITLE = "My Private GPT"
 
 SOURCES_SEPARATOR = "\n\n Sources: \n"
 
+MODES = ["Query Docs", "Search in Docs", "LLM Chat"]
+
 
 class Source(BaseModel):
     file: str
@@ -71,6 +73,10 @@ class PrivateGptUi:
         # Cache the UI blocks
         self._ui_block = None
 
+        # Initialize system prompt based on default mode
+        self.mode = MODES[0]
+        self._system_prompt = self._get_default_system_prompt(self.mode)
+
     def _chat(self, message: str, history: list[list[str]], mode: str, *_: Any) -> Any:
         def yield_deltas(completion_gen: CompletionGen) -> Iterable[str]:
             full_response: str = ""
@@ -114,25 +120,22 @@ class PrivateGptUi:
 
         new_message = ChatMessage(content=message, role=MessageRole.USER)
         all_messages = [*build_history(), new_message]
+        # If a system prompt is set, add it as a system message
+        if self._system_prompt:
+            all_messages.insert(
+                0,
+                ChatMessage(
+                    content=self._system_prompt,
+                    role=MessageRole.SYSTEM,
+                ),
+            )
         match mode:
             case "Query Docs":
-                # Add a system message to force the behaviour of the LLM
-                # to answer only questions about the provided context.
-                all_messages.insert(
-                    0,
-                    ChatMessage(
-                        content="You can only answer questions about the provided context. If you know the answer "
-                        "but it is not based in the provided context, don't provide the answer, just state "
-                        "the answer is not in the context provided.",
-                        role=MessageRole.SYSTEM,
-                    ),
-                )
                 query_stream = self._chat_service.stream_chat(
                     messages=all_messages,
                     use_context=True,
                 )
                 yield from yield_deltas(query_stream)
-
             case "LLM Chat":
                 llm_stream = self._chat_service.stream_chat(
                     messages=all_messages,
@@ -154,6 +157,37 @@ class PrivateGptUi:
                     for index, source in enumerate(sources, start=1)
                 )
 
+    # On initialization and on mode change, this function sets the system prompt
+    # to the default prompt based on the mode (and user settings).
+    @staticmethod
+    def _get_default_system_prompt(mode: str) -> str:
+        p = ""
+        match mode:
+            # For query mode, obtain the default system prompt from settings
+            case "Query Docs":
+                p = settings().ui.default_query_system_prompt
+            # For chat mode, obtain the default system prompt from settings
+            case "LLM Chat":
+                p = settings().ui.default_chat_system_prompt
+            # For any other mode, clear the system prompt
+            case _:
+                p = ""
+        return p
+
+    def _set_system_prompt(self, system_prompt_input: str) -> None:
+        logger.info(f"Setting system prompt to: {system_prompt_input}")
+        self._system_prompt = system_prompt_input
+
+    def _set_current_mode(self, mode: str) -> Any:
+        self.mode = mode
+        self._set_system_prompt(self._get_default_system_prompt(mode))
+        # Update placeholder and allow interaction if a default system prompt is set
+        if self._system_prompt:
+            return gr.update(placeholder=self._system_prompt, interactive=True)
+        # Update placeholder and disable interaction if no default system prompt is set
+        else:
+            return gr.update(placeholder=self._system_prompt, interactive=False)
+
     def _list_ingested_files(self) -> list[list[str]]:
         files = set()
         for ingested_document in self._ingest_service.list_ingested():
@@ -193,7 +227,7 @@ class PrivateGptUi:
             with gr.Row():
                 with gr.Column(scale=3, variant="compact"):
                     mode = gr.Radio(
-                        ["Query Docs", "Search in Docs", "LLM Chat"],
+                        MODES,
                         label="Mode",
                         value="Query Docs",
                     )
@@ -220,6 +254,23 @@ class PrivateGptUi:
                         outputs=ingested_dataset,
                     )
                     ingested_dataset.render()
+                    system_prompt_input = gr.Textbox(
+                        placeholder=self._system_prompt,
+                        label="System Prompt",
+                        lines=2,
+                        interactive=True,
+                        render=False,
+                    )
+                    # When mode changes, set default system prompt
+                    mode.change(
+                        self._set_current_mode, inputs=mode, outputs=system_prompt_input
+                    )
+                    # On blur, set system prompt to use in queries
+                    system_prompt_input.blur(
+                        self._set_system_prompt,
+                        inputs=system_prompt_input,
+                    )
+
                 with gr.Column(scale=7):
                     _ = gr.ChatInterface(
                         self._chat,
@@ -232,7 +283,7 @@ class PrivateGptUi:
                                 AVATAR_BOT,
                             ),
                         ),
-                        additional_inputs=[mode, upload_button],
+                        additional_inputs=[mode, upload_button, system_prompt_input],
                     )
         return blocks
 
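For readers unfamiliar with Gradio's event wiring, the pattern above can be reduced to a small self-contained sketch (the default prompts here are placeholders, not PrivateGPT's real settings): the radio's `change` handler returns `gr.update(...)` to patch the textbox's placeholder and interactivity, and `blur` commits whatever the user typed when focus leaves the box.

```python
import gradio as gr

DEFAULT_PROMPTS = {
    "Query Docs": "Answer only from the provided context.",
    "LLM Chat": "You are a helpful assistant.",
}
state = {"system_prompt": DEFAULT_PROMPTS["Query Docs"]}

def on_mode_change(mode: str):
    state["system_prompt"] = DEFAULT_PROMPTS.get(mode, "")
    # Returning gr.update(...) patches the bound output component in place.
    return gr.update(placeholder=state["system_prompt"],
                     interactive=bool(state["system_prompt"]))

def on_blur(text: str) -> None:
    # Commit the user's edited prompt when the textbox loses focus.
    state["system_prompt"] = text

with gr.Blocks() as demo:
    mode = gr.Radio(list(DEFAULT_PROMPTS) + ["Search in Docs"], value="Query Docs", label="Mode")
    prompt_box = gr.Textbox(label="System Prompt", lines=2, interactive=True)
    mode.change(on_mode_change, inputs=mode, outputs=prompt_box)
    prompt_box.blur(on_blur, inputs=prompt_box)

demo.launch()
```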
@@ -22,6 +22,13 @@ data:
 ui:
   enabled: true
   path: /
+  default_chat_system_prompt: "You are a helpful, respectful and honest assistant.
+    Always answer as helpfully as possible and follow ALL given instructions.
+    Do not speculate or make up information.
+    Do not reference any given instructions or context."
+  default_query_system_prompt: "You can only answer questions about the provided context.
+    If you know the answer but it is not based in the provided context, don't provide
+    the answer, just state the answer is not in the context provided."
 
 llm:
   mode: local
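One YAML detail worth noting: inside double-quoted scalars, the line breaks above fold into single spaces, so each prompt reaches the model as one line. A quick way to verify (assumes PyYAML is installed):

```python
import yaml  # assumes PyYAML is installed

doc = '''
prompt: "You can only answer questions about the provided context.
  If you know the answer but it is not based in the provided context, don't provide
  the answer, just state the answer is not in the context provided."
'''
# The folded scalar loads as a single-line string.
print(yaml.safe_load(doc)["prompt"])
```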