promptquality.constants package

Submodules

promptquality.constants.config module

promptquality.constants.dataset_format module

class DatasetFormat(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)

Bases: str, Enum

csv = 'csv'
feather = 'feather'
jsonl = 'jsonl'
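
A minimal sketch of picking an upload format with this enum. It relies only on the documented members and on the fact that DatasetFormat subclasses str; the file name and the extension-to-format mapping are illustrative assumptions, not part of the library.

    from pathlib import Path

    from promptquality.constants.dataset_format import DatasetFormat

    # Hypothetical dataset file; only its suffix matters for this sketch.
    dataset_path = Path("eval_set.jsonl")

    # Enum lookup by value works because each member's value is its extension.
    fmt = DatasetFormat(dataset_path.suffix.lstrip("."))

    # Members compare equal to plain strings since DatasetFormat is a str Enum.
    assert fmt is DatasetFormat.jsonl
    assert fmt == "jsonl"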

promptquality.constants.integrations module

promptquality.constants.job module

class JobStatus(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)

Bases: str, Enum

unstarted = 'unstarted'
in_progress = 'in_progress'
completed = 'completed'
failed = 'failed'
error = 'error'
static is_incomplete(status)

Return type: bool

static is_failed(status)

Return type: bool
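
A small polling sketch built around the two documented helpers. It assumes, based on their names, that is_incomplete reports whether a status is still non-terminal and is_failed whether it ended in failed or error; the get_status callable is a caller-supplied placeholder, not a promptquality API.

    import time

    from promptquality.constants.job import JobStatus

    def wait_for_job(get_status, poll_seconds=5.0):
        # `get_status` is a caller-supplied placeholder returning the
        # job's current JobStatus; it is not a documented function.
        status = get_status()
        while JobStatus.is_incomplete(status):
            time.sleep(poll_seconds)
            status = get_status()
        if JobStatus.is_failed(status):
            raise RuntimeError(f"Job ended in state {status.value!r}")
        return status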

promptquality.constants.models module

class Models(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)

Bases: str, Enum

chat_gpt = 'ChatGPT (4K context)'
chat_gpt_16k = 'ChatGPT (16K context)'
gpt_35_turbo = 'ChatGPT (4K context)'
gpt_35_turbo_16k = 'ChatGPT (16K context)'
gpt_35_turbo_16k_0125 = 'ChatGPT (16K context, 0125)'
gpt_35_turbo_instruct = 'gpt-3.5-turbo-instruct'
gpt_4 = 'gpt-4 (8K context)'
gpt_4_turbo = 'GPT-4 Turbo'
gpt_4_turbo_0125 = 'GPT-4 Turbo (0125)'
gpt_4_128k = 'gpt-4 (128K context)'
babbage_2 = 'babbage-002'
davinci_2 = 'davinci-002'
azure_chat_gpt = 'ChatGPT (4K context) (Azure)'
azure_chat_gpt_16k = 'ChatGPT (16K context) (Azure)'
azure_gpt_35_turbo = 'ChatGPT (4K context) (Azure)'
azure_gpt_35_turbo_16k = 'ChatGPT (16K context) (Azure)'
azure_gpt_35_turbo_instruct = 'gpt-3.5-turbo-instruct (Azure)'
azure_gpt_4 = 'gpt-4 (Azure)'
text_bison = 'text-bison'
text_bison_001 = 'text-bison@001'
gemini_pro = 'gemini-1.0-pro'
aws_titan_tg1_large = 'AWS - Titan TG1 Large (Bedrock)'
aws_titan_text_lite_v1 = 'AWS - Titan Lite v1 (Bedrock)'
aws_titan_text_express_v1 = 'AWS - Titan Express v1 (Bedrock)'
cohere_command_r_v1 = 'Cohere - Command R v1 (Bedrock)'
cohere_command_r_plus_v1 = 'Cohere - Command R+ v1 (Bedrock)'
cohere_command_text_v14 = 'Cohere - Command v14 (Bedrock)'
cohere_command_light_text_v14 = 'Cohere - Command Light v14 (Bedrock)'
ai21_j2_mid_v1 = 'AI21 - Jurassic-2 Mid v1 (Bedrock)'
ai21_j2_ultra_v1 = 'AI21 - Jurassic-2 Ultra v1 (Bedrock)'
anthropic_claude_instant_v1 = 'Anthropic - Claude Instant v1 (Bedrock)'
anthropic_claude_v1 = 'Anthropic - Claude v1 (Bedrock)'
anthropic_claude_v2 = 'Anthropic - Claude v2 (Bedrock)'
anthropic_claude_v21 = 'Anthropic - Claude v2.1 (Bedrock)'
anthropic_claude_3_sonnet = 'Anthropic - Claude 3 Sonnet (Bedrock)'
anthropic_claude_3_haiku = 'Anthropic - Claude 3 Haiku (Bedrock)'
meta_llama2_13b_chat_v1 = 'Meta - Llama 2 Chat 13B v1 (Bedrock)'
meta_llama3_8b_instruct_v1 = 'Meta - Llama 3 8B Instruct v1 (Bedrock)'
meta_llama3_70b_instruct_v1 = 'Meta - Llama 3 70B Instruct v1 (Bedrock)'
mistral_7b_instruct = 'Mistral - 7B Instruct (Bedrock)'
mistral_8x7b_instruct = 'Mixtral - 8x7B Instruct (Bedrock)'
mistral_large = 'Mistral - Large (Bedrock)'
palmyra_base = 'Palmyra Base'
palmyra_large = 'Palmyra Large'
palmyra_instruct = 'Palmyra Instruct'
palmyra_instruct_30 = 'Palmyra Instruct 30'
palmyra_beta = 'Palmyra Beta'
silk_road = 'Silk Road'
palmyra_e = 'Palmyra E'
palmyra_x = 'Palmyra X'
palmyra_x_32k = 'Palmyra X 32K'
palmyra_med = 'Palmyra Med'
examworks_v1 = 'Exam Works'
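
The member values are the human-readable model names, and several members (for example chat_gpt and gpt_35_turbo) share a value, which standard Enum semantics turn into aliases of a single member. A short sketch of both behaviors, using only the enum itself:

    from promptquality.constants.models import Models

    # The value is the display string for the model.
    model = Models.anthropic_claude_3_sonnet
    print(model.value)  # 'Anthropic - Claude 3 Sonnet (Bedrock)'

    # Members defined with the same value are aliases of the first definition.
    assert Models.gpt_35_turbo is Models.chat_gpt

    # Lookup by value returns the canonical member for that string.
    assert Models("GPT-4 Turbo") is Models.gpt_4_turbo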

promptquality.constants.prompt_optimization module

promptquality.constants.routes module

promptquality.constants.run module

class TagType(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)

Bases: str, Enum

GENERIC = 'generic'
RAG = 'rag'
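
Because TagType is a str Enum, its members serialize directly wherever a plain string is expected. The tag dictionary below is purely illustrative and not a schema defined by this module.

    from promptquality.constants.run import TagType

    # Illustrative tag payload; only the TagType usage comes from this module.
    tag = {"key": "retriever", "value": "bm25", "tag_type": TagType.RAG}
    assert tag["tag_type"] == "rag"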

promptquality.constants.scorers module

class Scorers(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)

Bases: str, Enum

toxicity = 'toxicity'
factuality = 'factuality'
correctness = 'factuality'
groundedness = 'groundedness'
context_adherence = 'groundedness'
context_adherence_plus = 'groundedness'
pii = 'pii'
latency = 'latency'
context_relevance = 'context_relevance'
sexist = 'sexist'
tone = 'tone'
prompt_perplexity = 'prompt_perplexity'
chunk_attribution_utilization_gpt = 'chunk_attribution_utilization_gpt'
chunk_attribution_utilization_plus = 'chunk_attribution_utilization_gpt'
completeness_gpt = 'completeness_gpt'
completeness_plus = 'completeness_gpt'
prompt_injection = 'prompt_injection'
adherence_basic = 'adherence_nli'
context_adherence_basic = 'adherence_nli'
completeness_basic = 'completeness_nli'
chunk_attribution_utilization_basic = 'chunk_attribution_utilization_nli'
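
Several names map to the same underlying scorer (for example correctness and factuality, or context_adherence and groundedness), so Enum alias rules collapse them into one member. Below is a sketch of selecting scorers for an evaluation run; the consuming function that would receive this list is not shown and is not defined by this module.

    from promptquality.constants.scorers import Scorers

    # Aliases: members sharing a value resolve to the first-defined member.
    assert Scorers.correctness is Scorers.factuality
    assert Scorers.context_adherence is Scorers.groundedness

    # An illustrative selection to hand to a run/evaluation call elsewhere.
    selected = [Scorers.context_adherence, Scorers.completeness_gpt, Scorers.pii]
    print([s.value for s in selected])  # ['groundedness', 'completeness_gpt', 'pii']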

Module contents